Dataset schema (one record per source file):
  repo_name : string, length 6 to 67
  path      : string, length 5 to 185
  copies    : string, length 1 to 3
  size      : string, length 4 to 6
  content   : string, length 1.02k to 962k
  license   : string, 15 distinct values
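The records below follow this schema directly. As a rough, hypothetical sketch (the JSON-lines storage format, the file name code_dump.jsonl, and the helper names are assumptions, not part of this dump — only the column names come from the schema above), rows with these columns could be streamed and filtered like so:

# Hypothetical sketch: iterate records with the schema above
# (repo_name, path, copies, size, content, license) stored as JSON lines.
import json
from collections import Counter

def iter_permissive(path="code_dump.jsonl",
                    licenses=("mit", "bsd-2-clause", "bsd-3-clause")):
    """Yield (repo_name, path, content) for rows under a permissive license."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            row = json.loads(line)
            if row["license"] in licenses:
                yield row["repo_name"], row["path"], row["content"]

# Example: count Python files per repository.
counts = Counter(repo for repo, p, _ in iter_permissive() if p.endswith(".py"))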
batteries03/generative-adversarial-networks
src/mnist.py
1
15569
import numpy as np import pickle import random import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import time, os import tensorflow as tf import layers EPOCHS = 100 STEPS_PER_CHECKPOINT = 5 BATCH_SIZE = 100 TRAINING_DIR = './model/' with open('../dataset/mnist/mnist.pkl', 'rb') as f: data = pickle.load(f, encoding='latin') (train_images, train_labels), (valid_images, valid_labels), (test_images, test_labels) = data #разбивка на тренировочные картинки, проверочные, тестовые. train_images = np.reshape(train_images, [-1, 28, 28, 1]) train_labels = np.reshape(train_labels, [-1, 1]) valid_images = np.reshape(valid_images, [-1, 28, 28, 1]) valid_labels = np.reshape(valid_labels, [-1, 1]) test_images = np.reshape(test_images, [-1, 28, 28, 1]) test_labels = np.reshape(test_labels, [-1, 1]) # дополнение до размера 32x32 train_images = np.pad(train_images, ((0, 0), (2, 2), (2, 2), (0, 0)), mode='edge') valid_images = np.pad(valid_images, ((0, 0), (2, 2), (2, 2), (0, 0)), mode='edge') test_images = np.pad(test_images, ((0, 0), (2, 2), (2, 2), (0, 0)), mode='edge') #функция рисования результатов работы генератора def plot(samples): fig = plt.figure(figsize=(4, 4)) gs = gridspec.GridSpec(4, 4) gs.update(wspace=0.05, hspace=0.05) for i, sample in enumerate(samples): ax = plt.subplot(gs[i]) plt.axis('off') ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_aspect('equal') plt.imshow(sample.squeeze(), cmap='gray') return fig def sample_seed_inputs(m, n): return np.random.uniform(-1., 1., size=[m, n]) GENERATOR_SEED_SIZE = 64 def generator(inputs, batch_size, training): with tf.name_scope('generator'): net = layers.fully_connected_layer(1, inputs, 4 * 4 * 512, None) net = tf.reshape(net, [batch_size, 4, 4, 512]) net = layers.batch_norm(net, training, name='bn1') net = layers.conv2d_transpose_layer(1, net, [5, 5, 256], batch_size, stride=2) net = layers.batch_norm(net, training, name='bn2') net = layers.conv2d_transpose_layer(2, net, [5, 5, 128], batch_size, stride=2) net = layers.batch_norm(net, training, name='bn3') net = layers.conv2d_transpose_layer(3, net, [5, 5, 1], batch_size, tf.nn.sigmoid, stride=2, zero_biases=True) return net def discriminator_base(inputs): with tf.name_scope('discriminator_base'): #net = layers.batch_norm(inputs, training, name='bn1') net = layers.conv2d_layer(1, inputs, [5, 5, 16], lambda x: layers.lrelu(x, 0.2), stride=2) #net = layers.batch_norm(net, training, name='bn2') net = layers.conv2d_layer(2, net, [5, 5, 32], lambda x: layers.lrelu(x, 0.2), stride=2) #net = layers.batch_norm(net, training, name='bn3') net = layers.conv2d_layer(3, net, [5, 5, 64], lambda x: layers.lrelu(x, 0.2), stride=2) #net = layers.batch_norm(net, training, name='bn4') net = layers.conv2d_layer(4, net, [5, 5, 128], lambda x: layers.lrelu(x, 0.2), stride=2) net = layers.max_pool2d(net, [2, 2]) #net = layers.batch_norm(net, training, name='bn5') return net def discriminator_class(inputs): with tf.name_scope('discriminator_class'): net = layers.fully_connected_layer(1, inputs, 16) net = layers.fully_connected_layer(2, net, 1, tf.nn.sigmoid, zero_biases=True, zero_weights=True) return net def discriminator_latent(inputs, categorical_shape, continuous_shape): with tf.name_scope('discriminator_latent'): net = layers.fully_connected_layer(1, inputs, 16) cat_net = layers.fully_connected_layer(2, net, categorical_shape, tf.nn.softmax, zero_biases=True, zero_weights=True) con_net = layers.fully_connected_layer(3, net, continuous_shape, tf.nn.tanh, 
zero_biases=True, zero_weights=True) return cat_net, con_net #обнуление графа tf.reset_default_graph() #создание сети в графе with tf.name_scope('GAN'): training_mode = tf.placeholder(tf.bool, name='training_mode') categorical_inputs = tf.placeholder(tf.int32, [BATCH_SIZE, 1], name='categorical_inputs') _categorical_inputs = tf.cast(tf.one_hot(tf.squeeze(categorical_inputs), 10), tf.float32) _categorical_inputs = tf.reshape(_categorical_inputs, [BATCH_SIZE, 10]) continuous_inputs = tf.placeholder(tf.float32, [BATCH_SIZE, 2], name='continuous_inputs') generator_seed_inputs = tf.placeholder(tf.float32, [BATCH_SIZE, GENERATOR_SEED_SIZE], name='generator_seed_inputs') _generator_inputs = generator_seed_inputs with tf.variable_scope('generator'): _inputs = tf.concat([_generator_inputs, _categorical_inputs, continuous_inputs], axis=1) generator_outputs = generator(_inputs, BATCH_SIZE, training_mode) discriminator_inputs = tf.placeholder(tf.float32, [BATCH_SIZE] + list(train_images.shape[1:]), name='inputs') with tf.variable_scope('discriminator-base') as vs: with tf.name_scope('real'): real_net = discriminator_base(discriminator_inputs) vs.reuse_variables() with tf.name_scope('fake'): fake_net = discriminator_base(generator_outputs) with tf.variable_scope('discriminator-class') as vs: with tf.name_scope('real'): discriminator_outputs_real_prob = discriminator_class(real_net) vs.reuse_variables() with tf.name_scope('fake'): discriminator_outputs_fake_prob = discriminator_class(fake_net) with tf.variable_scope('discriminator-latent'): latent_restored_outputs = discriminator_latent(fake_net, _categorical_inputs.shape[1], continuous_inputs.shape[1]) #элементы графа для обучения сети with tf.name_scope('training'): with tf.name_scope('discriminator'): discriminator_targets_real = tf.ones_like(discriminator_outputs_real_prob, name='discriminator_targets_real') discriminator_targets_fake = tf.zeros_like(discriminator_outputs_fake_prob, name='discriminator_targets_fake') _loss_real = tf.reduce_mean(tf.log(tf.clip_by_value(discriminator_outputs_real_prob, 1e-9, 1))) _loss_fake = tf.reduce_mean(tf.log(tf.clip_by_value(1 - discriminator_outputs_fake_prob, 1e-9, 1))) discriminator_loss = _loss_real + _loss_fake #минимизация функции потерь по весовым коэффициентам discriminator_lr_var = tf.Variable(1e-3, trainable=False) params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator-base') params = params + tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator-class') optimizer = tf.train.AdamOptimizer(discriminator_lr_var) discriminator_updates = optimizer.minimize(-discriminator_loss, var_list=params) # maximization with tf.name_scope('generator'): #целевые значения, к которым должна придти сеть в результате обучения generator_targets = tf.ones_like(discriminator_outputs_fake_prob, name='generator_targets') #функция потерь (ошибки) generator_loss = tf.reduce_mean(tf.log(tf.clip_by_value(discriminator_outputs_fake_prob, 1e-9, 1))) #минимизация функции потерь по весовым коэффициентам generator_lr_var = tf.Variable(1e-3, trainable=False) params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator') optimizer = tf.train.AdamOptimizer(generator_lr_var) generator_updates = optimizer.minimize(-generator_loss, var_list=params) # maximization with tf.name_scope('mutual'): cat_out, con_out = latent_restored_outputs cat_loss = tf.reduce_mean(-tf.reduce_sum(_categorical_inputs*tf.log(tf.clip_by_value(cat_out, 1e-9, 1)), axis=1)) con_loss = tf.reduce_mean(0.5 * 
tf.square(continuous_inputs - con_out)) mutual_loss = cat_loss + con_loss mutual_lr_var = tf.Variable(1e-3, trainable=False) params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator') params = params + tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator-base') params = params + tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator-latent') optimizer = tf.train.AdamOptimizer(mutual_lr_var) mutual_updates = optimizer.minimize(mutual_loss, var_list=params) # сохранение параметров для графа save_vars = tf.global_variables() saver = tf.train.Saver(save_vars) # шаг тренировки. заполняем узлы графа, картинками для обучения, которые используются для входа, и вычисляем обновления весов def train_discriminator_step(session, images, categorical, continuous, seed): input_feed = {} input_feed[discriminator_inputs.name] = images input_feed[categorical_inputs.name] = categorical input_feed[continuous_inputs.name] = continuous input_feed[generator_seed_inputs.name] = seed input_feed[training_mode.name] = True output_feed = [discriminator_updates] _ = session.run(output_feed, input_feed) def train_generator_step(session, categorical, continuous, seed): input_feed = {} input_feed[categorical_inputs.name] = categorical input_feed[continuous_inputs.name] = continuous input_feed[generator_seed_inputs.name] = seed input_feed[training_mode.name] = True output_feed = [generator_updates] _ = session.run(output_feed, input_feed) def train_mutual_step(session, categorical, continuous, seed): input_feed = {} input_feed[categorical_inputs.name] = categorical input_feed[continuous_inputs.name] = continuous input_feed[generator_seed_inputs.name] = seed input_feed[training_mode.name] = True output_feed = [mutual_updates] _ = session.run(output_feed, input_feed) def generator_step(session, categorical, continuous, seed): input_feed = {} input_feed[categorical_inputs.name] = categorical input_feed[continuous_inputs.name] = continuous input_feed[generator_seed_inputs.name] = seed input_feed[training_mode.name] = False return session.run(generator_outputs, input_feed) # заполнение узлов графа картинками и целевыми значениями.расчет функции потерь def valid_step(session, images, categorical, continuous, seed, summary): input_feed = {} input_feed[categorical_inputs.name] = categorical input_feed[discriminator_inputs.name] = images input_feed[continuous_inputs.name] = continuous input_feed[generator_seed_inputs.name] = seed input_feed[training_mode.name] = False output_feed = [generator_loss, discriminator_loss, summary] return session.run(output_feed, input_feed) # цикл обучения if not os.path.exists(TRAINING_DIR): os.makedirs(TRAINING_DIR) if not os.path.exists('./output/'): os.makedirs('./output/') checkpoint_path = os.path.join(TRAINING_DIR, 'GAN.ckpt') tf.summary.scalar('geneartor loss', generator_loss) tf.summary.scalar('generator learning rate', generator_lr_var) tf.summary.scalar('discriminator loss', discriminator_loss) tf.summary.scalar('discriminator learning rate', discriminator_lr_var) summary_op = tf.summary.merge_all() nbatches = len(train_images) // BATCH_SIZE with tf.Session() as session: train_summary_writer = tf.summary.FileWriter(os.path.join(TRAINING_DIR, 'summary', 'train'), session.graph) valid_summary_writer = tf.summary.FileWriter(os.path.join(TRAINING_DIR, 'summary', 'valid'), session.graph) print('Initializing parameters ', flush=True, end='') session.run(tf.global_variables_initializer()) print('[OK]', flush=True) ckpt = 
tf.train.get_checkpoint_state(TRAINING_DIR) if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path): saver.restore(session, ckpt.model_checkpoint_path) tf.train.write_graph(session.graph_def, TRAINING_DIR, 'GAN.pb', as_text=False) print('Start training.', flush=True) try: for epoch in range(0, EPOCHS): cat = np.random.randint(0, 10, [BATCH_SIZE, 1]) con = np.random.uniform(-1, 1, size=[BATCH_SIZE, 2]) seed = sample_seed_inputs(BATCH_SIZE, GENERATOR_SEED_SIZE) samples = generator_step(session, cat, con, seed)[:16] fig = plot(samples) plt.savefig('output/{}.png'.format(str(epoch).zfill(3)), bbox_inches='tight') plt.close(fig) start = time.time() print('Epoch #%i: ' % (epoch+1), end='', flush=True) for b in range(nbatches): batch = np.arange(b*BATCH_SIZE, (b+1)*BATCH_SIZE) images = train_images[batch] cat = np.random.randint(0, 10, [BATCH_SIZE, 1]) con = np.random.uniform(-1, 1, size=[BATCH_SIZE, 2]) seed = sample_seed_inputs(BATCH_SIZE, GENERATOR_SEED_SIZE) train_discriminator_step(session, images, cat, con, seed) train_generator_step(session, cat, con, seed) train_mutual_step(session, cat, con, seed) batch = np.random.choice(len(train_images), BATCH_SIZE, replace=False) images = train_images[batch] cat = np.random.randint(0, 10, [BATCH_SIZE, 1]) con = np.random.uniform(-1, 1, size=[BATCH_SIZE, 2]) seed = sample_seed_inputs(BATCH_SIZE, GENERATOR_SEED_SIZE) train_gen_loss, train_dis_loss, summary = valid_step(session, images, cat, con, seed, summary_op) train_summary_writer.add_summary(summary, epoch) batch = np.random.choice(len(valid_images), BATCH_SIZE, replace=False) images = test_images[batch] cat = np.random.randint(0, 10, [BATCH_SIZE, 1]) con = np.random.uniform(-1, 1, size=[BATCH_SIZE, 2]) seed = sample_seed_inputs(BATCH_SIZE, GENERATOR_SEED_SIZE) valid_gen_loss, valid_dis_loss, summary = valid_step(session, images, cat, con, seed, summary_op) valid_summary_writer.add_summary(summary, epoch) elapsed = time.time() - start print('train generator loss = %.6f, train discriminator loss = %.6f, valid generator loss = %.6f, valid discriminator loss = %.6f, elapsed %.3f sec.' % (train_gen_loss, train_dis_loss, valid_gen_loss, valid_dis_loss, elapsed), flush=True) if (epoch+1) % STEPS_PER_CHECKPOINT == 0: saver.save(session, checkpoint_path) print('Training process is finished.', flush=True) cat = np.random.randint(0, 9, [BATCH_SIZE, 1]) con = np.random.uniform(-1, 1, size=[BATCH_SIZE, 2]) seed = sample_seed_inputs(BATCH_SIZE, GENERATOR_SEED_SIZE) samples = generator_step(session, cat, con, seed)[:16] fig = plot(samples) plt.savefig('output/{}.png'.format(str(EPOCHS).zfill(3)), bbox_inches='tight') plt.close(fig) finally: saver.save(session, checkpoint_path) tf.train.write_graph(session.graph_def, TRAINING_DIR, 'GAN.pb', as_text=False)
mit
huobaowangxi/scikit-learn
sklearn/grid_search.py
103
36232
""" The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters of an estimator. """ from __future__ import print_function # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>, # Gael Varoquaux <gael.varoquaux@normalesup.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause from abc import ABCMeta, abstractmethod from collections import Mapping, namedtuple, Sized from functools import partial, reduce from itertools import product import operator import warnings import numpy as np from .base import BaseEstimator, is_classifier, clone from .base import MetaEstimatorMixin, ChangedBehaviorWarning from .cross_validation import check_cv from .cross_validation import _fit_and_score from .externals.joblib import Parallel, delayed from .externals import six from .utils import check_random_state from .utils.random import sample_without_replacement from .utils.validation import _num_samples, indexable from .utils.metaestimators import if_delegate_has_method from .metrics.scorer import check_scoring __all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point', 'ParameterSampler', 'RandomizedSearchCV'] class ParameterGrid(object): """Grid of parameters with a discrete number of values for each. Can be used to iterate over parameter value combinations with the Python built-in function iter. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- param_grid : dict of string to sequence, or sequence of such The parameter grid to explore, as a dictionary mapping estimator parameters to sequences of allowed values. An empty dict signifies default parameters. A sequence of dicts signifies a sequence of grids to search, and is useful to avoid exploring parameter combinations that make no sense or have no effect. See the examples below. Examples -------- >>> from sklearn.grid_search import ParameterGrid >>> param_grid = {'a': [1, 2], 'b': [True, False]} >>> list(ParameterGrid(param_grid)) == ( ... [{'a': 1, 'b': True}, {'a': 1, 'b': False}, ... {'a': 2, 'b': True}, {'a': 2, 'b': False}]) True >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}] >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'}, ... {'kernel': 'rbf', 'gamma': 1}, ... {'kernel': 'rbf', 'gamma': 10}] True >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1} True See also -------- :class:`GridSearchCV`: uses ``ParameterGrid`` to perform a full parallelized parameter search. """ def __init__(self, param_grid): if isinstance(param_grid, Mapping): # wrap dictionary in a singleton list to support either dict # or list of dicts param_grid = [param_grid] self.param_grid = param_grid def __iter__(self): """Iterate over the points in the grid. Returns ------- params : iterator over dict of string to any Yields dictionaries mapping each estimator parameter to one of its allowed values. """ for p in self.param_grid: # Always sort the keys of a dictionary, for reproducibility items = sorted(p.items()) if not items: yield {} else: keys, values = zip(*items) for v in product(*values): params = dict(zip(keys, v)) yield params def __len__(self): """Number of points on the grid.""" # Product function that can handle iterables (np.product can't). 
product = partial(reduce, operator.mul) return sum(product(len(v) for v in p.values()) if p else 1 for p in self.param_grid) def __getitem__(self, ind): """Get the parameters that would be ``ind``th in iteration Parameters ---------- ind : int The iteration index Returns ------- params : dict of string to any Equal to list(self)[ind] """ # This is used to make discrete sampling without replacement memory # efficient. for sub_grid in self.param_grid: # XXX: could memoize information used here if not sub_grid: if ind == 0: return {} else: ind -= 1 continue # Reverse so most frequent cycling parameter comes first keys, values_lists = zip(*sorted(sub_grid.items())[::-1]) sizes = [len(v_list) for v_list in values_lists] total = np.product(sizes) if ind >= total: # Try the next grid ind -= total else: out = {} for key, v_list, n in zip(keys, values_lists, sizes): ind, offset = divmod(ind, n) out[key] = v_list[offset] return out raise IndexError('ParameterGrid index out of range') class ParameterSampler(object): """Generator on parameters sampled from given distributions. Non-deterministic iterable over random candidate combinations for hyper- parameter search. If all parameters are presented as a list, sampling without replacement is performed. If at least one parameter is given as a distribution, sampling with replacement is used. It is highly recommended to use continuous distributions for continuous parameters. Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept a custom RNG instance and always use the singleton RNG from ``numpy.random``. Hence setting ``random_state`` will not guarantee a deterministic iteration whenever ``scipy.stats`` distributions are used to define the parameter search space. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- param_distributions : dict Dictionary where the keys are parameters and values are distributions from which a parameter is to be sampled. Distributions either have to provide a ``rvs`` function to sample from them, or can be given as a list of values, where a uniform distribution is assumed. n_iter : integer Number of parameter settings that are produced. random_state : int or RandomState Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. Returns ------- params : dict of string to any **Yields** dictionaries mapping each estimator parameter to as sampled value. Examples -------- >>> from sklearn.grid_search import ParameterSampler >>> from scipy.stats.distributions import expon >>> import numpy as np >>> np.random.seed(0) >>> param_grid = {'a':[1, 2], 'b': expon()} >>> param_list = list(ParameterSampler(param_grid, n_iter=4)) >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items()) ... for d in param_list] >>> rounded_list == [{'b': 0.89856, 'a': 1}, ... {'b': 0.923223, 'a': 1}, ... {'b': 1.878964, 'a': 2}, ... 
{'b': 1.038159, 'a': 2}] True """ def __init__(self, param_distributions, n_iter, random_state=None): self.param_distributions = param_distributions self.n_iter = n_iter self.random_state = random_state def __iter__(self): # check if all distributions are given as lists # in this case we want to sample without replacement all_lists = np.all([not hasattr(v, "rvs") for v in self.param_distributions.values()]) rnd = check_random_state(self.random_state) if all_lists: # look up sampled parameter settings in parameter grid param_grid = ParameterGrid(self.param_distributions) grid_size = len(param_grid) if grid_size < self.n_iter: raise ValueError( "The total space of parameters %d is smaller " "than n_iter=%d." % (grid_size, self.n_iter) + " For exhaustive searches, use GridSearchCV.") for i in sample_without_replacement(grid_size, self.n_iter, random_state=rnd): yield param_grid[i] else: # Always sort the keys of a dictionary, for reproducibility items = sorted(self.param_distributions.items()) for _ in six.moves.range(self.n_iter): params = dict() for k, v in items: if hasattr(v, "rvs"): params[k] = v.rvs() else: params[k] = v[rnd.randint(len(v))] yield params def __len__(self): """Number of points that will be sampled.""" return self.n_iter def fit_grid_point(X, y, estimator, parameters, train, test, scorer, verbose, error_score='raise', **fit_params): """Run fit on one set of parameters. Parameters ---------- X : array-like, sparse matrix or list Input data. y : array-like or None Targets for input data. estimator : estimator object This estimator will be cloned and then fitted. parameters : dict Parameters to be set on estimator for this grid point. train : ndarray, dtype int or bool Boolean mask or indices for training set. test : ndarray, dtype int or bool Boolean mask or indices for test set. scorer : callable or None. If provided must be a scorer callable object / function with signature ``scorer(estimator, X, y)``. verbose : int Verbosity level. **fit_params : kwargs Additional parameter passed to the fit function of the estimator. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Returns ------- score : float Score of this parameter setting on given training / test split. parameters : dict The parameters that have been evaluated. n_samples_test : int Number of test samples in this split. """ score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, error_score) return score, parameters, n_samples_test def _check_param_grid(param_grid): if hasattr(param_grid, 'items'): param_grid = [param_grid] for p in param_grid: for v in p.values(): if isinstance(v, np.ndarray) and v.ndim > 1: raise ValueError("Parameter array should be one-dimensional.") check = [isinstance(v, k) for k in (list, tuple, np.ndarray)] if True not in check: raise ValueError("Parameter values should be a list.") if len(v) == 0: raise ValueError("Parameter values should be a non-empty " "list.") class _CVScoreTuple (namedtuple('_CVScoreTuple', ('parameters', 'mean_validation_score', 'cv_validation_scores'))): # A raw namedtuple is very memory efficient as it packs the attributes # in a struct to get rid of the __dict__ of attributes in particular it # does not copy the string for the keys on each instance. 
# By deriving a namedtuple class just to introduce the __repr__ method we # would also reintroduce the __dict__ on the instance. By telling the # Python interpreter that this subclass uses static __slots__ instead of # dynamic attributes. Furthermore we don't need any additional slot in the # subclass so we set __slots__ to the empty tuple. __slots__ = () def __repr__(self): """Simple custom repr to summarize the main info""" return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format( self.mean_validation_score, np.std(self.cv_validation_scores), self.parameters) class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator, MetaEstimatorMixin)): """Base class for hyper parameter search with cross-validation.""" @abstractmethod def __init__(self, estimator, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise'): self.scoring = scoring self.estimator = estimator self.n_jobs = n_jobs self.fit_params = fit_params if fit_params is not None else {} self.iid = iid self.refit = refit self.cv = cv self.verbose = verbose self.pre_dispatch = pre_dispatch self.error_score = error_score @property def _estimator_type(self): return self.estimator._estimator_type def score(self, X, y=None): """Returns the score on the given data, if the estimator has been refit This uses the score defined by ``scoring`` where provided, and the ``best_estimator_.score`` method otherwise. Parameters ---------- X : array-like, shape = [n_samples, n_features] Input data, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. Returns ------- score : float Notes ----- * The long-standing behavior of this method changed in version 0.16. * It no longer uses the metric provided by ``estimator.score`` if the ``scoring`` parameter was set when fitting. """ if self.scorer_ is None: raise ValueError("No score function explicitly defined, " "and the estimator doesn't provide one %s" % self.best_estimator_) if self.scoring is not None and hasattr(self.best_estimator_, 'score'): warnings.warn("The long-standing behavior to use the estimator's " "score function in {0}.score has changed. The " "scoring parameter is now used." "".format(self.__class__.__name__), ChangedBehaviorWarning) return self.scorer_(self.best_estimator_, X, y) @if_delegate_has_method(delegate='estimator') def predict(self, X): """Call predict on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.predict(X) @if_delegate_has_method(delegate='estimator') def predict_proba(self, X): """Call predict_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.predict_proba(X) @if_delegate_has_method(delegate='estimator') def predict_log_proba(self, X): """Call predict_log_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_log_proba``. 
Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.predict_log_proba(X) @if_delegate_has_method(delegate='estimator') def decision_function(self, X): """Call decision_function on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``decision_function``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.decision_function(X) @if_delegate_has_method(delegate='estimator') def transform(self, X): """Call transform on the estimator with the best found parameters. Only available if the underlying estimator supports ``transform`` and ``refit=True``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.transform(X) @if_delegate_has_method(delegate='estimator') def inverse_transform(self, Xt): """Call inverse_transform on the estimator with the best found parameters. Only available if the underlying estimator implements ``inverse_transform`` and ``refit=True``. Parameters ----------- Xt : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.transform(Xt) def _fit(self, X, y, parameter_iterable): """Actual fitting, performing the search over parameters.""" estimator = self.estimator cv = self.cv self.scorer_ = check_scoring(self.estimator, scoring=self.scoring) n_samples = _num_samples(X) X, y = indexable(X, y) if y is not None: if len(y) != n_samples: raise ValueError('Target variable (y) has a different number ' 'of samples (%i) than data (X: %i samples)' % (len(y), n_samples)) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) if self.verbose > 0: if isinstance(parameter_iterable, Sized): n_candidates = len(parameter_iterable) print("Fitting {0} folds for each of {1} candidates, totalling" " {2} fits".format(len(cv), n_candidates, n_candidates * len(cv))) base_estimator = clone(self.estimator) pre_dispatch = self.pre_dispatch out = Parallel( n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=pre_dispatch )( delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_, train, test, self.verbose, parameters, self.fit_params, return_parameters=True, error_score=self.error_score) for parameters in parameter_iterable for train, test in cv) # Out is a list of triplet: score, estimator, n_test_samples n_fits = len(out) n_folds = len(cv) scores = list() grid_scores = list() for grid_start in range(0, n_fits, n_folds): n_test_samples = 0 score = 0 all_scores = [] for this_score, this_n_test_samples, _, parameters in \ out[grid_start:grid_start + n_folds]: all_scores.append(this_score) if self.iid: this_score *= this_n_test_samples n_test_samples += this_n_test_samples score += this_score if self.iid: score /= float(n_test_samples) else: score /= float(n_folds) scores.append((score, parameters)) # TODO: shall we also store the test_fold_sizes? 
grid_scores.append(_CVScoreTuple( parameters, score, np.array(all_scores))) # Store the computed scores self.grid_scores_ = grid_scores # Find the best parameters by comparing on the mean validation score: # note that `sorted` is deterministic in the way it breaks ties best = sorted(grid_scores, key=lambda x: x.mean_validation_score, reverse=True)[0] self.best_params_ = best.parameters self.best_score_ = best.mean_validation_score if self.refit: # fit the best estimator using the entire dataset # clone first to work around broken estimators best_estimator = clone(base_estimator).set_params( **best.parameters) if y is not None: best_estimator.fit(X, y, **self.fit_params) else: best_estimator.fit(X, **self.fit_params) self.best_estimator_ = best_estimator return self class GridSearchCV(BaseSearchCV): """Exhaustive search over specified parameter values for an estimator. Important members are fit, predict. GridSearchCV implements a "fit" method and a "predict" method like any classifier except that the parameters of the classifier used to predict is optimized by cross-validation. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods A object of that type is instantiated for each grid point. param_grid : dict or list of dictionaries Dictionary with parameters names (string) as keys and lists of parameter settings to try as values, or a list of such dictionaries, in which case the grids spanned by each dictionary in the list are explored. This enables searching over any sequence of parameter settings. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default 1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : integer or cross-validation generator, default=3 If an integer is passed, it is the number of folds. Specific cross-validation objects can be passed, see sklearn.cross_validation module for the list of possible objects refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this GridSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. 
Examples -------- >>> from sklearn import svm, grid_search, datasets >>> iris = datasets.load_iris() >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} >>> svr = svm.SVC() >>> clf = grid_search.GridSearchCV(svr, parameters) >>> clf.fit(iris.data, iris.target) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS GridSearchCV(cv=None, error_score=..., estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=..., decision_function_shape=None, degree=..., gamma=..., kernel='rbf', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=..., verbose=False), fit_params={}, iid=..., n_jobs=1, param_grid=..., pre_dispatch=..., refit=..., scoring=..., verbose=...) Attributes ---------- grid_scores_ : list of named tuples Contains scores for all parameter combinations in param_grid. Each entry corresponds to one parameter setting. Each named tuple has the attributes: * ``parameters``, a dict of parameter settings * ``mean_validation_score``, the mean score over the cross-validation folds * ``cv_validation_scores``, the list of scores for each fold best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. Notes ------ The parameters selected are those that maximize the score of the left out data, unless an explicit score is passed in which case it is used instead. If `n_jobs` was set to a value higher than one, the data is copied for each point in the grid (and not `n_jobs` times). This is done for efficiency reasons if individual jobs take very little time, but may raise errors if the dataset is large and not enough memory is available. A workaround in this case is to set `pre_dispatch`. Then, the memory is copied only `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * n_jobs`. See Also --------- :class:`ParameterGrid`: generates all the combinations of a an hyperparameter grid. :func:`sklearn.cross_validation.train_test_split`: utility function to split the data into a development set usable for fitting a GridSearchCV instance and an evaluation set for its final evaluation. :func:`sklearn.metrics.make_scorer`: Make a scorer from a performance metric or loss function. """ def __init__(self, estimator, param_grid, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise'): super(GridSearchCV, self).__init__( estimator, scoring, fit_params, n_jobs, iid, refit, cv, verbose, pre_dispatch, error_score) self.param_grid = param_grid _check_param_grid(param_grid) def fit(self, X, y=None): """Run fit with all sets of parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. """ return self._fit(X, y, ParameterGrid(self.param_grid)) class RandomizedSearchCV(BaseSearchCV): """Randomized search on hyper parameters. 
RandomizedSearchCV implements a "fit" method and a "predict" method like any classifier except that the parameters of the classifier used to predict is optimized by cross-validation. In contrast to GridSearchCV, not all parameter values are tried out, but rather a fixed number of parameter settings is sampled from the specified distributions. The number of parameter settings that are tried is given by n_iter. If all parameters are presented as a list, sampling without replacement is performed. If at least one parameter is given as a distribution, sampling with replacement is used. It is highly recommended to use continuous distributions for continuous parameters. Read more in the :ref:`User Guide <randomized_parameter_search>`. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods A object of that type is instantiated for each parameter setting. param_distributions : dict Dictionary with parameters names (string) as keys and distributions or lists of parameters to try. Distributions must provide a ``rvs`` method for sampling (such as those from scipy.stats.distributions). If a list is given, it is sampled uniformly. n_iter : int, default=10 Number of parameter settings that are sampled. n_iter trades off runtime vs quality of the solution. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : integer or cross-validation generator, optional If an integer is passed, it is the number of folds (default 3). Specific cross-validation objects can be passed, see sklearn.cross_validation module for the list of possible objects refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this RandomizedSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. random_state : int or RandomState Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Attributes ---------- grid_scores_ : list of named tuples Contains scores for all parameter combinations in param_grid. Each entry corresponds to one parameter setting. 
Each named tuple has the attributes: * ``parameters``, a dict of parameter settings * ``mean_validation_score``, the mean score over the cross-validation folds * ``cv_validation_scores``, the list of scores for each fold best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. Notes ----- The parameters selected are those that maximize the score of the held-out data, according to the scoring parameter. If `n_jobs` was set to a value higher than one, the data is copied for each parameter setting(and not `n_jobs` times). This is done for efficiency reasons if individual jobs take very little time, but may raise errors if the dataset is large and not enough memory is available. A workaround in this case is to set `pre_dispatch`. Then, the memory is copied only `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * n_jobs`. See Also -------- :class:`GridSearchCV`: Does exhaustive search over a grid of parameters. :class:`ParameterSampler`: A generator over parameter settins, constructed from param_distributions. """ def __init__(self, estimator, param_distributions, n_iter=10, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', random_state=None, error_score='raise'): self.param_distributions = param_distributions self.n_iter = n_iter self.random_state = random_state super(RandomizedSearchCV, self).__init__( estimator=estimator, scoring=scoring, fit_params=fit_params, n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose, pre_dispatch=pre_dispatch, error_score=error_score) def fit(self, X, y=None): """Run fit on the estimator with randomly drawn parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. """ sampled_params = ParameterSampler(self.param_distributions, self.n_iter, random_state=self.random_state) return self._fit(X, y, sampled_params)
bsd-3-clause
befelix/GPy
GPy/examples/classification.py
6
7792
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) """ Gaussian Processes classification examples """ import GPy default_seed = 10000 def oil(num_inducing=50, max_iters=100, kernel=None, optimize=True, plot=True): """ Run a Gaussian process classification on the three phase oil data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood. """ try:import pods except ImportError:raise ImportWarning('Need pods for example datasets. See https://github.com/sods/ods, or pip install pods.') data = pods.datasets.oil() X = data['X'] Xtest = data['Xtest'] Y = data['Y'][:, 0:1] Ytest = data['Ytest'][:, 0:1] Y[Y.flatten()==-1] = 0 Ytest[Ytest.flatten()==-1] = 0 # Create GP model m = GPy.models.SparseGPClassification(X, Y, kernel=kernel, num_inducing=num_inducing) m.Ytest = Ytest # Contrain all parameters to be positive #m.tie_params('.*len') m['.*len'] = 10. # Optimize if optimize: m.optimize(messages=1) print(m) #Test probs = m.predict(Xtest)[0] GPy.util.classification.conf_matrix(probs, Ytest) return m def toy_linear_1d_classification(seed=default_seed, optimize=True, plot=True): """ Simple 1D classification example using EP approximation :param seed: seed value for data generation (default is 4). :type seed: int """ try:import pods except ImportError:raise ImportWarning('Need pods for example datasets. See https://github.com/sods/ods, or pip install pods.') data = pods.datasets.toy_linear_1d_classification(seed=seed) Y = data['Y'][:, 0:1] Y[Y.flatten() == -1] = 0 # Model definition m = GPy.models.GPClassification(data['X'], Y) # Optimize if optimize: #m.update_likelihood_approximation() # Parameters optimization: m.optimize() #m.update_likelihood_approximation() #m.pseudo_EM() # Plot if plot: from matplotlib import pyplot as plt fig, axes = plt.subplots(2, 1) m.plot_f(ax=axes[0]) m.plot(ax=axes[1]) print(m) return m def toy_linear_1d_classification_laplace(seed=default_seed, optimize=True, plot=True): """ Simple 1D classification example using Laplace approximation :param seed: seed value for data generation (default is 4). :type seed: int """ try:import pods except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets') data = pods.datasets.toy_linear_1d_classification(seed=seed) Y = data['Y'][:, 0:1] Y[Y.flatten() == -1] = 0 likelihood = GPy.likelihoods.Bernoulli() laplace_inf = GPy.inference.latent_function_inference.Laplace() kernel = GPy.kern.RBF(1) # Model definition m = GPy.core.GP(data['X'], Y, kernel=kernel, likelihood=likelihood, inference_method=laplace_inf) # Optimize if optimize: try: m.optimize('scg', messages=1) except Exception as e: return m # Plot if plot: from matplotlib import pyplot as plt fig, axes = plt.subplots(2, 1) m.plot_f(ax=axes[0]) m.plot(ax=axes[1]) print(m) return m def sparse_toy_linear_1d_classification(num_inducing=10, seed=default_seed, optimize=True, plot=True): """ Sparse 1D classification example :param seed: seed value for data generation (default is 4). :type seed: int """ try:import pods except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets') data = pods.datasets.toy_linear_1d_classification(seed=seed) Y = data['Y'][:, 0:1] Y[Y.flatten() == -1] = 0 # Model definition m = GPy.models.SparseGPClassification(data['X'], Y, num_inducing=num_inducing) m['.*len'] = 4. 
# Optimize if optimize: m.optimize() # Plot if plot: from matplotlib import pyplot as plt fig, axes = plt.subplots(2, 1) m.plot_f(ax=axes[0]) m.plot(ax=axes[1]) print(m) return m def sparse_toy_linear_1d_classification_uncertain_input(num_inducing=10, seed=default_seed, optimize=True, plot=True): """ Sparse 1D classification example :param seed: seed value for data generation (default is 4). :type seed: int """ try:import pods except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets') import numpy as np data = pods.datasets.toy_linear_1d_classification(seed=seed) Y = data['Y'][:, 0:1] Y[Y.flatten() == -1] = 0 X = data['X'] X_var = np.random.uniform(0.3,0.5,X.shape) # Model definition m = GPy.models.SparseGPClassificationUncertainInput(X, X_var, Y, num_inducing=num_inducing) m['.*len'] = 4. # Optimize if optimize: m.optimize() # Plot if plot: from matplotlib import pyplot as plt fig, axes = plt.subplots(2, 1) m.plot_f(ax=axes[0]) m.plot(ax=axes[1]) print(m) return m def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True): """ Simple 1D classification example using a heavy side gp transformation :param seed: seed value for data generation (default is 4). :type seed: int """ try:import pods except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets') data = pods.datasets.toy_linear_1d_classification(seed=seed) Y = data['Y'][:, 0:1] Y[Y.flatten() == -1] = 0 # Model definition kernel = GPy.kern.RBF(1) likelihood = GPy.likelihoods.Bernoulli(gp_link=GPy.likelihoods.link_functions.Heaviside()) ep = GPy.inference.latent_function_inference.expectation_propagation.EP() m = GPy.core.GP(X=data['X'], Y=Y, kernel=kernel, likelihood=likelihood, inference_method=ep, name='gp_classification_heaviside') #m = GPy.models.GPClassification(data['X'], likelihood=likelihood) # Optimize if optimize: # Parameters optimization: for _ in range(5): m.optimize(max_iters=int(max_iters/5)) print(m) # Plot if plot: from matplotlib import pyplot as plt fig, axes = plt.subplots(2, 1) m.plot_f(ax=axes[0]) m.plot(ax=axes[1]) print(m) return m def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=None, optimize=True, plot=True): """ Run a Gaussian process classification on the crescent data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood. :param model_type: type of model to fit ['Full', 'FITC', 'DTC']. :param inducing: number of inducing variables (only used for 'FITC' or 'DTC'). :type inducing: int :param seed: seed value for data generation. :type seed: int :param kernel: kernel to use in the model :type kernel: a GPy kernel """ try:import pods except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets') data = pods.datasets.crescent_data(seed=seed) Y = data['Y'] Y[Y.flatten()==-1] = 0 if model_type == 'Full': m = GPy.models.GPClassification(data['X'], Y, kernel=kernel) elif model_type == 'DTC': m = GPy.models.SparseGPClassification(data['X'], Y, kernel=kernel, num_inducing=num_inducing) m['.*len'] = 10. elif model_type == 'FITC': m = GPy.models.FITCClassification(data['X'], Y, kernel=kernel, num_inducing=num_inducing) m['.*len'] = 3. if optimize: m.optimize(messages=1) if plot: m.plot() print(m) return m
bsd-3-clause
aayushkapadia/chemical_reaction_simulator
Simulator/Simulator.py
1
1339
from Reaction import *
import matplotlib.pyplot as plt

class Simulator:

    def __init__(self, crn):
        self.crn = crn
        self.simulationData = dict()
        self.crn.prepare()
        for chemical_name in self.crn.concentrations:
            self.simulationData[chemical_name] = []

    def addInSimulationData(self, concentrations):
        for chemical_name in concentrations:
            self.simulationData[chemical_name].append(concentrations[chemical_name])

    def simulate(self, timeSteps, filePath):
        historyFile = open(filePath, 'w')
        historyFile.write(str(self.crn.concentrations))
        historyFile.write('\n')
        self.addInSimulationData(self.crn.concentrations)
        for i in range(timeSteps):
            reaction = self.crn.getFastestReaction()
            if reaction != None:
                self.crn.doReaction(reaction)
            self.addInSimulationData(self.crn.concentrations)
            historyFile.write(str(self.crn.concentrations))
            historyFile.write('\n')
        print 'History file ' + filePath + ' created'

    def plot(self, listOfChemicals):
        for chemical in listOfChemicals:
            initString = 'init = ' + str(self.simulationData[chemical][0])
            endString = 'end = ' + str(self.simulationData[chemical][-1])
            plt.plot(self.simulationData[chemical], label=chemical + '(' + initString + ',' + endString + ')')
        plt.ylabel('Concentration')
        plt.xlabel('Time (unit time)')
        plt.legend()
        plt.show()
mit
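Simulator only relies on a small interface of the crn object it wraps: prepare(), a concentrations dict, getFastestReaction(), and doReaction(). The stub below is a made-up stand-in for that interface (the real Reaction module in the repository is different) and just sketches how the class might be driven:

# Hypothetical stand-in for the repository's CRN object, exposing only the
# members Simulator actually uses; names and behavior here are illustrative.
class ToyCRN(object):
    def __init__(self):
        self.concentrations = {'A': 100, 'B': 0}

    def prepare(self):
        pass  # the real class presumably pre-computes reaction rates here

    def getFastestReaction(self):
        # a single irreversible reaction A -> B, available while any A remains
        return 'A->B' if self.concentrations['A'] > 0 else None

    def doReaction(self, reaction):
        self.concentrations['A'] -= 1
        self.concentrations['B'] += 1

sim = Simulator(ToyCRN())
sim.simulate(50, 'history.txt')   # writes one concentration snapshot per step
sim.plot(['A', 'B'])              # plots both trajectories with init/end labels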
marcotcr/lime-experiments
generate_data_for_compare_classifiers.py
1
9116
import sys import copy sys.path.append('..') import time import numpy as np import scipy as sp import sklearn import xgboost import xgboost.sklearn import explainers from load_datasets import * from sklearn.metrics import accuracy_score from sklearn import ensemble, cross_validation import pickle import parzen_windows import argparse def get_random_indices(labels, class_, probability): nonzero = (labels == class_).nonzero()[0] if nonzero.shape[0] == 0 or probability == 0: return [] return np.random.choice(nonzero, int(probability * len(nonzero)) , replace=False) def add_corrupt_feature(feature_name, clean_train, clean_test, dirty_train, train_labels, test_labels, class_probs_dirty, class_probs_clean, fake_prefix='FAKE'): """clean_train, clean_test, dirty_train will be corrupted""" for class_ in set(train_labels): indices = get_random_indices(train_labels, class_, class_probs_clean[class_]) for i in indices: clean_train[i] += ' %s%s%s' % (fake_prefix, feature_name, fake_prefix) indices = get_random_indices(train_labels, class_, class_probs_dirty[class_]) for i in indices: dirty_train[i] += ' %s%s%s' % (fake_prefix, feature_name, fake_prefix) indices = get_random_indices(test_labels, class_, class_probs_clean[class_]) for i in indices: clean_test[i] += ' %s%s%s' % (fake_prefix, feature_name, fake_prefix) def corrupt_dataset(independent_features, train_data, train_labels, test_data, test_labels): # independent_features: list [([.3, .8],[.5,.5], 3), ([.1, .1],[0, 0], 1) # ...]. Each element in list is a tuple (l,l2, n) where l a list # representing the probability of seeing the feature in each class in the # dirty train data, l2 is a list representing the probability of seeing the # feature in each class the clean test data and n is the number of features # with this distribution to add. 
# returns (clean_train, dirty_train, clean_test) dirty_train = copy.deepcopy(train_data) clean_train = copy.deepcopy(train_data) clean_test = copy.deepcopy(test_data) idx = 0 for probs, probs2, n in independent_features: for i in range(n): add_corrupt_feature('%d' % idx, clean_train, clean_test, dirty_train, train_labels, test_labels, probs, probs2) idx += 1 return clean_train, dirty_train, clean_test def main(): parser = argparse.ArgumentParser(description='Evaluate some explanations') parser.add_argument('--dataset', '-d', type=str, required=True,help='dataset name') parser.add_argument('--output_folder', '-o', type=str, required=True, help='output folder') parser.add_argument('--num_features', '-k', type=int, required=True, help='num features') parser.add_argument('--num_rounds', '-r', type=int, required=True, help='num rounds') parser.add_argument('--start_id', '-i', type=int, default=0,required=False, help='output start id') args = parser.parse_args() dataset = args.dataset train_data, train_labels, test_data, test_labels, class_names = LoadDataset(dataset) rho = 25 kernel = lambda d: np.sqrt(np.exp(-(d**2) / rho ** 2)) local = explainers.GeneralizedLocalExplainer(kernel, explainers.data_labels_distances_mapping_text, num_samples=15000, return_mean=True, verbose=False, return_mapped=True) # Found through cross validation sigmas = {'multi_polarity_electronics': {'neighbors': 0.75, 'svm': 10.0, 'tree': 0.5, 'logreg': 0.5, 'random_forest': 0.5, 'embforest': 0.75}, 'multi_polarity_kitchen': {'neighbors': 1.0, 'svm': 6.0, 'tree': 0.75, 'logreg': 0.25, 'random_forest': 6.0, 'embforest': 1.0}, 'multi_polarity_dvd': {'neighbors': 0.5, 'svm': 0.75, 'tree': 8.0, 'logreg': 0.75, 'random_forest': 0.5, 'embforest': 5.0}, 'multi_polarity_books': {'neighbors': 0.5, 'svm': 7.0, 'tree': 2.0, 'logreg': 1.0, 'random_forest': 1.0, 'embforest': 3.0}} parzen1 = parzen_windows.ParzenWindowClassifier() parzen1.sigma = sigmas[dataset]['random_forest'] parzen2 = parzen_windows.ParzenWindowClassifier() parzen2.sigma = sigmas[dataset]['random_forest'] random = explainers.RandomExplainer() for Z in range(args.num_rounds): exps1 = {} exps2 = {} explainer_names = ['lime', 'parzen', 'random', 'greedy', 'mutual'] for expl in explainer_names: exps1[expl] = [] exps2[expl] = [] print 'Round', Z sys.stdout.flush() fake_features_z = [([.1, .2], [.1,.1], 10)]#, ([.2, .1], [.1,.1], 10)] clean_train, dirty_train, clean_test = corrupt_dataset(fake_features_z, train_data, train_labels, test_data, test_labels) vectorizer = CountVectorizer(lowercase=False, binary=True) dirty_train_vectors = vectorizer.fit_transform(dirty_train) clean_train_vectors = vectorizer.transform(clean_train) test_vectors = vectorizer.transform(clean_test) terms = np.array(list(vectorizer.vocabulary_.keys())) indices = np.array(list(vectorizer.vocabulary_.values())) inverse_vocabulary = terms[np.argsort(indices)] tokenizer = vectorizer.build_tokenizer() c1 = ensemble.RandomForestClassifier(n_estimators=30, max_depth=5) c2 = ensemble.RandomForestClassifier(n_estimators=30, max_depth=5) untrustworthy = [i for i, x in enumerate(inverse_vocabulary) if x.startswith('FAKE')] train_idx, test_idx = tuple(cross_validation.ShuffleSplit(dirty_train_vectors.shape[0], 1, 0.2))[0] train_acc1 = train_acc2 = test_acc1 = test_acc2 = 0 print 'Trying to find trees:' sys.stdout.flush() iteration = 0 found_tree = True while np.abs(train_acc1 - train_acc2) > 0.001 or np.abs(test_acc1 - test_acc2) < 0.05: iteration += 1 c1.fit(dirty_train_vectors[train_idx], 
train_labels[train_idx]) c2.fit(dirty_train_vectors[train_idx], train_labels[train_idx]) train_acc1 = accuracy_score(train_labels[test_idx], c1.predict(dirty_train_vectors[test_idx])) train_acc2 = accuracy_score(train_labels[test_idx], c2.predict(dirty_train_vectors[test_idx])) test_acc1 = accuracy_score(test_labels, c1.predict(test_vectors)) test_acc2 = accuracy_score(test_labels, c2.predict(test_vectors)) if iteration == 3000: found_tree = False break if not found_tree: print 'skipping iteration', Z continue print 'done' print 'Train acc1:', train_acc1, 'Train acc2:', train_acc2 print 'Test acc1:', test_acc1, 'Test acc2:', test_acc2 sys.stdout.flush() predictions = c1.predict(dirty_train_vectors) predictions2 = c2.predict(dirty_train_vectors) predict_probas = c1.predict_proba(dirty_train_vectors)[:,1] predict_probas2 = c2.predict_proba(dirty_train_vectors)[:,1] cv_preds1 = cross_validation.cross_val_predict(c1, dirty_train_vectors[train_idx], train_labels[train_idx], cv=5) cv_preds2 = cross_validation.cross_val_predict(c2, dirty_train_vectors[train_idx], train_labels[train_idx], cv=5) parzen1.fit(dirty_train_vectors[train_idx], cv_preds1) parzen2.fit(dirty_train_vectors[train_idx], cv_preds2) pp = [] pp2 = [] true_labels = [] iteration = 0 for i in test_idx: if iteration % 50 == 0: print iteration sys.stdout.flush() iteration += 1 pp.append(predict_probas[i]) pp2.append(predict_probas2[i]) true_labels.append(train_labels[i]) exp, mean = local.explain_instance(dirty_train_vectors[i], 1, c1.predict_proba, args.num_features) exps1['lime'].append((exp, mean)) exp = parzen1.explain_instance(dirty_train_vectors[i], 1, c1.predict_proba, args.num_features, None) mean = parzen1.predict_proba(dirty_train_vectors[i])[1] exps1['parzen'].append((exp, mean)) exp = random.explain_instance(dirty_train_vectors[i], 1, None, args.num_features, None) exps1['random'].append(exp) exp = explainers.explain_greedy_martens(dirty_train_vectors[i], predictions[i], c1.predict_proba, args.num_features) exps1['greedy'].append(exp) # Classifier 2 exp, mean = local.explain_instance(dirty_train_vectors[i], 1, c2.predict_proba, args.num_features) exps2['lime'].append((exp, mean)) exp = parzen2.explain_instance(dirty_train_vectors[i], 1, c2.predict_proba, args.num_features, None) mean = parzen2.predict_proba(dirty_train_vectors[i])[1] exps2['parzen'].append((exp, mean)) exp = random.explain_instance(dirty_train_vectors[i], 1, None, args.num_features, None) exps2['random'].append(exp) exp = explainers.explain_greedy_martens(dirty_train_vectors[i], predictions2[i], c2.predict_proba, args.num_features) exps2['greedy'].append(exp) out = {'true_labels' : true_labels, 'untrustworthy' : untrustworthy, 'train_acc1' : train_acc1, 'train_acc2' : train_acc2, 'test_acc1' : test_acc1, 'test_acc2' : test_acc2, 'exps1' : exps1, 'exps2': exps2, 'predict_probas1': pp, 'predict_probas2': pp2} pickle.dump(out, open(os.path.join(args.output_folder, 'comparing_%s_%s_%d.pickle' % (dataset, args.num_features, Z + args.start_id)), 'w')) if __name__ == "__main__": main()
bsd-2-clause
vmonaco/single-hashing
single_hash.py
1
2647
'''
Created on Nov 20, 2012

@author: vinnie
'''

from utils import *

def in1d_running(q, A):
    '''
    j where q[k] in A for 0 <= k <= j
    This is the maximum index j where q[0:j] is in A
    '''
    j = 0
    while j < len(q) and q[j] in A:
        j += 1
    return j

def s_A(Q, A):
    '''
    s(A) = {(i,j) | q[i,k] in A for 0 <= k <= j}
    The set of all coordinates where Q[i,0:k] is in A for 0 <= k <= j,
    where j is defined by the in1d_running function above
    '''
    return [(i, k) for i in A for k in range(in1d_running(Q[i], A))]

def P(Q, A, m):
    '''
    Given the single hashing scheme defined by matrix Q, compute the
    probability that the first |A| slots are occupied by the slots in A
    '''
    if len(A) == 0:
        return 0
    elif len(A) == 1:
        return 1.0 / m
    else:
        return (1.0 / m) * sum([P(Q, tuple(a for a in A if a != Q[i][j]), m)
                                for (i, j) in s_A(Q, A)])

def P_map(Q):
    '''
    Compute P(A) for each n-combination in [0,1,2...m) for 0 <= n < m
    Also compute P( [0,1,2...m] ). Only one combination is needed,
    this should always be equal to 1.0
    '''
    m = len(Q)
    m_range = range(m)
    p = {A: P(Q, A, m) for A in generate_A(m_range)}
    return p

def delta_prime(Q):
    '''
    The average number of spaces probed for each insertion by the time the
    table is full. This is the best measure for the efficiency of a single
    hashing scheme
    '''
    m = len(Q)
    m_range = [row[0] for row in Q]
    set_A = generate_A(m_range)
    return (1.0 / (m ** 2)) * sum(P(Q, A, m) * len(s_A(Q, A)) for A in set_A)

def d_prime(Q, n):
    '''
    The average number of probes needed to insert the nth element into a
    table with single hashing scheme Q
    '''
    m = len(Q)
    m_range = [row[0] for row in Q]
    assert n <= m
    set_A = [A for A in generate_A(m_range) if len(A) == n - 1]
    return (1.0 / m) * sum(P(Q, A, m) * len(s_A(Q, A)) for A in set_A)

def search_random(m, N):
    from operator import itemgetter
    import matplotlib.pyplot as plt
    import random
    random.seed(1234)
    score_Q = [(delta_prime(Q), Q) for Q in [random_Q(m) for _ in range(N)]]
    min_score, min_Q = min(score_Q, key=itemgetter(0))
    max_score, max_Q = max(score_Q, key=itemgetter(0))
    print('Best score:', min_score, min_Q)
    print('Worst score:', max_score, max_Q)
    plt.hist(list(zip(*score_Q))[0], bins=100, normed=True)
    plt.xlabel('Probes per insertion')
    plt.ylabel('Density')
    plt.savefig('m%d_scores.png' % m)
    return

if __name__ == '__main__':
    search_random(5, 10000)
mit
junwucs/h2o-3
h2o-py/docs/conf.py
2
9583
# -*- coding: utf-8 -*- # # H2O documentation build configuration file, created by # sphinx-quickstart on Thu Feb 5 15:32:52 2015. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # import mock # # MOCK_MODULES = ['numpy', 'scipy', 'matplotlib', 'matplotlib.pyplot', 'scipy.interpolate', 'sklearn', 'sklearn.pipeline'] # for mod_name in MOCK_MODULES: # sys.modules[mod_name] = mock.Mock() # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'H2O' copyright = u'2015, H2O' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '' # The full version, including alpha/beta/rc tags. release = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
#html_theme_options = {sidebarbgcolor:'yellow'} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ["../../h2o-docs-theme"] # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'H2Odoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'H2O.tex', u'H2O Documentation', u'H2O', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. 
#latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'H2O', u'H2O Documentation', [u'Author'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'H2O', u'H2O Documentation', u'Author', 'H2O', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'H2O' epub_author = u'H2O' epub_publisher = u'H2O' epub_copyright = u'2015, H2O' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True
apache-2.0
wathen/PhD
MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/TH/Solver.py
1
5642
from dolfin import assemble, MixedFunctionSpace, tic,toc import petsc4py import sys petsc4py.init(sys.argv) from petsc4py import PETSc import CheckPetsc4py as CP import StokesPrecond import NSpreconditioner import MaxwellPrecond as MP import MatrixOperations as MO import PETScIO as IO import numpy as np import P as PrecondMulti import MHDprec import scipy.sparse as sp from scipy.linalg import svd import matplotlib.pylab as plt from scipy.sparse.linalg.dsolve import spsolve def solve(A,b,u,params, Fspace,SolveType,IterType,OuterTol,InnerTol,HiptmairMatrices,Hiptmairtol,KSPlinearfluids, Fp,kspF): if SolveType == "Direct": ksp = PETSc.KSP() ksp.create(comm=PETSc.COMM_WORLD) pc = ksp.getPC() ksp.setType('preonly') pc.setType('lu') OptDB = PETSc.Options() OptDB['pc_factor_mat_solver_package'] = "mumps" OptDB['pc_factor_mat_ordering_type'] = "rcm" ksp.setFromOptions() scale = b.norm() b = b/scale ksp.setOperators(A,A) del A ksp.solve(b,u) # Mits +=dodim u = u*scale MO.PrintStr("Number iterations = "+str(ksp.its),60,"+","\n\n","\n\n") return u,ksp.its,0 elif SolveType == "Direct-class": ksp = PETSc.KSP() ksp.create(comm=PETSc.COMM_WORLD) pc = ksp.getPC() ksp.setType('gmres') pc.setType('none') ksp.setFromOptions() scale = b.norm() b = b/scale ksp.setOperators(A,A) del A ksp.solve(b,u) # Mits +=dodim u = u*scale MO.PrintStr("Number iterations = "+str(ksp.its),60,"+","\n\n","\n\n") return u,ksp.its,0 else: # u = b.duplicate() if IterType == "Full": ksp = PETSc.KSP() ksp.create(comm=PETSc.COMM_WORLD) pc = ksp.getPC() ksp.setType('fgmres') pc.setType('python') pc.setType(PETSc.PC.Type.PYTHON) OptDB = PETSc.Options() OptDB['ksp_gmres_restart'] = 200 # FSpace = [Velocity,Magnetic,Pressure,Lagrange] reshist = {} def monitor(ksp, its, fgnorm): reshist[its] = fgnorm print its," OUTER:", fgnorm # ksp.setMonitor(monitor) ksp.max_it = 500 W = Fspace FFSS = [W.sub(0),W.sub(1),W.sub(2),W.sub(3)] pc.setPythonContext(MHDprec.InnerOuterMAGNETICinverse(FFSS,kspF, KSPlinearfluids[0], KSPlinearfluids[1],Fp, HiptmairMatrices[3], HiptmairMatrices[4], HiptmairMatrices[2], HiptmairMatrices[0], HiptmairMatrices[1], HiptmairMatrices[6],Hiptmairtol)) #OptDB = PETSc.Options() # OptDB['pc_factor_mat_solver_package'] = "mumps" # OptDB['pc_factor_mat_ordering_type'] = "rcm" # ksp.setFromOptions() scale = b.norm() b = b/scale ksp.setOperators(A,A) del A ksp.solve(b,u) # Mits +=dodim u = u*scale MO.PrintStr("Number iterations = "+str(ksp.its),60,"+","\n\n","\n\n") return u,ksp.its,0 IS = MO.IndexSet(Fspace,'2by2') M_is = IS[1] NS_is = IS[0] kspNS = PETSc.KSP().create() kspM = PETSc.KSP().create() kspNS.setTolerances(OuterTol) kspNS.setOperators(A[0]) kspM.setOperators(A[1]) # print P.symmetric if IterType == "MD": kspNS.setType('gmres') kspNS.max_it = 500 pcNS = kspNS.getPC() pcNS.setType(PETSc.PC.Type.PYTHON) pcNS.setPythonContext(NSpreconditioner.NSPCD(MixedFunctionSpace([Fspace.sub(0),Fspace.sub(1)]), kspF, KSPlinearfluids[0], KSPlinearfluids[1],Fp)) elif IterType == "CD": kspNS.setType('minres') pcNS = kspNS.getPC() pcNS.setType(PETSc.PC.Type.PYTHON) Q = KSPlinearfluids[1].getOperators()[0] Q = 1./params[2]*Q KSPlinearfluids[1].setOperators(Q,Q) pcNS.setPythonContext(StokesPrecond.MHDApprox(MixedFunctionSpace([Fspace.sub(0),Fspace.sub(1)]),kspF,KSPlinearfluids[1] )) reshist = {} def monitor(ksp, its, fgnorm): reshist[its] = fgnorm print fgnorm # kspNS.setMonitor(monitor) uNS = u.getSubVector(NS_is) bNS = b.getSubVector(NS_is) # print kspNS.view() scale = bNS.norm() bNS = bNS/scale print bNS.norm() kspNS.solve(bNS, uNS) uNS = 
uNS*scale NSits = kspNS.its kspNS.destroy() # for line in reshist.values(): # print line kspM.setFromOptions() kspM.setType(kspM.Type.MINRES) kspM.setTolerances(InnerTol) pcM = kspM.getPC() pcM.setType(PETSc.PC.Type.PYTHON) pcM.setPythonContext(MP.Hiptmair(MixedFunctionSpace([Fspace.sub(2),Fspace.sub(3)]), HiptmairMatrices[3], HiptmairMatrices[4], HiptmairMatrices[2], HiptmairMatrices[0], HiptmairMatrices[1], HiptmairMatrices[6],Hiptmairtol)) uM = u.getSubVector(M_is) bM = b.getSubVector(M_is) scale = bM.norm() bM = bM/scale print bM.norm() kspM.solve(bM, uM) uM = uM*scale Mits = kspM.its kspM.destroy() u = IO.arrayToVec(np.concatenate([uNS.array, uM.array])) MO.PrintStr("Number of M iterations = "+str(Mits),60,"+","\n\n","\n\n") MO.PrintStr("Number of NS/S iterations = "+str(NSits),60,"+","\n\n","\n\n") return u,NSits,Mits
mit
skyuuka/fast-rcnn
tools/train_svms.py
42
13247
#!/usr/bin/env python # -------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Ross Girshick # -------------------------------------------------------- """ Train post-hoc SVMs using the algorithm and hyper-parameters from traditional R-CNN. """ import _init_paths from fast_rcnn.config import cfg, cfg_from_file from datasets.factory import get_imdb from fast_rcnn.test import im_detect from utils.timer import Timer import caffe import argparse import pprint import numpy as np import numpy.random as npr import cv2 from sklearn import svm import os, sys class SVMTrainer(object): """ Trains post-hoc detection SVMs for all classes using the algorithm and hyper-parameters of traditional R-CNN. """ def __init__(self, net, imdb): self.imdb = imdb self.net = net self.layer = 'fc7' self.hard_thresh = -1.0001 self.neg_iou_thresh = 0.3 dim = net.params['cls_score'][0].data.shape[1] scale = self._get_feature_scale() print('Feature dim: {}'.format(dim)) print('Feature scale: {:.3f}'.format(scale)) self.trainers = [SVMClassTrainer(cls, dim, feature_scale=scale) for cls in imdb.classes] def _get_feature_scale(self, num_images=100): TARGET_NORM = 20.0 # Magic value from traditional R-CNN _t = Timer() roidb = self.imdb.roidb total_norm = 0.0 count = 0.0 inds = npr.choice(xrange(self.imdb.num_images), size=num_images, replace=False) for i_, i in enumerate(inds): im = cv2.imread(self.imdb.image_path_at(i)) if roidb[i]['flipped']: im = im[:, ::-1, :] _t.tic() scores, boxes = im_detect(self.net, im, roidb[i]['boxes']) _t.toc() feat = self.net.blobs[self.layer].data total_norm += np.sqrt((feat ** 2).sum(axis=1)).sum() count += feat.shape[0] print('{}/{}: avg feature norm: {:.3f}'.format(i_ + 1, num_images, total_norm / count)) return TARGET_NORM * 1.0 / (total_norm / count) def _get_pos_counts(self): counts = np.zeros((len(self.imdb.classes)), dtype=np.int) roidb = self.imdb.roidb for i in xrange(len(roidb)): for j in xrange(1, self.imdb.num_classes): I = np.where(roidb[i]['gt_classes'] == j)[0] counts[j] += len(I) for j in xrange(1, self.imdb.num_classes): print('class {:s} has {:d} positives'. format(self.imdb.classes[j], counts[j])) return counts def get_pos_examples(self): counts = self._get_pos_counts() for i in xrange(len(counts)): self.trainers[i].alloc_pos(counts[i]) _t = Timer() roidb = self.imdb.roidb num_images = len(roidb) # num_images = 100 for i in xrange(num_images): im = cv2.imread(self.imdb.image_path_at(i)) if roidb[i]['flipped']: im = im[:, ::-1, :] gt_inds = np.where(roidb[i]['gt_classes'] > 0)[0] gt_boxes = roidb[i]['boxes'][gt_inds] _t.tic() scores, boxes = im_detect(self.net, im, gt_boxes) _t.toc() feat = self.net.blobs[self.layer].data for j in xrange(1, self.imdb.num_classes): cls_inds = np.where(roidb[i]['gt_classes'][gt_inds] == j)[0] if len(cls_inds) > 0: cls_feat = feat[cls_inds, :] self.trainers[j].append_pos(cls_feat) print 'get_pos_examples: {:d}/{:d} {:.3f}s' \ .format(i + 1, len(roidb), _t.average_time) def initialize_net(self): # Start all SVM parameters at zero self.net.params['cls_score'][0].data[...] = 0 self.net.params['cls_score'][1].data[...] = 0 # Initialize SVMs in a smart way. Not doing this because its such # a good initialization that we might not learn something close to # the SVM solution. 
# # subtract background weights and biases for the foreground classes # w_bg = self.net.params['cls_score'][0].data[0, :] # b_bg = self.net.params['cls_score'][1].data[0] # self.net.params['cls_score'][0].data[1:, :] -= w_bg # self.net.params['cls_score'][1].data[1:] -= b_bg # # set the background weights and biases to 0 (where they shall remain) # self.net.params['cls_score'][0].data[0, :] = 0 # self.net.params['cls_score'][1].data[0] = 0 def update_net(self, cls_ind, w, b): self.net.params['cls_score'][0].data[cls_ind, :] = w self.net.params['cls_score'][1].data[cls_ind] = b def train_with_hard_negatives(self): _t = Timer() roidb = self.imdb.roidb num_images = len(roidb) # num_images = 100 for i in xrange(num_images): im = cv2.imread(self.imdb.image_path_at(i)) if roidb[i]['flipped']: im = im[:, ::-1, :] _t.tic() scores, boxes = im_detect(self.net, im, roidb[i]['boxes']) _t.toc() feat = self.net.blobs[self.layer].data for j in xrange(1, self.imdb.num_classes): hard_inds = \ np.where((scores[:, j] > self.hard_thresh) & (roidb[i]['gt_overlaps'][:, j].toarray().ravel() < self.neg_iou_thresh))[0] if len(hard_inds) > 0: hard_feat = feat[hard_inds, :].copy() new_w_b = \ self.trainers[j].append_neg_and_retrain(feat=hard_feat) if new_w_b is not None: self.update_net(j, new_w_b[0], new_w_b[1]) print(('train_with_hard_negatives: ' '{:d}/{:d} {:.3f}s').format(i + 1, len(roidb), _t.average_time)) def train(self): # Initialize SVMs using # a. w_i = fc8_w_i - fc8_w_0 # b. b_i = fc8_b_i - fc8_b_0 # c. Install SVMs into net self.initialize_net() # Pass over roidb to count num positives for each class # a. Pre-allocate arrays for positive feature vectors # Pass over roidb, computing features for positives only self.get_pos_examples() # Pass over roidb # a. Compute cls_score with forward pass # b. For each class # i. Select hard negatives # ii. Add them to cache # c. For each class # i. If SVM retrain criteria met, update SVM # ii. Install new SVM into net self.train_with_hard_negatives() # One final SVM retraining for each class # Install SVMs into net for j in xrange(1, self.imdb.num_classes): new_w_b = self.trainers[j].append_neg_and_retrain(force=True) self.update_net(j, new_w_b[0], new_w_b[1]) class SVMClassTrainer(object): """Manages post-hoc SVM training for a single object class.""" def __init__(self, cls, dim, feature_scale=1.0, C=0.001, B=10.0, pos_weight=2.0): self.pos = np.zeros((0, dim), dtype=np.float32) self.neg = np.zeros((0, dim), dtype=np.float32) self.B = B self.C = C self.cls = cls self.pos_weight = pos_weight self.dim = dim self.feature_scale = feature_scale self.svm = svm.LinearSVC(C=C, class_weight={1: 2, -1: 1}, intercept_scaling=B, verbose=1, penalty='l2', loss='l1', random_state=cfg.RNG_SEED, dual=True) self.pos_cur = 0 self.num_neg_added = 0 self.retrain_limit = 2000 self.evict_thresh = -1.1 self.loss_history = [] def alloc_pos(self, count): self.pos_cur = 0 self.pos = np.zeros((count, self.dim), dtype=np.float32) def append_pos(self, feat): num = feat.shape[0] self.pos[self.pos_cur:self.pos_cur + num, :] = feat self.pos_cur += num def train(self): print('>>> Updating {} detector <<<'.format(self.cls)) num_pos = self.pos.shape[0] num_neg = self.neg.shape[0] print('Cache holds {} pos examples and {} neg examples'. 
format(num_pos, num_neg)) X = np.vstack((self.pos, self.neg)) * self.feature_scale y = np.hstack((np.ones(num_pos), -np.ones(num_neg))) self.svm.fit(X, y) w = self.svm.coef_ b = self.svm.intercept_[0] scores = self.svm.decision_function(X) pos_scores = scores[:num_pos] neg_scores = scores[num_pos:] pos_loss = (self.C * self.pos_weight * np.maximum(0, 1 - pos_scores).sum()) neg_loss = self.C * np.maximum(0, 1 + neg_scores).sum() reg_loss = 0.5 * np.dot(w.ravel(), w.ravel()) + 0.5 * b ** 2 tot_loss = pos_loss + neg_loss + reg_loss self.loss_history.append((tot_loss, pos_loss, neg_loss, reg_loss)) for i, losses in enumerate(self.loss_history): print((' {:d}: obj val: {:.3f} = {:.3f} ' '(pos) + {:.3f} (neg) + {:.3f} (reg)').format(i, *losses)) return ((w * self.feature_scale, b * self.feature_scale), pos_scores, neg_scores) def append_neg_and_retrain(self, feat=None, force=False): if feat is not None: num = feat.shape[0] self.neg = np.vstack((self.neg, feat)) self.num_neg_added += num if self.num_neg_added > self.retrain_limit or force: self.num_neg_added = 0 new_w_b, pos_scores, neg_scores = self.train() # scores = np.dot(self.neg, new_w_b[0].T) + new_w_b[1] # easy_inds = np.where(neg_scores < self.evict_thresh)[0] not_easy_inds = np.where(neg_scores >= self.evict_thresh)[0] if len(not_easy_inds) > 0: self.neg = self.neg[not_easy_inds, :] # self.neg = np.delete(self.neg, easy_inds) print(' Pruning easy negatives') print(' Cache holds {} pos examples and {} neg examples'. format(self.pos.shape[0], self.neg.shape[0])) print(' {} pos support vectors'.format((pos_scores <= 1).sum())) print(' {} neg support vectors'.format((neg_scores >= -1).sum())) return new_w_b else: return None def parse_args(): """ Parse input arguments """ parser = argparse.ArgumentParser(description='Train SVMs (old skool)') parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]', default=0, type=int) parser.add_argument('--def', dest='prototxt', help='prototxt file defining the network', default=None, type=str) parser.add_argument('--net', dest='caffemodel', help='model to test', default=None, type=str) parser.add_argument('--cfg', dest='cfg_file', help='optional config file', default=None, type=str) parser.add_argument('--imdb', dest='imdb_name', help='dataset to train on', default='voc_2007_trainval', type=str) if len(sys.argv) == 1: parser.print_help() sys.exit(1) args = parser.parse_args() return args if __name__ == '__main__': # Must turn this off to prevent issues when digging into the net blobs to # pull out features (tricky!) cfg.DEDUP_BOXES = 0 # Must turn this on because we use the test im_detect() method to harvest # hard negatives cfg.TEST.SVM = True args = parse_args() print('Called with args:') print(args) if args.cfg_file is not None: cfg_from_file(args.cfg_file) print('Using config:') pprint.pprint(cfg) # fix the random seed for reproducibility np.random.seed(cfg.RNG_SEED) # set up caffe caffe.set_mode_gpu() if args.gpu_id is not None: caffe.set_device(args.gpu_id) net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST) net.name = os.path.splitext(os.path.basename(args.caffemodel))[0] out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svm' out_dir = os.path.dirname(args.caffemodel) imdb = get_imdb(args.imdb_name) print 'Loaded dataset `{:s}` for training'.format(imdb.name) # enhance roidb to contain flipped examples if cfg.TRAIN.USE_FLIPPED: print 'Appending horizontally-flipped training examples...' 
imdb.append_flipped_roidb() print 'done' SVMTrainer(net, imdb).train() filename = '{}/{}.caffemodel'.format(out_dir, out) net.save(filename) print 'Wrote svm model to: {:s}'.format(filename)
mit
bataeves/kaggle
instacart/imba/f1_optimal.py
2
1524
import pandas as pd
import numpy as np
from joblib import Parallel, delayed
import multiprocessing
from utils import fast_search

none_product = 50000

def applyParallel(dfGrouped, func):
    retLst = Parallel(n_jobs=multiprocessing.cpu_count())(delayed(func)(group) for name, group in dfGrouped)
    return pd.concat(retLst)

def create_products(df):
    # print(df.product_id.values.shape)
    products = df.product_id.values
    prob = df.prediction.values
    sort_index = np.argsort(prob)[::-1]
    values = fast_search(prob[sort_index][0:80], dtype=np.float64)
    index = np.argmax(values)
    print('iteration', df.shape[0], 'optimal value', index)
    best = ' '.join(map(lambda x: str(x) if x != none_product else 'None', products[sort_index][0:index]))
    df = df[0:1]
    df.loc[:, 'products'] = best
    return df

if __name__ == '__main__':
    data = pd.read_pickle('data/prediction_rnn.pkl')
    data['not_a_product'] = 1. - data.prediction
    gp = data.groupby('order_id')['not_a_product'].apply(lambda x: np.multiply.reduce(x.values)).reset_index()
    gp.rename(columns={'not_a_product': 'prediction'}, inplace=True)
    gp['product_id'] = none_product
    data = pd.concat([data, gp], axis=0)
    data.product_id = data.product_id.astype(np.uint32)
    data = data.loc[data.prediction > 0.01, ['order_id', 'prediction', 'product_id']]
    data = applyParallel(data.groupby(data.order_id), create_products).reset_index()
    data[['order_id', 'products']].to_csv('data/sub.csv', index=False)
unlicense
mr3bn/DAT210x
Module6/assignment3.py
1
3383
import matplotlib.pyplot as plt import pandas as pd def load(path_test, path_train): # Load up the data. # You probably could have written this.. with open(path_test, 'r') as f: testing = pd.read_csv(f) with open(path_train, 'r') as f: training = pd.read_csv(f) # The number of samples between training and testing can vary # But the number of features better remain the same! n_features = testing.shape[1] X_test = testing.ix[:,:n_features-1] X_train = training.ix[:,:n_features-1] y_test = testing.ix[:,n_features-1:].values.ravel() y_train = training.ix[:,n_features-1:].values.ravel() # # Special: return X_train, X_test, y_train, y_test def peekData(x): # The 'targets' or labels are stored in y. The 'samples' or data is stored in X print "Peeking your data..." fig = plt.figure() cnt = 0 for col in range(5): for row in range(10): plt.subplot(5, 10, cnt + 1) plt.imshow(x.ix[cnt,:].reshape(8,8), cmap=plt.cm.gray_r, interpolation='nearest') plt.axis('off') cnt += 1 fig.set_tight_layout(True) plt.show() def drawPredictions(model, X_train, X_test, y_train, y_test): fig = plt.figure() # Make some guesses y_guess = model.predict(X_test) # # INFO: This is the second lab we're demonstrating how to # do multi-plots using matplot lab. In the next assignment(s), # it'll be your responsibility to use this and assignment #1 # as tutorials to add in the plotting code yourself! num_rows = 10 num_cols = 5 index = 0 for col in range(num_cols): for row in range(num_rows): plt.subplot(num_cols, num_rows, index + 1) # 8x8 is the size of the image, 64 pixels plt.imshow(X_test.ix[index,:].reshape(8,8), cmap=plt.cm.gray_r, interpolation='nearest') # Green = Guessed right # Red = Fail! fontcolor = 'g' if y_test[index] == y_guess[index] else 'r' plt.title('Label: %i' % y_guess[index], fontsize=6, color=fontcolor) plt.axis('off') index += 1 fig.set_tight_layout(True) plt.show() X = pd.read_table('Datasets/parkinsons.data', delimiter=',', index_col='name') y = X['status'] del X['status'] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 7) import sklearn.preprocessing as pre transformer = pre.StandardScaler() transformer.fit(X_train) X_train = transformer.transform(X_train) X_test = transformer.transform(X_test) #from sklearn.decomposition import PCA #pca = PCA(n_components=14) #pca.fit(X_train) #X_train = pca.transform(X_train) #X_test = pca.transform(X_test) from sklearn import manifold iso = manifold.Isomap(n_neighbors=5, n_components=6) iso.fit(X_train) X_train = iso.transform(X_train) X_test = iso.transform(X_test) from sklearn.svm import SVC svc = SVC() svc.fit(X_train, y_train) print svc.score(X_test, y_test) import numpy as np c_range = np.arange(0.05, 2, 0.05) gamma_range = np.arange(0.001, 0.1, 0.001) best_c = 0 best_gamma = 0 best_score = 0 for c in c_range: for g in gamma_range: svc = SVC(C=c, gamma=g) svc.fit(X_train, y_train) if svc.score(X_test, y_test) > best_score: best_c = c best_gamma = g best_score = svc.score(X_test, y_test) print best_score print 'C: ', best_c print 'gamma: ', best_gamma
mit
davidgbe/scikit-learn
benchmarks/bench_glm.py
297
1493
""" A comparison of different methods in GLM Data comes from a random square matrix. """ from datetime import datetime import numpy as np from sklearn import linear_model from sklearn.utils.bench import total_seconds if __name__ == '__main__': import pylab as pl n_iter = 40 time_ridge = np.empty(n_iter) time_ols = np.empty(n_iter) time_lasso = np.empty(n_iter) dimensions = 500 * np.arange(1, n_iter + 1) for i in range(n_iter): print('Iteration %s of %s' % (i, n_iter)) n_samples, n_features = 10 * i + 3, 10 * i + 3 X = np.random.randn(n_samples, n_features) Y = np.random.randn(n_samples) start = datetime.now() ridge = linear_model.Ridge(alpha=1.) ridge.fit(X, Y) time_ridge[i] = total_seconds(datetime.now() - start) start = datetime.now() ols = linear_model.LinearRegression() ols.fit(X, Y) time_ols[i] = total_seconds(datetime.now() - start) start = datetime.now() lasso = linear_model.LassoLars() lasso.fit(X, Y) time_lasso[i] = total_seconds(datetime.now() - start) pl.figure('scikit-learn GLM benchmark results') pl.xlabel('Dimensions') pl.ylabel('Time (s)') pl.plot(dimensions, time_ridge, color='r') pl.plot(dimensions, time_ols, color='g') pl.plot(dimensions, time_lasso, color='b') pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left') pl.axis('tight') pl.show()
bsd-3-clause
lbishal/scikit-learn
benchmarks/bench_covertype.py
120
7381
""" =========================== Covertype dataset benchmark =========================== Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART (decision tree), RandomForest and Extra-Trees on the forest covertype dataset of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is low dimensional with 54 features and a sparsity of approx. 23%. Here, we consider the task of predicting class 1 (spruce/fir). The classification performance of SGD is competitive with Liblinear while being two orders of magnitude faster to train:: [..] Classification performance: =========================== Classifier train-time test-time error-rate -------------------------------------------- liblinear 15.9744s 0.0705s 0.2305 GaussianNB 3.0666s 0.3884s 0.4841 SGD 1.0558s 0.1152s 0.2300 CART 79.4296s 0.0523s 0.0469 RandomForest 1190.1620s 0.5881s 0.0243 ExtraTrees 640.3194s 0.6495s 0.0198 The same task has been used in a number of papers including: * `"SVM Optimization: Inverse Dependence on Training Set Size" <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_ S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08. * `"Pegasos: Primal estimated sub-gradient solver for svm" <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_ S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07. * `"Training Linear SVMs in Linear Time" <www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_ T. Joachims - In SIGKDD '06 [1] http://archive.ics.uci.edu/ml/datasets/Covertype """ from __future__ import division, print_function # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com> # Arnaud Joly <arnaud.v.joly@gmail.com> # License: BSD 3 clause import os from time import time import argparse import numpy as np from sklearn.datasets import fetch_covtype, get_data_home from sklearn.svm import LinearSVC from sklearn.linear_model import SGDClassifier, LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.metrics import zero_one_loss from sklearn.externals.joblib import Memory from sklearn.utils import check_array # Memoize the data extraction and memory map the resulting # train / test splits in readonly mode memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'), mmap_mode='r') @memory.cache def load_data(dtype=np.float32, order='C', random_state=13): """Load the data, then cache and memmap the train/test split""" ###################################################################### ## Load dataset print("Loading dataset...") data = fetch_covtype(download_if_missing=True, shuffle=True, random_state=random_state) X = check_array(data['data'], dtype=dtype, order=order) y = (data['target'] != 1).astype(np.int) ## Create train-test split (as [Joachims, 2006]) print("Creating train-test split...") n_train = 522911 X_train = X[:n_train] y_train = y[:n_train] X_test = X[n_train:] y_test = y[n_train:] ## Standardize first 10 features (the numerical ones) mean = X_train.mean(axis=0) std = X_train.std(axis=0) mean[10:] = 0.0 std[10:] = 1.0 X_train = (X_train - mean) / std X_test = (X_test - mean) / std return X_train, X_test, y_train, y_test ESTIMATORS = { 'GBRT': GradientBoostingClassifier(n_estimators=250), 'ExtraTrees': ExtraTreesClassifier(n_estimators=20), 'RandomForest': RandomForestClassifier(n_estimators=20), 'CART': 
DecisionTreeClassifier(min_samples_split=5), 'SGD': SGDClassifier(alpha=0.001, n_iter=2), 'GaussianNB': GaussianNB(), 'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False, tol=1e-3), 'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000) } if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--classifiers', nargs="+", choices=ESTIMATORS, type=str, default=['liblinear', 'GaussianNB', 'SGD', 'CART'], help="list of classifiers to benchmark.") parser.add_argument('--n-jobs', nargs="?", default=1, type=int, help="Number of concurrently running workers for " "models that support parallelism.") parser.add_argument('--order', nargs="?", default="C", type=str, choices=["F", "C"], help="Allow to choose between fortran and C ordered " "data") parser.add_argument('--random-seed', nargs="?", default=13, type=int, help="Common seed used by random number generator.") args = vars(parser.parse_args()) print(__doc__) X_train, X_test, y_train, y_test = load_data( order=args["order"], random_state=args["random_seed"]) print("") print("Dataset statistics:") print("===================") print("%s %d" % ("number of features:".ljust(25), X_train.shape[1])) print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size)) print("%s %s" % ("data type:".ljust(25), X_train.dtype)) print("%s %d (pos=%d, neg=%d, size=%dMB)" % ("number of train samples:".ljust(25), X_train.shape[0], np.sum(y_train == 1), np.sum(y_train == 0), int(X_train.nbytes / 1e6))) print("%s %d (pos=%d, neg=%d, size=%dMB)" % ("number of test samples:".ljust(25), X_test.shape[0], np.sum(y_test == 1), np.sum(y_test == 0), int(X_test.nbytes / 1e6))) print() print("Training Classifiers") print("====================") error, train_time, test_time = {}, {}, {} for name in sorted(args["classifiers"]): print("Training %s ... " % name, end="") estimator = ESTIMATORS[name] estimator_params = estimator.get_params() estimator.set_params(**{p: args["random_seed"] for p in estimator_params if p.endswith("random_state")}) if "n_jobs" in estimator_params: estimator.set_params(n_jobs=args["n_jobs"]) time_start = time() estimator.fit(X_train, y_train) train_time[name] = time() - time_start time_start = time() y_pred = estimator.predict(X_test) test_time[name] = time() - time_start error[name] = zero_one_loss(y_test, y_pred) print("done") print() print("Classification performance:") print("===========================") print("%s %s %s %s" % ("Classifier ", "train-time", "test-time", "error-rate")) print("-" * 44) for name in sorted(args["classifiers"], key=error.get): print("%s %s %s %s" % (name.ljust(12), ("%.4fs" % train_time[name]).center(10), ("%.4fs" % test_time[name]).center(10), ("%.4f" % error[name]).center(10))) print()
bsd-3-clause
yyl/btc-price-analysis
sentiment_app.py
1
6536
""" The Bokeh applet to demonstrate the relatinship of news and BTC price """ import logging logging.basicConfig(level=logging.DEBUG) ### bokeh import from bokeh.plotting import figure, curdoc from bokeh.models import Plot, ColumnDataSource from bokeh.properties import Instance from bokeh.server.app import bokeh_app from bokeh.server.utils.plugins import object_page from bokeh.models.widgets import HBox, Slider, TextInput, VBoxForm ### others import numpy as np import pandas as pd import os from datetime import datetime input_folder = "data" def get_data(threshold): ## read data based on the threshold news_price = pd.read_csv(os.path.join(input_folder, "interpolated_alchemy_nyt_bitcoin.csv"), header=True, names=['time', 'price', 'headline', 'score'], index_col=0, parse_dates=[0], date_parser=lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S")) news_price['color'] = 'red' news_price.loc[news_price['score']>threshold,'color'] = 'blue' return news_price class SlidersApp(HBox): """The main app, where parameters and controllers are defined.""" extra_generated_classes = [["SlidersApp", "SlidersApp", "HBox"]] ## read the BTC price data raw_price = pd.read_csv(os.path.join(input_folder, "price.csv"), names=['time', 'price'], index_col='time', parse_dates=[0], date_parser=lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S")) raw_price['time_index'] = raw_price.index raw_price.drop_duplicates(subset='time_index', take_last=True, inplace=True) del raw_price['time_index'] ## downsample to 12h data price_data = pd.DataFrame(raw_price.resample('12h', how='ohlc').ix[:, 3]) price_data.columns = ['price'] ## data source source = Instance(ColumnDataSource) ## inputs inputs = Instance(VBoxForm) text = Instance(TextInput) threshold = Instance(Slider) ## plots plot = Instance(Plot) @classmethod def create(cls): """One-time creation of app's objects. 
This function is called once, and is responsible for creating all objects (plots, datasources, etc) """ ## create the obj of the app obj = cls() obj.inputs = VBoxForm() ## create input widgets obj.make_inputs() ## create data source obj.make_source() ## obj.make_plots() ## layout obj.set_children() return obj def make_inputs(self): self.text = TextInput( title="Title", name='title', value='BTC chart with news tag' ) self.threshold = Slider( title="Threshold", name='threshold', value=0.0, start=-1.0, end=1.0, step=0.1 ) def make_source(self): self.source = ColumnDataSource(data=self.df) def make_plots(self): toolset = "crosshair,pan,reset,resize,save,wheel_zoom" ## fixed time index limit (epoch) start_time = (datetime.strptime("1/1/12 16:30", "%d/%m/%y %H:%M") - datetime(1970,1,1)).total_seconds()*1000 end_time = (datetime.strptime("1/5/15 16:30", "%d/%m/%y %H:%M") - datetime(1970,1,1)).total_seconds()*1000 ## Generate a figure container plot = figure(title_text_font_size="12pt", plot_height=600, plot_width=1100, tools=toolset, title=self.text.value, # title="BTC chart with news", x_axis_type="datetime", x_range=[start_time, end_time], y_range=[0, 1300] ) plot.below[0].formatter.formats = dict(years=['%Y'], months=['%b %Y'], days=['%d %b %Y']) ## the price line plot plot.line( self.price_data.index, self.price_data['price'], # color='#A6CEE3', legend='BTC Price' ) ## the news tag plot plot.circle('time', 'price', source=self.source, fill_color=self.source.data['color'], legend="News", size=8, ) self.plot = plot def set_children(self): self.children = [self.inputs, self.plot] self.inputs.children = [self.text, self.threshold] def setup_events(self): """Attaches the on_change event to the value property of the widget. The callback is set to the input_change method of this app. """ super(SlidersApp, self).setup_events() # if not self.text: # return # Text box event registration if self.text: self.text.on_change('value', self, 'input_change') # Slider event registration if self.threshold: self.threshold.on_change('value', self, 'input_change') def input_change(self, obj, attrname, old, new): """Executes whenever the input form changes. It is responsible for updating the plot, or anything else you want. Args: obj : the object that changed attrname : the attr that changed old : old value of attr new : new value of attr """ self.update_data() curdoc().add(self) def update_data(self): """Called each time that any watched property changes. This updates the sin wave data with the most recent values of the sliders. This is stored as two numpy arrays in a dict into the app's data source property. """ self.make_source() self.make_plots() self.set_children() # x = news_price.index # y = news_price['price'] # logging.debug( # "Threshold: %f" % self.threshold.value # ) ## plug back to source of the obj # self.source.data = dict(x=x, y=y, color=news_price['color']) @property def df(self): return get_data(self.threshold.value) # The following code adds a "/bokeh/sliders/" url to the bokeh-server. This # URL will render this sine wave sliders app. If you don't want to serve this # applet from a Bokeh server (for instance if you are embedding in a separate # Flask application), then just remove this block of code. @bokeh_app.route("/bokeh/sliders/") @object_page("sin") def make_sliders(): app = SlidersApp.create() return app
gpl-2.0
7even7/DAT210x
Module5/assignment8.py
1
5205
import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression matplotlib.style.use('ggplot') # Look Pretty def drawLine(model, X_test, y_test, title): # This convenience method will take care of plotting your # test observations, comparing them to the regression line, # and displaying the R2 coefficient fig = plt.figure() ax = fig.add_subplot(111) ax.scatter(X_test, y_test, c='g', marker='o') ax.plot(X_test, model.predict(X_test), color='orange', linewidth=1, alpha=0.7) print "Est 2014 " + title + " Life Expectancy: ", model.predict([[2014]])[0] print "Est 2030 " + title + " Life Expectancy: ", model.predict([[2030]])[0] print "Est 2045 " + title + " Life Expectancy: ", model.predict([[2045]])[0] score = model.score(X_test, y_test) title += " R2: " + str(score) ax.set_title(title) plt.show() # # TODO: Load up the data here into a variable called 'X'. # As usual, do a .describe and a print of your dataset and # compare it to the dataset loaded in a text file or in a # spread sheet application # # .. your code here .. X = pd.read_csv('C:\Data\Projektit\DAT210x\Module5\Datasets\life_expectancy.csv', sep='\s+') # # TODO: Create your linear regression model here and store it in a # variable called 'model'. Don't actually train or do anything else # with it yet: # # .. your code here .. model = LinearRegression() # # TODO: Slice out your data manually (e.g. don't use train_test_split, # but actually do the Indexing yourself. Set X_train to be year values # LESS than 1986, and y_train to be corresponding WhiteMale age values. # # INFO You might also want to read the note about slicing on the bottom # of this document before proceeding. # # .. your code here .. X_train = X[X['Year']<1986][['Year']] y_train = X[X['Year']<1986][['WhiteMale']] # # TODO: Train your model then pass it into drawLine with your training # set and labels. You can title it "WhiteMale". drawLine will output # to the console a 2014 extrapolation / approximation for what it # believes the WhiteMale's life expectancy in the U.S. will be... # given the pre-1986 data you trained it with. It'll also produce a # 2030 and 2045 extrapolation. # # .. your code here .. WhiteMale = model.fit(X_train, y_train) drawLine(WhiteMale, X_train, y_train, "WhiteMale") # # TODO: Print the actual 2014 WhiteMale life expectancy from your # loaded dataset # # .. your code here .. print X[X['Year']==2014][['WhiteMale']] # # TODO: Repeat the process, but instead of for WhiteMale, this time # select BlackFemale. Create a slice for BlackFemales, fit your # model, and then call drawLine. Lastly, print out the actual 2014 # BlackFemale life expectancy # # .. your code here .. X_train = X[X['Year']<1986][['Year']] y_train = X[X['Year']<1986][['BlackFemale']] BlackFemale = model.fit(X_train, y_train) drawLine(BlackFemale, X_train, y_train, "BlackFemale") # # TODO: Lastly, print out a correlation matrix for your entire # dataset, and display a visualization of the correlation # matrix, just as we described in the visualization section of # the course # # .. your code here .. X.set_index('Year', inplace=True) plt.imshow(X.corr(), cmap=plt.cm.Blues, interpolation='nearest') tick_marks = [i for i in range(len(X.columns))] plt.xticks(tick_marks, X.columns, rotation='vertical') plt.yticks(tick_marks, X.columns) plt.colorbar() plt.show() # # INFO + HINT On Fitting, Scoring, and Predicting: # # Here's a hint to help you complete the assignment without pulling # your hair out! 
When you use .fit(), .score(), and .predict() on # your model, SciKit-Learn expects your training data to be in # spreadsheet (2D Array-Like) form. This means you can't simply # pass in a 1D Array (slice) and get away with it. # # To properly prep your data, you have to pass in a 2D Numpy Array, # or a dataframe. But what happens if you really only want to pass # in a single feature? # # If you slice your dataframe using df[['ColumnName']] syntax, the # result that comes back is actually a *dataframe*. Go ahead and do # a type() on it to check it out. Since it's already a dataframe, # you're good -- no further changes needed. # # But if you slice your dataframe using the df.ColumnName syntax, # OR if you call df['ColumnName'], the result that comes back is # actually a series (1D Array)! This will cause SKLearn to bug out. # So if you are slicing using either of those two techniques, before # sending your training or testing data to .fit / .score, do a # my_column = my_column.reshape(-1,1). This will convert your 1D # array of [n_samples], to a 2D array shaped like [n_samples, 1]. # A single feature, with many samples. # # If you did something like my_column = [my_column], that would produce # an array in the shape of [1, n_samples], which is incorrect because # SKLearn expects your data to be arranged as [n_samples, n_features]. # Keep in mind, all of the above only relates to your "X" or input # data, and does not apply to your "y" or labels.
mit
BillFoland/daisyluAMR
networks/AMR_NN_Forward.py
1
6957
import os import numpy as np import h5py import matplotlib.pyplot as plt import pandas as pd import pickle import argparse import ConfigParser import sys import shlex from string import Template import time import argparse import distutils.util parser = argparse.ArgumentParser(fromfile_prefix_chars='@') parser.add_argument('-n', dest='network', default='SG', help='SG, Args, Nargs, Attr, Cat' ) parser.add_argument('-v', dest='vectorFn', required = True, help=' ') parser.add_argument('-m', dest='modelFn', required = True, help=' ') parser.add_argument('-w', dest='weightsFn', required = True, help=' ') parser.add_argument('-s', dest='sType', required = True, help='train, test, dev') parser.add_argument('-r', dest='resultsFn', default='', help=' ') parser.add_argument('-p', dest='pickleFn', default='', help='store yvals and targets') parser.add_argument('--gpuMem', dest='gpuMem', default=0.0, type=float, help='0.0=no gpu, 1.0=all memory') parser.add_argument('--hardSG', dest='hardSG', default=False, type=distutils.util.strtobool, help='force HardSG from soft') parser.add_argument('--forceSenna', dest='forceSenna', default=False, type=distutils.util.strtobool, help='translate from Glove to Senna') parser.add_argument('--forceGlove', dest='forceGlove', default=False, type=distutils.util.strtobool, help='translate from Senna to Glove') parser.add_argument('--debug', dest='debug', default=False, type=distutils.util.strtobool, help='Debug') parser.add_argument('--maxSamples', dest='maxSamples', default=None, type=int, help='Maximum Samples from train, test, dev') parser.add_argument('--noSG', dest='noSG', default=False, type=distutils.util.strtobool, help='no SG Feature input') parser.add_argument('--testBatch', dest='testBatch', default=256, type=int, help='batch size for test') if len(sys.argv) == 1: # add default option string here aString = ' ' sys.argv = [''] + aString.split(' ') print sys.argv if sys.argv[1].startswith('@'): args, unknown = parser.parse_known_args() args, unknown = parser.parse_known_args( shlex.split(open(sys.argv[1][1:]).read()) ) if unknown: print '\n' * 10 print 'Warning, unknown args', unknown print '\n' * 10 else: args = parser.parse_args() s = [] for arg in vars(args): s.append( '%-20s = %-20s %-20s ' % (arg, getattr(args, arg), '(' + str(type(getattr(args, arg))) + ')' ) ) s.sort() #print '\n'.join(s) if (args.gpuMem < 0.01): os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152 os.environ["CUDA_VISIBLE_DEVICES"] = "" import tensorflow as tf import keras.backend.tensorflow_backend as KTF def get_session(gpu_fraction=0.6): '''Assume that you have 6GB of GPU memory and want to allocate ~2GB''' num_threads = os.environ.get('OMP_NUM_THREADS') gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction) if num_threads: return tf.Session(config=tf.ConfigProto( gpu_options=gpu_options, intra_op_parallelism_threads=num_threads)) else: return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) if args.gpuMem >= 0.01: KTF.set_session(get_session(gpu_fraction=args.gpuMem)) import keras from keras import backend as K from keras.layers import Input, Embedding, LSTM, Dense, Reshape, merge, Concatenate from keras.layers import Activation, Lambda, Dropout, Layer, Masking, TimeDistributed, Bidirectional from keras.models import Model, model_from_json from SGGenerator import * from pprint import pprint as p from keras.regularizers import l2 from keras.layers.normalization import BatchNormalization from keras.callbacks import EarlyStopping, ModelCheckpoint 
from keras.utils import plot_model ga = AMRDataGenerator.getGeneralArch(args.vectorFn) if ga['target']=='L0': agt = SGGenerator(args.vectorFn, args.sType, args.testBatch , maxItems=args.maxSamples) elif ga['target']=='args': agt = ArgsGenerator(args.vectorFn, args.sType, args.testBatch , maxItems=args.maxSamples) elif ga['target']=='nargs': agt = NargsGenerator(args.vectorFn, args.sType, args.testBatch , maxItems=args.maxSamples) elif ga['target']=='attr': agt = AttrGenerator(args.vectorFn, args.sType, args.testBatch , maxItems=args.maxSamples) elif ga['target']=='ncat': agt = CatGenerator(args.vectorFn, args.sType, args.testBatch , maxItems=args.maxSamples) else: print 'Type of network is not determined by the vector genArch:' p(ga) print ga exit(1) # load json and create model json_file = open(args.modelFn, 'r') loaded_model_json = json_file.read() json_file.close() model = model_from_json(loaded_model_json) model.load_weights(args.weightsFn) model.summary() distSG_embedding_matrix = agt.readAMRDBFeatureInfo() layers = model.layers loadCount=0 for i in range(len(layers)): name = layers[i].name if 'distSGTable' == name: print 'loading weights from vectors into model for ', name w = model.get_layer(name).get_weights() print w[0].shape print distSG_embedding_matrix.shape w[0][:distSG_embedding_matrix.shape[0]] = distSG_embedding_matrix model.get_layer(name).set_weights(w) loadCount+=1 elif 'logDistSGTable' == name: log_em = np.log(distSG_embedding_matrix + 1e-20) # don't allow zero, log is inf. log_em[0] *= 0.0 print 'loading weights from vectors into model for ', name w = model.get_layer(name).get_weights() print w[0].shape print log_em.shape w[0][:log_em.shape[0]] = log_em model.get_layer(name).set_weights(w) loadCount+=1 if loadCount != 1: print 'WARNING, load count is', loadCount numberOfBatches = (agt.numberOfItems())/args.testBatch if numberOfBatches * args.testBatch < agt.numberOfItems(): numberOfBatches += 1 y_vals = model.predict_generator(agt.generate(), numberOfBatches)[0:agt.numberOfItems()] agt.setCurrentIX(0) targets = agt.getTargets(agt.numberOfItems() ) print 'yvals, targets: ', len(y_vals), len(targets) if args.pickleFn: pickle.dump ( (y_vals, targets), open(args.pickleFn, 'wb') ) if args.resultsFn: df, sm, rc, sc, precision, recall, f1, cString = agt.writeAMRResultsDatabase(args.resultsFn, y_vals, targets) else: df, sm, rc, sc, precision, recall, f1, cString = agt.getConfusionStats(y_vals, targets) print cString print df print 'test sm, rc, sc, precision, recall, f1:', sm, rc, sc, precision, recall, f1 print 'Done'
mit
dessn/sn-bhm
dessn/snana/convert_snana_data.py
1
14063
# -*- coding: utf-8 -*- """ Created on Thu Sep 15 12:42:49 2016 @author: shint1 """ import numpy as np import pandas as pd import os import pickle import inspect import re import fnmatch import hashlib import logging from scipy.stats import norm from scipy.stats import binned_statistic from dessn.snana.systematic_names import get_systematic_mapping def load_fitres(filename, skiprows=6): # logging.debug("Loading %s" % filename) if filename.endswith(".gz"): compression = "gzip" else: compression = None try: dataframe = pd.read_csv(filename, sep='\s+', compression=compression, skiprows=skiprows, comment="#") except ValueError: logging.error("Filename %s failed to load" % filename) return None columns = ['CID', 'IDSURVEY', 'zHD', 'HOST_LOGMASS', 'HOST_LOGMASS_ERR', 'x1', 'x1ERR', 'c', 'cERR', 'mB', 'mBERR', 'x0', 'x0ERR', 'COV_x1_c', 'COV_x1_x0', 'COV_c_x0', 'SIM_mB', 'SIM_x1', 'SIM_c', 'biasCor_mB', 'biasCor_x1', 'biasCor_c'] final_columns = [c for c in columns if c in dataframe.columns] data = dataframe[final_columns].to_records() return data def is_pos_def(x): if not np.all(np.isfinite(x)): return False return np.all(np.linalg.eigvals(x) > 0) def get_scaling(): file = os.path.abspath(inspect.stack()[0][1]) dir_name = os.path.dirname(file) scale_file = dir_name + os.sep + "CREATE_COV.INPUT" results = [] with open(scale_file) as f: for line in f: if line.startswith("ERRSCALE"): comps = line.split() results.append((comps[1], float(comps[3]))) return results def load_systematic_names(nml_file): expression = re.compile("[^#]ITOPT.*\[(.*)\](?!.*FITOPT000).") with open(nml_file) as f: results = expression.findall(f.read()) return results def get_systematic_scales(nml_file, override=False): scaling = get_scaling() systematic_names = load_systematic_names(nml_file) sys_label_dict = get_systematic_mapping() systematic_labels = [sys_label_dict.get(n, n) for n in systematic_names] systematics_scales = [] for name in systematic_names: scale = 1.0 if not override: for n, s in scaling: if fnmatch.fnmatch(name, n): scale = s break else: logging.info("Override engaged. 
Setting scales to unity.") systematics_scales.append(scale) logging.debug("systemtatic scales are %s" % systematics_scales) return systematic_labels, systematics_scales def get_directories(base_folder): file = os.path.abspath(inspect.stack()[0][1]) dir_name = os.path.dirname(file) dump_dir = os.path.abspath(dir_name + "/data_dump/" + base_folder) output_dir = os.path.abspath(dir_name + "/../framework/simulations/snana_data/") + "/" if base_folder.split("_")[-1].startswith("v"): base_folder = "_".join(base_folder.split("_")[:-1]) nml_file = dump_dir + "/" + base_folder + ".nml" if not os.path.exists(nml_file): logging.error("Cannot find the NML file at %s" % nml_file) exit(1) return dump_dir, output_dir, nml_file def get_realisations(base_folder, dump_dir): if base_folder.endswith("sys"): base_folder = base_folder[:-4] if base_folder.split("_")[-1].startswith("v"): base_folder = "_".join(base_folder.split("_")[:-1]) print("Base folder for sims: %s" % base_folder) inner_files = sorted(list(os.listdir(dump_dir))) inner_paths = [dump_dir + "/" + f for f in inner_files] sim_dirs = [p for p, f in zip(inner_paths, inner_files) if os.path.isdir(p) and f.startswith("DES3YR")] logging.info("Found %d sim directories in %s" % (len(sim_dirs), dump_dir)) return sim_dirs def load_dump_file(sim_dir): filename = "SIMGEN.DAT.gz" if os.path.exists(sim_dir + "/SIMGEN.DAT.gz") else "SIMGEN.DAT" compression = "gzip" if filename.endswith("gz") else None names = ["SN", "CID", "S2mb", "MAGSMEAR_COH", "S2c", "S2x1", "Z"] keep = ["CID", "S2mb", "MAGSMEAR_COH", "S2c", "Z"] dtypes = [int, float, float, float, float] dataframe = pd.read_csv(sim_dir + "/" + filename, compression=compression, sep='\s+', skiprows=1, comment="V", error_bad_lines=False, names=names) logging.info("Loaded dump file from %s" % (sim_dir + "/" + filename)) data = dataframe.to_records() res = [] for row in data: try: r = [d(row[k]) for k, d in zip(keep, dtypes)] res.append(tuple(r)) except Exception: pass data = np.array(res, dtype=[('CID', np.int32), ('S2mb', np.float64), ('MAGSMEAR_COH', np.float64), ("S2c", np.float64), ("Z", np.float64)]) good_mask = (data["S2mb"] > 10) & (data["S2mb"] < 30) data = data[good_mask] return data def digest_simulation(sim_dir, systematics_scales, output_dir, systematic_labels, load_dump=False, skip=6, biascor=None, zipped=True): max_offset_mB = 0.2 ind = 0 if "-0" in sim_dir: ind = int(sim_dir.split("-0")[-1]) - 1 logging.info("Digesting index %d in folder %s" % (ind, sim_dir)) bias_fitres = None if biascor is not None: logging.info("Biascor is %s" % biascor) short_name = os.path.basename(sim_dir).replace("DES3YR", "").replace("_DES_", "").replace("_LOWZ_", "") bias_loc = os.path.dirname(os.path.dirname(sim_dir)) + os.sep + biascor + os.sep + short_name bias_fitres_file = bias_loc + os.sep + "SALT2mu_FITOPT000_MUOPT000.FITRES" assert os.path.exists(bias_fitres_file) fres = load_fitres(bias_fitres_file, skiprows=5) bias_fitres = {"%s_%s" % (row["IDSURVEY"], row["CID"]): [row['biasCor_mB'], row['biasCor_x1'], row['biasCor_c']] for row in fres} inner_files = sorted(list(os.listdir(sim_dir))) ending = ".FITRES.gz" if zipped else ".FITRES" fitres_files = sorted([sim_dir + "/" + i for i in inner_files if i.endswith(ending)]) base_fitres = fitres_files[0] sysematics_fitres = fitres_files[1:] logging.info("Have %d fitres files for systematics" % len(sysematics_fitres)) base_fits = load_fitres(base_fitres, skiprows=skip) sysematics = [load_fitres(m, skiprows=skip) for m in sysematics_fitres] sysematics_sort_indexes = 
[None if m is None else np.argsort(m['CID']) for m in sysematics] sysematics_idss = [None if m is None else m['CID'][s] for m, s in zip(sysematics, sysematics_sort_indexes)] systematic_labels_save = [s for sc, s in zip(systematics_scales, systematic_labels) if sc != 0] num_bad_calib = 0 num_bad_calib_index = np.zeros(len(systematic_labels_save)) logging.debug("Have %d, %d, %d, %d systematics" % (len(sysematics), len(sysematics_sort_indexes), len(sysematics_idss), len(systematics_scales))) final_results = [] passed_cids = [] all_offsets = [] logging.debug("Have %d rows to process" % base_fits.shape) mass_mean = np.mean(base_fits["HOST_LOGMASS_ERR"]) not_found = 0 fake_mass = False if mass_mean == -9 or mass_mean == 0: logging.warning("Mass is fake") fake_mass = True for i, row in enumerate(base_fits): if i % 1000 == 0 and i > 0: logging.debug("Up to row %d" % i) cid = row['CID'] survey_id = row['IDSURVEY'] key = "%s_%s" % (survey_id, cid) if bias_fitres is None: not_found += 1 bias_mB, bias_x1, bias_c = 0, 0, 0 else: if key not in bias_fitres: not_found += 1 continue else: bias_mB, bias_x1, bias_c = bias_fitres[key] z = row['zHD'] mb = row['mB'] x0 = row['x0'] x1 = row['x1'] c = row['c'] mass = row['HOST_LOGMASS'] mass_err = row['HOST_LOGMASS_ERR'] if mass < 0: mass_prob = 0 else: if mass_err < 0.01: mass_err = 0.01 mass_prob = 1 - norm.cdf(10, mass, mass_err) if fake_mass: mass_prob = 0 mbe = row["mBERR"] x1e = row["x1ERR"] ce = row["cERR"] if "SIM_mB" not in row.dtype.names: sim_mb = 0 sim_x1 = 0 sim_c = 0 else: sim_mb = row["SIM_mB"] sim_x1 = row["SIM_x1"] sim_c = row["SIM_c"] cov_x1_c = row["COV_x1_c"] cov_x0_c = row["COV_c_x0"] cov_x1_x0 = row["COV_x1_x0"] cmbx1 = -5 * cov_x1_x0 / (2 * x0 * np.log(10)) cmbc = -5 * cov_x0_c / (2 * x0 * np.log(10)) cov = np.array([[mbe * mbe, cmbx1, cmbc], [cmbx1, x1e * x1e, cov_x1_c], [cmbc, cov_x1_c, ce * ce]]) if not is_pos_def(cov): continue offset_mb = [] offset_x1 = [] offset_c = [] for mag, sorted_indexes, magcids, scale in \ zip(sysematics, sysematics_sort_indexes, sysematics_idss, systematics_scales): if scale == 0: continue if mag is None: offset_mb.append(0.0) offset_x1.append(0.0) offset_c.append(0.0) continue index = np.searchsorted(magcids, cid) if index >= magcids.size or magcids[index] != cid: offset_mb.append(0.0) offset_x1.append(0.0) offset_c.append(0.0) else: offset_mb.append((mag['mB'][sorted_indexes[index]] - mb) * scale) offset_x1.append((mag['x1'][sorted_indexes[index]] - x1) * scale) offset_c.append((mag['c'][sorted_indexes[index]] - c) * scale) if len(offset_mb) == 0: offset_mb.append(0.0) offset_x1.append(0.0) offset_c.append(0.0) if np.any(np.isnan(offset_mb)) or np.any(np.abs(offset_mb) > max_offset_mB): num_bad_calib += 1 num_bad_calib_index += np.isnan(offset_mb) continue offsets = np.vstack((offset_mb, offset_x1, offset_c)) passed_cids.append(cid) if isinstance(cid, str): cid = int(hashlib.sha256(cid.encode('utf-8')).hexdigest(), 16) % 10 ** 8 all_offsets.append(offsets) final_result = [cid, z, mass_prob, sim_mb, sim_x1, sim_c, mb, x1, c, bias_mB, bias_x1, bias_c] \ + cov.flatten().tolist() + offsets.flatten().tolist() final_results.append(final_result) if not os.path.exists(output_dir): os.makedirs(output_dir) fitted_data = np.array(final_results).astype(np.float32) np.save("%s/passed_%d.npy" % (output_dir, ind), fitted_data) logging.info("Bias not found for %d out of %d SN (%d found)" % (not_found, base_fits.shape[0], base_fits.shape[0] - not_found)) logging.info("Calib faliures: %d in total. 
Breakdown: %s" % (num_bad_calib, num_bad_calib_index)) # Save the labels out with open(output_dir + "/sys_names.pkl", 'wb') as f: pickle.dump(systematic_labels_save, f, protocol=pickle.HIGHEST_PROTOCOL) if load_dump: supernovae = load_dump_file(sim_dir) all_mags = supernovae["S2mb"].astype(np.float64) + supernovae["MAGSMEAR_COH"].astype(np.float64) all_zs = supernovae["Z"].astype(np.float64) all_cids = supernovae["CID"].astype(np.int32) cids_dict = dict([(c, True) for c in passed_cids]) supernovae_passed = np.array([c in cids_dict for c in all_cids]) mask_nan = ~np.isnan(all_mags) all_data = np.vstack((all_mags[mask_nan] + 100 * supernovae_passed[mask_nan], all_zs[mask_nan])).T print(all_data.shape) if all_data.shape[0] > 7000000: all_data = all_data[:7000000, :] np.save(output_dir + "/all_%s.npy" % ind, all_data.astype(np.float32)) logging.info("%d nans in apparents. Probably correspond to num sims." % (~mask_nan).sum()) def convert(base_folder, load_dump=False, override=False, skip=11, biascor=None, zipped=True): dump_dir, output_dir, nml_file = get_directories(base_folder) logging.info("Found nml file %s" % nml_file) systematic_labels, systematics_scales = get_systematic_scales(nml_file, override=override) sim_dirs = get_realisations(base_folder, dump_dir) version = "" if base_folder.split("_")[-1].startswith("v"): version = "_" + base_folder.split("_")[-1] for sim in sim_dirs: sim_name = os.path.basename(sim) if "-0" in sim_name: this_output_dir = output_dir + sim_name.split("-0")[0] else: this_output_dir = output_dir + sim_name if base_folder.endswith("sys"): this_output_dir += "sys" this_output_dir += version digest_simulation(sim, systematics_scales, this_output_dir, systematic_labels, load_dump=load_dump, skip=skip, biascor=biascor, zipped=zipped) if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG, format="[%(funcName)20s()] %(message)s") # convert("DES3YR_LOWZ_COMBINED_TEXT_v8") convert("DES3YR_DES_COMBINED_TEXT_v8") # convert("DES3YR_DES_NOMINAL") # convert("DES3YR_LOWZ_NOMINAL") # convert("DES3YR_DES_BULK_v8", skip=6) # convert("DES3YR_LOWZ_BULK_v8", skip=6) # convert("DES3YR_DES_SAM_COHERENT_v8", skip=11) # convert("DES3YR_DES_SAM_MINUIT_v8", skip=11) # convert("DES3YR_LOWZ_SAM_COHERENT_v8", skip=11) # convert("DES3YR_LOWZ_SAM_MINUIT_v8", skip=11) # convert("DES3YR_LOWZ_BULK", skip=6) # convert("DES3YR_DES_SAMTEST", skip=11) # convert("DES3YR_LOWZ_SAMTEST", skip=11) # convert("DES3YR_DES_BHMEFF_v8", load_dump=True, skip=11, zipped=True) # convert("DES3YR_LOWZ_BHMEFF_v8", load_dump=True, skip=11, zipped=True) # convert("DES3YR_LOWZ_VALIDATION", skip=11) # convert("DES3YR_DES_VALIDATION", skip=11)
mit
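digest_simulation() above builds a 3x3 covariance matrix for (mB, x1, c) from SALT2 FITRES columns. Since mB = -2.5*log10(x0) + const, dmB/dx0 = -2.5/(x0 ln 10), which is where the -5/(2*x0*log(10)) factor in the script comes from. A minimal sketch of that construction follows; the numerical inputs are made up for illustration.

import numpy as np

def mb_x1_c_cov(x0, mbe, x1e, ce, cov_x1_c, cov_x1_x0, cov_c_x0):
    dmb_dx0 = -5.0 / (2.0 * x0 * np.log(10))     # = -2.5 / (x0 * ln 10)
    cmbx1 = dmb_dx0 * cov_x1_x0
    cmbc = dmb_dx0 * cov_c_x0
    return np.array([[mbe * mbe, cmbx1,     cmbc],
                     [cmbx1,     x1e * x1e, cov_x1_c],
                     [cmbc,      cov_x1_c,  ce * ce]])

cov = mb_x1_c_cov(x0=1.2e-5, mbe=0.05, x1e=0.3, ce=0.03,
                  cov_x1_c=1e-3, cov_x1_x0=1e-7, cov_c_x0=2e-8)
# same positive-definiteness check as is_pos_def() in the script above
print(np.all(np.linalg.eigvals(cov) > 0))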
siutanwong/scikit-learn
sklearn/utils/arpack.py
265
64837
""" This contains a copy of the future version of scipy.sparse.linalg.eigen.arpack.eigsh It's an upgraded wrapper of the ARPACK library which allows the use of shift-invert mode for symmetric matrices. Find a few eigenvectors and eigenvalues of a matrix. Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/ """ # Wrapper implementation notes # # ARPACK Entry Points # ------------------- # The entry points to ARPACK are # - (s,d)seupd : single and double precision symmetric matrix # - (s,d,c,z)neupd: single,double,complex,double complex general matrix # This wrapper puts the *neupd (general matrix) interfaces in eigs() # and the *seupd (symmetric matrix) in eigsh(). # There is no Hermetian complex/double complex interface. # To find eigenvalues of a Hermetian matrix you # must use eigs() and not eigsh() # It might be desirable to handle the Hermetian case differently # and, for example, return real eigenvalues. # Number of eigenvalues returned and complex eigenvalues # ------------------------------------------------------ # The ARPACK nonsymmetric real and double interface (s,d)naupd return # eigenvalues and eigenvectors in real (float,double) arrays. # Since the eigenvalues and eigenvectors are, in general, complex # ARPACK puts the real and imaginary parts in consecutive entries # in real-valued arrays. This wrapper puts the real entries # into complex data types and attempts to return the requested eigenvalues # and eigenvectors. # Solver modes # ------------ # ARPACK and handle shifted and shift-inverse computations # for eigenvalues by providing a shift (sigma) and a solver. __docformat__ = "restructuredtext en" __all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence'] import warnings from scipy.sparse.linalg.eigen.arpack import _arpack import numpy as np from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator from scipy.sparse import identity, isspmatrix, isspmatrix_csr from scipy.linalg import lu_factor, lu_solve from scipy.sparse.sputils import isdense from scipy.sparse.linalg import gmres, splu import scipy from distutils.version import LooseVersion _type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'} _ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12} DNAUPD_ERRORS = { 0: "Normal exit.", 1: "Maximum number of iterations taken. " "All possible eigenvalues of OP has been found. IPARAM(5) " "returns the number of wanted converged Ritz values.", 2: "No longer an informational error. Deprecated starting " "with release 2 of ARPACK.", 3: "No shifts could be applied during a cycle of the " "Implicitly restarted Arnoldi iteration. One possibility " "is to increase the size of NCV relative to NEV. ", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV-NEV >= 2 and less than or equal to N.", -4: "The maximum number of Arnoldi update iterations allowed " "must be greater than zero.", -5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work array WORKL is not sufficient.", -8: "Error return from LAPACK eigenvalue calculation;", -9: "Starting vector is zero.", -10: "IPARAM(7) must be 1,2,3,4.", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "IPARAM(1) must be equal to 0 or 1.", -13: "NEV and WHICH = 'BE' are incompatible.", -9999: "Could not build an Arnoldi factorization. " "IPARAM(5) returns the size of the current Arnoldi " "factorization. The user is advised to check that " "enough workspace and array storage has been allocated." 
} SNAUPD_ERRORS = DNAUPD_ERRORS ZNAUPD_ERRORS = DNAUPD_ERRORS.copy() ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3." CNAUPD_ERRORS = ZNAUPD_ERRORS DSAUPD_ERRORS = { 0: "Normal exit.", 1: "Maximum number of iterations taken. " "All possible eigenvalues of OP has been found.", 2: "No longer an informational error. Deprecated starting with " "release 2 of ARPACK.", 3: "No shifts could be applied during a cycle of the Implicitly " "restarted Arnoldi iteration. One possibility is to increase " "the size of NCV relative to NEV. ", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV must be greater than NEV and less than or equal to N.", -4: "The maximum number of Arnoldi update iterations allowed " "must be greater than zero.", -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work array WORKL is not sufficient.", -8: "Error return from trid. eigenvalue calculation; " "Informational error from LAPACK routine dsteqr .", -9: "Starting vector is zero.", -10: "IPARAM(7) must be 1,2,3,4,5.", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "IPARAM(1) must be equal to 0 or 1.", -13: "NEV and WHICH = 'BE' are incompatible. ", -9999: "Could not build an Arnoldi factorization. " "IPARAM(5) returns the size of the current Arnoldi " "factorization. The user is advised to check that " "enough workspace and array storage has been allocated.", } SSAUPD_ERRORS = DSAUPD_ERRORS DNEUPD_ERRORS = { 0: "Normal exit.", 1: "The Schur form computed by LAPACK routine dlahqr " "could not be reordered by LAPACK routine dtrsen. " "Re-enter subroutine dneupd with IPARAM(5)NCV and " "increase the size of the arrays DR and DI to have " "dimension at least dimension NCV and allocate at least NCV " "columns for Z. NOTE: Not necessary if Z and V share " "the same space. Please notify the authors if this error " "occurs.", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV-NEV >= 2 and less than or equal to N.", -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work WORKL array is not sufficient.", -8: "Error return from calculation of a real Schur form. " "Informational error from LAPACK routine dlahqr .", -9: "Error return from calculation of eigenvectors. " "Informational error from LAPACK routine dtrevc.", -10: "IPARAM(7) must be 1,2,3,4.", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "HOWMNY = 'S' not yet implemented", -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.", -14: "DNAUPD did not find any eigenvalues to sufficient " "accuracy.", -15: "DNEUPD got a different count of the number of converged " "Ritz values than DNAUPD got. This indicates the user " "probably made an error in passing data from DNAUPD to " "DNEUPD or that the data was modified before entering " "DNEUPD", } SNEUPD_ERRORS = DNEUPD_ERRORS.copy() SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr " "could not be reordered by LAPACK routine strsen . " "Re-enter subroutine dneupd with IPARAM(5)=NCV and " "increase the size of the arrays DR and DI to have " "dimension at least dimension NCV and allocate at least " "NCV columns for Z. NOTE: Not necessary if Z and V share " "the same space. Please notify the authors if this error " "occurs.") SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient " "accuracy.") SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of " "converged Ritz values than SNAUPD got. 
This indicates " "the user probably made an error in passing data from " "SNAUPD to SNEUPD or that the data was modified before " "entering SNEUPD") ZNEUPD_ERRORS = {0: "Normal exit.", 1: "The Schur form computed by LAPACK routine csheqr " "could not be reordered by LAPACK routine ztrsen. " "Re-enter subroutine zneupd with IPARAM(5)=NCV and " "increase the size of the array D to have " "dimension at least dimension NCV and allocate at least " "NCV columns for Z. NOTE: Not necessary if Z and V share " "the same space. Please notify the authors if this error " "occurs.", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV-NEV >= 1 and less than or equal to N.", -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work WORKL array is not sufficient.", -8: "Error return from LAPACK eigenvalue calculation. " "This should never happened.", -9: "Error return from calculation of eigenvectors. " "Informational error from LAPACK routine ztrevc.", -10: "IPARAM(7) must be 1,2,3", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "HOWMNY = 'S' not yet implemented", -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.", -14: "ZNAUPD did not find any eigenvalues to sufficient " "accuracy.", -15: "ZNEUPD got a different count of the number of " "converged Ritz values than ZNAUPD got. This " "indicates the user probably made an error in passing " "data from ZNAUPD to ZNEUPD or that the data was " "modified before entering ZNEUPD"} CNEUPD_ERRORS = ZNEUPD_ERRORS.copy() CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient " "accuracy.") CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of " "converged Ritz values than CNAUPD got. This indicates " "the user probably made an error in passing data from " "CNAUPD to CNEUPD or that the data was modified before " "entering CNEUPD") DSEUPD_ERRORS = { 0: "Normal exit.", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV must be greater than NEV and less than or equal to N.", -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work WORKL array is not sufficient.", -8: ("Error return from trid. eigenvalue calculation; " "Information error from LAPACK routine dsteqr."), -9: "Starting vector is zero.", -10: "IPARAM(7) must be 1,2,3,4,5.", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "NEV and WHICH = 'BE' are incompatible.", -14: "DSAUPD did not find any eigenvalues to sufficient accuracy.", -15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.", -16: "HOWMNY = 'S' not yet implemented", -17: ("DSEUPD got a different count of the number of converged " "Ritz values than DSAUPD got. This indicates the user " "probably made an error in passing data from DSAUPD to " "DSEUPD or that the data was modified before entering " "DSEUPD.") } SSEUPD_ERRORS = DSEUPD_ERRORS.copy() SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues " "to sufficient accuracy.") SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of " "converged " "Ritz values than SSAUPD got. 
This indicates the user " "probably made an error in passing data from SSAUPD to " "SSEUPD or that the data was modified before entering " "SSEUPD.") _SAUPD_ERRORS = {'d': DSAUPD_ERRORS, 's': SSAUPD_ERRORS} _NAUPD_ERRORS = {'d': DNAUPD_ERRORS, 's': SNAUPD_ERRORS, 'z': ZNAUPD_ERRORS, 'c': CNAUPD_ERRORS} _SEUPD_ERRORS = {'d': DSEUPD_ERRORS, 's': SSEUPD_ERRORS} _NEUPD_ERRORS = {'d': DNEUPD_ERRORS, 's': SNEUPD_ERRORS, 'z': ZNEUPD_ERRORS, 'c': CNEUPD_ERRORS} # accepted values of parameter WHICH in _SEUPD _SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE'] # accepted values of parameter WHICH in _NAUPD _NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI'] class ArpackError(RuntimeError): """ ARPACK error """ def __init__(self, info, infodict=_NAUPD_ERRORS): msg = infodict.get(info, "Unknown error") RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg)) class ArpackNoConvergence(ArpackError): """ ARPACK iteration did not converge Attributes ---------- eigenvalues : ndarray Partial result. Converged eigenvalues. eigenvectors : ndarray Partial result. Converged eigenvectors. """ def __init__(self, msg, eigenvalues, eigenvectors): ArpackError.__init__(self, -1, {-1: msg}) self.eigenvalues = eigenvalues self.eigenvectors = eigenvectors class _ArpackParams(object): def __init__(self, n, k, tp, mode=1, sigma=None, ncv=None, v0=None, maxiter=None, which="LM", tol=0): if k <= 0: raise ValueError("k must be positive, k=%d" % k) if maxiter is None: maxiter = n * 10 if maxiter <= 0: raise ValueError("maxiter must be positive, maxiter=%d" % maxiter) if tp not in 'fdFD': raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'") if v0 is not None: # ARPACK overwrites its initial resid, make a copy self.resid = np.array(v0, copy=True) info = 1 else: self.resid = np.zeros(n, tp) info = 0 if sigma is None: #sigma not used self.sigma = 0 else: self.sigma = sigma if ncv is None: ncv = 2 * k + 1 ncv = min(ncv, n) self.v = np.zeros((n, ncv), tp) # holds Ritz vectors self.iparam = np.zeros(11, "int") # set solver mode and parameters ishfts = 1 self.mode = mode self.iparam[0] = ishfts self.iparam[2] = maxiter self.iparam[3] = 1 self.iparam[6] = mode self.n = n self.tol = tol self.k = k self.maxiter = maxiter self.ncv = ncv self.which = which self.tp = tp self.info = info self.converged = False self.ido = 0 def _raise_no_convergence(self): msg = "No convergence (%d iterations, %d/%d eigenvectors converged)" k_ok = self.iparam[4] num_iter = self.iparam[2] try: ev, vec = self.extract(True) except ArpackError as err: msg = "%s [%s]" % (msg, err) ev = np.zeros((0,)) vec = np.zeros((self.n, 0)) k_ok = 0 raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec) class _SymmetricArpackParams(_ArpackParams): def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None, Minv_matvec=None, sigma=None, ncv=None, v0=None, maxiter=None, which="LM", tol=0): # The following modes are supported: # mode = 1: # Solve the standard eigenvalue problem: # A*x = lambda*x : # A - symmetric # Arguments should be # matvec = left multiplication by A # M_matvec = None [not used] # Minv_matvec = None [not used] # # mode = 2: # Solve the general eigenvalue problem: # A*x = lambda*M*x # A - symmetric # M - symmetric positive definite # Arguments should be # matvec = left multiplication by A # M_matvec = left multiplication by M # Minv_matvec = left multiplication by M^-1 # # mode = 3: # Solve the general eigenvalue problem in shift-invert mode: # A*x = lambda*M*x # A - symmetric # M - symmetric positive semi-definite # Arguments should be 
# matvec = None [not used] # M_matvec = left multiplication by M # or None, if M is the identity # Minv_matvec = left multiplication by [A-sigma*M]^-1 # # mode = 4: # Solve the general eigenvalue problem in Buckling mode: # A*x = lambda*AG*x # A - symmetric positive semi-definite # AG - symmetric indefinite # Arguments should be # matvec = left multiplication by A # M_matvec = None [not used] # Minv_matvec = left multiplication by [A-sigma*AG]^-1 # # mode = 5: # Solve the general eigenvalue problem in Cayley-transformed mode: # A*x = lambda*M*x # A - symmetric # M - symmetric positive semi-definite # Arguments should be # matvec = left multiplication by A # M_matvec = left multiplication by M # or None, if M is the identity # Minv_matvec = left multiplication by [A-sigma*M]^-1 if mode == 1: if matvec is None: raise ValueError("matvec must be specified for mode=1") if M_matvec is not None: raise ValueError("M_matvec cannot be specified for mode=1") if Minv_matvec is not None: raise ValueError("Minv_matvec cannot be specified for mode=1") self.OP = matvec self.B = lambda x: x self.bmat = 'I' elif mode == 2: if matvec is None: raise ValueError("matvec must be specified for mode=2") if M_matvec is None: raise ValueError("M_matvec must be specified for mode=2") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified for mode=2") self.OP = lambda x: Minv_matvec(matvec(x)) self.OPa = Minv_matvec self.OPb = matvec self.B = M_matvec self.bmat = 'G' elif mode == 3: if matvec is not None: raise ValueError("matvec must not be specified for mode=3") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified for mode=3") if M_matvec is None: self.OP = Minv_matvec self.OPa = Minv_matvec self.B = lambda x: x self.bmat = 'I' else: self.OP = lambda x: Minv_matvec(M_matvec(x)) self.OPa = Minv_matvec self.B = M_matvec self.bmat = 'G' elif mode == 4: if matvec is None: raise ValueError("matvec must be specified for mode=4") if M_matvec is not None: raise ValueError("M_matvec must not be specified for mode=4") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified for mode=4") self.OPa = Minv_matvec self.OP = lambda x: self.OPa(matvec(x)) self.B = matvec self.bmat = 'G' elif mode == 5: if matvec is None: raise ValueError("matvec must be specified for mode=5") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified for mode=5") self.OPa = Minv_matvec self.A_matvec = matvec if M_matvec is None: self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x) self.B = lambda x: x self.bmat = 'I' else: self.OP = lambda x: Minv_matvec(matvec(x) + sigma * M_matvec(x)) self.B = M_matvec self.bmat = 'G' else: raise ValueError("mode=%i not implemented" % mode) if which not in _SEUPD_WHICH: raise ValueError("which must be one of %s" % ' '.join(_SEUPD_WHICH)) if k >= n: raise ValueError("k must be less than rank(A), k=%d" % k) _ArpackParams.__init__(self, n, k, tp, mode, sigma, ncv, v0, maxiter, which, tol) if self.ncv > n or self.ncv <= k: raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv) self.workd = np.zeros(3 * n, self.tp) self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp) ltr = _type_conv[self.tp] if ltr not in ["s", "d"]: raise ValueError("Input matrix is not real-valued.") self._arpack_solver = _arpack.__dict__[ltr + 'saupd'] self._arpack_extract = _arpack.__dict__[ltr + 'seupd'] self.iterate_infodict = _SAUPD_ERRORS[ltr] self.extract_infodict = _SEUPD_ERRORS[ltr] self.ipntr = np.zeros(11, "int") def iterate(self): self.ido, self.resid, self.v, 
self.iparam, self.ipntr, self.info = \ self._arpack_solver(self.ido, self.bmat, self.which, self.k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.info) xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n) yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n) if self.ido == -1: # initialization self.workd[yslice] = self.OP(self.workd[xslice]) elif self.ido == 1: # compute y = Op*x if self.mode == 1: self.workd[yslice] = self.OP(self.workd[xslice]) elif self.mode == 2: self.workd[xslice] = self.OPb(self.workd[xslice]) self.workd[yslice] = self.OPa(self.workd[xslice]) elif self.mode == 5: Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n) Ax = self.A_matvec(self.workd[xslice]) self.workd[yslice] = self.OPa(Ax + (self.sigma * self.workd[Bxslice])) else: Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n) self.workd[yslice] = self.OPa(self.workd[Bxslice]) elif self.ido == 2: self.workd[yslice] = self.B(self.workd[xslice]) elif self.ido == 3: raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0") else: self.converged = True if self.info == 0: pass elif self.info == 1: self._raise_no_convergence() else: raise ArpackError(self.info, infodict=self.iterate_infodict) def extract(self, return_eigenvectors): rvec = return_eigenvectors ierr = 0 howmny = 'A' # return all eigenvectors sselect = np.zeros(self.ncv, 'int') # unused d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma, self.bmat, self.which, self.k, self.tol, self.resid, self.v, self.iparam[0:7], self.ipntr, self.workd[0:2 * self.n], self.workl, ierr) if ierr != 0: raise ArpackError(ierr, infodict=self.extract_infodict) k_ok = self.iparam[4] d = d[:k_ok] z = z[:, :k_ok] if return_eigenvectors: return d, z else: return d class _UnsymmetricArpackParams(_ArpackParams): def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None, Minv_matvec=None, sigma=None, ncv=None, v0=None, maxiter=None, which="LM", tol=0): # The following modes are supported: # mode = 1: # Solve the standard eigenvalue problem: # A*x = lambda*x # A - square matrix # Arguments should be # matvec = left multiplication by A # M_matvec = None [not used] # Minv_matvec = None [not used] # # mode = 2: # Solve the generalized eigenvalue problem: # A*x = lambda*M*x # A - square matrix # M - symmetric, positive semi-definite # Arguments should be # matvec = left multiplication by A # M_matvec = left multiplication by M # Minv_matvec = left multiplication by M^-1 # # mode = 3,4: # Solve the general eigenvalue problem in shift-invert mode: # A*x = lambda*M*x # A - square matrix # M - symmetric, positive semi-definite # Arguments should be # matvec = None [not used] # M_matvec = left multiplication by M # or None, if M is the identity # Minv_matvec = left multiplication by [A-sigma*M]^-1 # if A is real and mode==3, use the real part of Minv_matvec # if A is real and mode==4, use the imag part of Minv_matvec # if A is complex and mode==3, # use real and imag parts of Minv_matvec if mode == 1: if matvec is None: raise ValueError("matvec must be specified for mode=1") if M_matvec is not None: raise ValueError("M_matvec cannot be specified for mode=1") if Minv_matvec is not None: raise ValueError("Minv_matvec cannot be specified for mode=1") self.OP = matvec self.B = lambda x: x self.bmat = 'I' elif mode == 2: if matvec is None: raise ValueError("matvec must be specified for mode=2") if M_matvec is None: raise ValueError("M_matvec must be specified for mode=2") if Minv_matvec is None: 
raise ValueError("Minv_matvec must be specified for mode=2") self.OP = lambda x: Minv_matvec(matvec(x)) self.OPa = Minv_matvec self.OPb = matvec self.B = M_matvec self.bmat = 'G' elif mode in (3, 4): if matvec is None: raise ValueError("matvec must be specified " "for mode in (3,4)") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified " "for mode in (3,4)") self.matvec = matvec if tp in 'DF': # complex type if mode == 3: self.OPa = Minv_matvec else: raise ValueError("mode=4 invalid for complex A") else: # real type if mode == 3: self.OPa = lambda x: np.real(Minv_matvec(x)) else: self.OPa = lambda x: np.imag(Minv_matvec(x)) if M_matvec is None: self.B = lambda x: x self.bmat = 'I' self.OP = self.OPa else: self.B = M_matvec self.bmat = 'G' self.OP = lambda x: self.OPa(M_matvec(x)) else: raise ValueError("mode=%i not implemented" % mode) if which not in _NEUPD_WHICH: raise ValueError("Parameter which must be one of %s" % ' '.join(_NEUPD_WHICH)) if k >= n - 1: raise ValueError("k must be less than rank(A)-1, k=%d" % k) _ArpackParams.__init__(self, n, k, tp, mode, sigma, ncv, v0, maxiter, which, tol) if self.ncv > n or self.ncv <= k + 1: raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv) self.workd = np.zeros(3 * n, self.tp) self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp) ltr = _type_conv[self.tp] self._arpack_solver = _arpack.__dict__[ltr + 'naupd'] self._arpack_extract = _arpack.__dict__[ltr + 'neupd'] self.iterate_infodict = _NAUPD_ERRORS[ltr] self.extract_infodict = _NEUPD_ERRORS[ltr] self.ipntr = np.zeros(14, "int") if self.tp in 'FD': self.rwork = np.zeros(self.ncv, self.tp.lower()) else: self.rwork = None def iterate(self): if self.tp in 'fd': self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\ self._arpack_solver(self.ido, self.bmat, self.which, self.k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.info) else: self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\ self._arpack_solver(self.ido, self.bmat, self.which, self.k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.rwork, self.info) xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n) yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n) if self.ido == -1: # initialization self.workd[yslice] = self.OP(self.workd[xslice]) elif self.ido == 1: # compute y = Op*x if self.mode in (1, 2): self.workd[yslice] = self.OP(self.workd[xslice]) else: Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n) self.workd[yslice] = self.OPa(self.workd[Bxslice]) elif self.ido == 2: self.workd[yslice] = self.B(self.workd[xslice]) elif self.ido == 3: raise ValueError("ARPACK requested user shifts. 
Assure ISHIFT==0") else: self.converged = True if self.info == 0: pass elif self.info == 1: self._raise_no_convergence() else: raise ArpackError(self.info, infodict=self.iterate_infodict) def extract(self, return_eigenvectors): k, n = self.k, self.n ierr = 0 howmny = 'A' # return all eigenvectors sselect = np.zeros(self.ncv, 'int') # unused sigmar = np.real(self.sigma) sigmai = np.imag(self.sigma) workev = np.zeros(3 * self.ncv, self.tp) if self.tp in 'fd': dr = np.zeros(k + 1, self.tp) di = np.zeros(k + 1, self.tp) zr = np.zeros((n, k + 1), self.tp) dr, di, zr, ierr = \ self._arpack_extract( return_eigenvectors, howmny, sselect, sigmar, sigmai, workev, self.bmat, self.which, k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.info) if ierr != 0: raise ArpackError(ierr, infodict=self.extract_infodict) nreturned = self.iparam[4] # number of good eigenvalues returned # Build complex eigenvalues from real and imaginary parts d = dr + 1.0j * di # Arrange the eigenvectors: complex eigenvectors are stored as # real,imaginary in consecutive columns z = zr.astype(self.tp.upper()) # The ARPACK nonsymmetric real and double interface (s,d)naupd # return eigenvalues and eigenvectors in real (float,double) # arrays. # Efficiency: this should check that return_eigenvectors == True # before going through this construction. if sigmai == 0: i = 0 while i <= k: # check if complex if abs(d[i].imag) != 0: # this is a complex conjugate pair with eigenvalues # in consecutive columns if i < k: z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1] z[:, i + 1] = z[:, i].conjugate() i += 1 else: #last eigenvalue is complex: the imaginary part of # the eigenvector has not been returned #this can only happen if nreturned > k, so we'll # throw out this case. nreturned -= 1 i += 1 else: # real matrix, mode 3 or 4, imag(sigma) is nonzero: # see remark 3 in <s,d>neupd.f # Build complex eigenvalues from real and imaginary parts i = 0 while i <= k: if abs(d[i].imag) == 0: d[i] = np.dot(zr[:, i], self.matvec(zr[:, i])) else: if i < k: z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1] z[:, i + 1] = z[:, i].conjugate() d[i] = ((np.dot(zr[:, i], self.matvec(zr[:, i])) + np.dot(zr[:, i + 1], self.matvec(zr[:, i + 1]))) + 1j * (np.dot(zr[:, i], self.matvec(zr[:, i + 1])) - np.dot(zr[:, i + 1], self.matvec(zr[:, i])))) d[i + 1] = d[i].conj() i += 1 else: #last eigenvalue is complex: the imaginary part of # the eigenvector has not been returned #this can only happen if nreturned > k, so we'll # throw out this case. nreturned -= 1 i += 1 # Now we have k+1 possible eigenvalues and eigenvectors # Return the ones specified by the keyword "which" if nreturned <= k: # we got less or equal as many eigenvalues we wanted d = d[:nreturned] z = z[:, :nreturned] else: # we got one extra eigenvalue (likely a cc pair, but which?) # cut at approx precision for sorting rd = np.round(d, decimals=_ndigits[self.tp]) if self.which in ['LR', 'SR']: ind = np.argsort(rd.real) elif self.which in ['LI', 'SI']: # for LI,SI ARPACK returns largest,smallest # abs(imaginary) why? ind = np.argsort(abs(rd.imag)) else: ind = np.argsort(abs(rd)) if self.which in ['LR', 'LM', 'LI']: d = d[ind[-k:]] z = z[:, ind[-k:]] if self.which in ['SR', 'SM', 'SI']: d = d[ind[:k]] z = z[:, ind[:k]] else: # complex is so much simpler... 
d, z, ierr =\ self._arpack_extract( return_eigenvectors, howmny, sselect, self.sigma, workev, self.bmat, self.which, k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.rwork, ierr) if ierr != 0: raise ArpackError(ierr, infodict=self.extract_infodict) k_ok = self.iparam[4] d = d[:k_ok] z = z[:, :k_ok] if return_eigenvectors: return d, z else: return d def _aslinearoperator_with_dtype(m): m = aslinearoperator(m) if not hasattr(m, 'dtype'): x = np.zeros(m.shape[1]) m.dtype = (m * x).dtype return m class SpLuInv(LinearOperator): """ SpLuInv: helper class to repeatedly solve M*x=b using a sparse LU-decopposition of M """ def __init__(self, M): self.M_lu = splu(M) LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype) self.isreal = not np.issubdtype(self.dtype, np.complexfloating) def _matvec(self, x): # careful here: splu.solve will throw away imaginary # part of x if M is real if self.isreal and np.issubdtype(x.dtype, np.complexfloating): return (self.M_lu.solve(np.real(x)) + 1j * self.M_lu.solve(np.imag(x))) else: return self.M_lu.solve(x) class LuInv(LinearOperator): """ LuInv: helper class to repeatedly solve M*x=b using an LU-decomposition of M """ def __init__(self, M): self.M_lu = lu_factor(M) LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype) def _matvec(self, x): return lu_solve(self.M_lu, x) class IterInv(LinearOperator): """ IterInv: helper class to repeatedly solve M*x=b using an iterative method. """ def __init__(self, M, ifunc=gmres, tol=0): if tol <= 0: # when tol=0, ARPACK uses machine tolerance as calculated # by LAPACK's _LAMCH function. We should match this tol = np.finfo(M.dtype).eps self.M = M self.ifunc = ifunc self.tol = tol if hasattr(M, 'dtype'): dtype = M.dtype else: x = np.zeros(M.shape[1]) dtype = (M * x).dtype LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype) def _matvec(self, x): b, info = self.ifunc(self.M, x, tol=self.tol) if info != 0: raise ValueError("Error in inverting M: function " "%s did not converge (info = %i)." % (self.ifunc.__name__, info)) return b class IterOpInv(LinearOperator): """ IterOpInv: helper class to repeatedly solve [A-sigma*M]*x = b using an iterative method """ def __init__(self, A, M, sigma, ifunc=gmres, tol=0): if tol <= 0: # when tol=0, ARPACK uses machine tolerance as calculated # by LAPACK's _LAMCH function. We should match this tol = np.finfo(A.dtype).eps self.A = A self.M = M self.sigma = sigma self.ifunc = ifunc self.tol = tol x = np.zeros(A.shape[1]) if M is None: dtype = self.mult_func_M_None(x).dtype self.OP = LinearOperator(self.A.shape, self.mult_func_M_None, dtype=dtype) else: dtype = self.mult_func(x).dtype self.OP = LinearOperator(self.A.shape, self.mult_func, dtype=dtype) LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype) def mult_func(self, x): return self.A.matvec(x) - self.sigma * self.M.matvec(x) def mult_func_M_None(self, x): return self.A.matvec(x) - self.sigma * x def _matvec(self, x): b, info = self.ifunc(self.OP, x, tol=self.tol) if info != 0: raise ValueError("Error in inverting [A-sigma*M]: function " "%s did not converge (info = %i)." 
% (self.ifunc.__name__, info)) return b def get_inv_matvec(M, symmetric=False, tol=0): if isdense(M): return LuInv(M).matvec elif isspmatrix(M): if isspmatrix_csr(M) and symmetric: M = M.T return SpLuInv(M).matvec else: return IterInv(M, tol=tol).matvec def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0): if sigma == 0: return get_inv_matvec(A, symmetric=symmetric, tol=tol) if M is None: #M is the identity matrix if isdense(A): if (np.issubdtype(A.dtype, np.complexfloating) or np.imag(sigma) == 0): A = np.copy(A) else: A = A + 0j A.flat[::A.shape[1] + 1] -= sigma return LuInv(A).matvec elif isspmatrix(A): A = A - sigma * identity(A.shape[0]) if symmetric and isspmatrix_csr(A): A = A.T return SpLuInv(A.tocsc()).matvec else: return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma, tol=tol).matvec else: if ((not isdense(A) and not isspmatrix(A)) or (not isdense(M) and not isspmatrix(M))): return IterOpInv(_aslinearoperator_with_dtype(A), _aslinearoperator_with_dtype(M), sigma, tol=tol).matvec elif isdense(A) or isdense(M): return LuInv(A - sigma * M).matvec else: OP = A - sigma * M if symmetric and isspmatrix_csr(OP): OP = OP.T return SpLuInv(OP.tocsc()).matvec def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None, maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None, OPpart=None): """ Find k eigenvalues and eigenvectors of the square matrix A. Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i]. If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the generalized eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i] Parameters ---------- A : An N x N matrix, array, sparse matrix, or LinearOperator representing \ the operation A * x, where A is a real or complex square matrix. k : int, default 6 The number of eigenvalues and eigenvectors desired. `k` must be smaller than N. It is not possible to compute all eigenvectors of a matrix. return_eigenvectors : boolean, default True Whether to return the eigenvectors along with the eigenvalues. M : An N x N matrix, array, sparse matrix, or LinearOperator representing the operation M*x for the generalized eigenvalue problem ``A * x = w * M * x`` M must represent a real symmetric matrix. For best results, M should be of the same type as A. Additionally: * If sigma==None, M is positive definite * If sigma is specified, M is positive semi-definite If sigma==None, eigs requires an operator to compute the solution of the linear equation `M * x = b`. This is done internally via a (sparse) LU decomposition for an explicit matrix M, or via an iterative solver for a general linear operator. Alternatively, the user can supply the matrix or operator Minv, which gives x = Minv * b = M^-1 * b sigma : real or complex Find eigenvalues near sigma using shift-invert mode. This requires an operator to compute the solution of the linear system `[A - sigma * M] * x = b`, where M is the identity matrix if unspecified. This is computed internally via a (sparse) LU decomposition for explicit matrices A & M, or via an iterative solver if either A or M is a general linear operator. Alternatively, the user can supply the matrix or operator OPinv, which gives x = OPinv * b = [A - sigma * M]^-1 * b. For a real matrix A, shift-invert can either be done in imaginary mode or real mode, specified by the parameter OPpart ('r' or 'i'). 
Note that when sigma is specified, the keyword 'which' (below) refers to the shifted eigenvalues w'[i] where: * If A is real and OPpart == 'r' (default), w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ] * If A is real and OPpart == 'i', w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ] * If A is complex, w'[i] = 1/(w[i]-sigma) v0 : array Starting vector for iteration. ncv : integer The number of Lanczos vectors generated `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``. which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'] Which `k` eigenvectors and eigenvalues to find: - 'LM' : largest magnitude - 'SM' : smallest magnitude - 'LR' : largest real part - 'SR' : smallest real part - 'LI' : largest imaginary part - 'SI' : smallest imaginary part When sigma != None, 'which' refers to the shifted eigenvalues w'[i] (see discussion in 'sigma', above). ARPACK is generally better at finding large values than small values. If small eigenvalues are desired, consider using shift-invert mode for better performance. maxiter : integer Maximum number of Arnoldi update iterations allowed tol : float Relative accuracy for eigenvalues (stopping criterion) The default value of 0 implies machine precision. return_eigenvectors : boolean Return eigenvectors (True) in addition to eigenvalues Minv : N x N matrix, array, sparse matrix, or linear operator See notes in M, above. OPinv : N x N matrix, array, sparse matrix, or linear operator See notes in sigma, above. OPpart : 'r' or 'i'. See notes in sigma, above Returns ------- w : array Array of k eigenvalues. v : array An array of `k` eigenvectors. ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i]. Raises ------ ArpackNoConvergence When the requested convergence is not obtained. The currently converged eigenvalues and eigenvectors can be found as ``eigenvalues`` and ``eigenvectors`` attributes of the exception object. See Also -------- eigsh : eigenvalues and eigenvectors for symmetric matrix A svds : singular value decomposition for a matrix A Examples -------- Find 6 eigenvectors of the identity matrix: >>> from sklearn.utils.arpack import eigs >>> id = np.identity(13) >>> vals, vecs = eigs(id, k=6) >>> vals array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j]) >>> vecs.shape (13, 6) Notes ----- This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD, ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to find the eigenvalues and eigenvectors [2]_. References ---------- .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: Solution of Large Scale Eigenvalue Problems by Implicitly Restarted Arnoldi Methods. SIAM, Philadelphia, PA, 1998. """ if A.shape[0] != A.shape[1]: raise ValueError('expected square matrix (shape=%s)' % (A.shape,)) if M is not None: if M.shape != A.shape: raise ValueError('wrong M dimensions %s, should be %s' % (M.shape, A.shape)) if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower(): warnings.warn('M does not have the same type precision as A. 
' 'This may adversely affect ARPACK convergence') n = A.shape[0] if k <= 0 or k >= n: raise ValueError("k must be between 1 and rank(A)-1") if sigma is None: matvec = _aslinearoperator_with_dtype(A).matvec if OPinv is not None: raise ValueError("OPinv should not be specified " "with sigma = None.") if OPpart is not None: raise ValueError("OPpart should not be specified with " "sigma = None or complex A") if M is None: #standard eigenvalue problem mode = 1 M_matvec = None Minv_matvec = None if Minv is not None: raise ValueError("Minv should not be " "specified with M = None.") else: #general eigenvalue problem mode = 2 if Minv is None: Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol) else: Minv = _aslinearoperator_with_dtype(Minv) Minv_matvec = Minv.matvec M_matvec = _aslinearoperator_with_dtype(M).matvec else: #sigma is not None: shift-invert mode if np.issubdtype(A.dtype, np.complexfloating): if OPpart is not None: raise ValueError("OPpart should not be specified " "with sigma=None or complex A") mode = 3 elif OPpart is None or OPpart.lower() == 'r': mode = 3 elif OPpart.lower() == 'i': if np.imag(sigma) == 0: raise ValueError("OPpart cannot be 'i' if sigma is real") mode = 4 else: raise ValueError("OPpart must be one of ('r','i')") matvec = _aslinearoperator_with_dtype(A).matvec if Minv is not None: raise ValueError("Minv should not be specified when sigma is") if OPinv is None: Minv_matvec = get_OPinv_matvec(A, M, sigma, symmetric=False, tol=tol) else: OPinv = _aslinearoperator_with_dtype(OPinv) Minv_matvec = OPinv.matvec if M is None: M_matvec = None else: M_matvec = _aslinearoperator_with_dtype(M).matvec params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode, M_matvec, Minv_matvec, sigma, ncv, v0, maxiter, which, tol) while not params.converged: params.iterate() return params.extract(return_eigenvectors) def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None, maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None, mode='normal'): """ Find k eigenvalues and eigenvectors of the real symmetric square matrix or complex hermitian matrix A. Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i]. If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the generalized eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i] Parameters ---------- A : An N x N matrix, array, sparse matrix, or LinearOperator representing the operation A * x, where A is a real symmetric matrix For buckling mode (see below) A must additionally be positive-definite k : integer The number of eigenvalues and eigenvectors desired. `k` must be smaller than N. It is not possible to compute all eigenvectors of a matrix. M : An N x N matrix, array, sparse matrix, or linear operator representing the operation M * x for the generalized eigenvalue problem ``A * x = w * M * x``. M must represent a real, symmetric matrix. For best results, M should be of the same type as A. Additionally: * If sigma == None, M is symmetric positive definite * If sigma is specified, M is symmetric positive semi-definite * In buckling mode, M is symmetric indefinite. If sigma == None, eigsh requires an operator to compute the solution of the linear equation `M * x = b`. This is done internally via a (sparse) LU decomposition for an explicit matrix M, or via an iterative solver for a general linear operator. 
Alternatively, the user can supply the matrix or operator Minv, which gives x = Minv * b = M^-1 * b sigma : real Find eigenvalues near sigma using shift-invert mode. This requires an operator to compute the solution of the linear system `[A - sigma * M] x = b`, where M is the identity matrix if unspecified. This is computed internally via a (sparse) LU decomposition for explicit matrices A & M, or via an iterative solver if either A or M is a general linear operator. Alternatively, the user can supply the matrix or operator OPinv, which gives x = OPinv * b = [A - sigma * M]^-1 * b. Note that when sigma is specified, the keyword 'which' refers to the shifted eigenvalues w'[i] where: - if mode == 'normal', w'[i] = 1 / (w[i] - sigma) - if mode == 'cayley', w'[i] = (w[i] + sigma) / (w[i] - sigma) - if mode == 'buckling', w'[i] = w[i] / (w[i] - sigma) (see further discussion in 'mode' below) v0 : array Starting vector for iteration. ncv : integer The number of Lanczos vectors generated ncv must be greater than k and smaller than n; it is recommended that ncv > 2*k which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE'] If A is a complex hermitian matrix, 'BE' is invalid. Which `k` eigenvectors and eigenvalues to find - 'LM' : Largest (in magnitude) eigenvalues - 'SM' : Smallest (in magnitude) eigenvalues - 'LA' : Largest (algebraic) eigenvalues - 'SA' : Smallest (algebraic) eigenvalues - 'BE' : Half (k/2) from each end of the spectrum When k is odd, return one more (k/2+1) from the high end When sigma != None, 'which' refers to the shifted eigenvalues w'[i] (see discussion in 'sigma', above). ARPACK is generally better at finding large values than small values. If small eigenvalues are desired, consider using shift-invert mode for better performance. maxiter : integer Maximum number of Arnoldi update iterations allowed tol : float Relative accuracy for eigenvalues (stopping criterion). The default value of 0 implies machine precision. Minv : N x N matrix, array, sparse matrix, or LinearOperator See notes in M, above OPinv : N x N matrix, array, sparse matrix, or LinearOperator See notes in sigma, above. return_eigenvectors : boolean Return eigenvectors (True) in addition to eigenvalues mode : string ['normal' | 'buckling' | 'cayley'] Specify strategy to use for shift-invert mode. This argument applies only for real-valued A and sigma != None. For shift-invert mode, ARPACK internally solves the eigenvalue problem ``OP * x'[i] = w'[i] * B * x'[i]`` and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i] into the desired eigenvectors and eigenvalues of the problem ``A * x[i] = w[i] * M * x[i]``. The modes are as follows: - 'normal' : OP = [A - sigma * M]^-1 * M B = M w'[i] = 1 / (w[i] - sigma) - 'buckling' : OP = [A - sigma * M]^-1 * A B = A w'[i] = w[i] / (w[i] - sigma) - 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M] B = M w'[i] = (w[i] + sigma) / (w[i] - sigma) The choice of mode will affect which eigenvalues are selected by the keyword 'which', and can also impact the stability of convergence (see [2] for a discussion) Returns ------- w : array Array of k eigenvalues v : array An array of k eigenvectors The v[i] is the eigenvector corresponding to the eigenvector w[i] Raises ------ ArpackNoConvergence When the requested convergence is not obtained. The currently converged eigenvalues and eigenvectors can be found as ``eigenvalues`` and ``eigenvectors`` attributes of the exception object. 
See Also -------- eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A svds : singular value decomposition for a matrix A Notes ----- This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD functions which use the Implicitly Restarted Lanczos Method to find the eigenvalues and eigenvectors [2]_. Examples -------- >>> from sklearn.utils.arpack import eigsh >>> id = np.identity(13) >>> vals, vecs = eigsh(id, k=6) >>> vals # doctest: +SKIP array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j]) >>> print(vecs.shape) (13, 6) References ---------- .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: Solution of Large Scale Eigenvalue Problems by Implicitly Restarted Arnoldi Methods. SIAM, Philadelphia, PA, 1998. """ # complex hermitian matrices should be solved with eigs if np.issubdtype(A.dtype, np.complexfloating): if mode != 'normal': raise ValueError("mode=%s cannot be used with " "complex matrix A" % mode) if which == 'BE': raise ValueError("which='BE' cannot be used with complex matrix A") elif which == 'LA': which = 'LR' elif which == 'SA': which = 'SR' ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0, ncv=ncv, maxiter=maxiter, tol=tol, return_eigenvectors=return_eigenvectors, Minv=Minv, OPinv=OPinv) if return_eigenvectors: return ret[0].real, ret[1] else: return ret.real if A.shape[0] != A.shape[1]: raise ValueError('expected square matrix (shape=%s)' % (A.shape,)) if M is not None: if M.shape != A.shape: raise ValueError('wrong M dimensions %s, should be %s' % (M.shape, A.shape)) if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower(): warnings.warn('M does not have the same type precision as A. ' 'This may adversely affect ARPACK convergence') n = A.shape[0] if k <= 0 or k >= n: raise ValueError("k must be between 1 and rank(A)-1") if sigma is None: A = _aslinearoperator_with_dtype(A) matvec = A.matvec if OPinv is not None: raise ValueError("OPinv should not be specified " "with sigma = None.") if M is None: #standard eigenvalue problem mode = 1 M_matvec = None Minv_matvec = None if Minv is not None: raise ValueError("Minv should not be " "specified with M = None.") else: #general eigenvalue problem mode = 2 if Minv is None: Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol) else: Minv = _aslinearoperator_with_dtype(Minv) Minv_matvec = Minv.matvec M_matvec = _aslinearoperator_with_dtype(M).matvec else: # sigma is not None: shift-invert mode if Minv is not None: raise ValueError("Minv should not be specified when sigma is") # normal mode if mode == 'normal': mode = 3 matvec = None if OPinv is None: Minv_matvec = get_OPinv_matvec(A, M, sigma, symmetric=True, tol=tol) else: OPinv = _aslinearoperator_with_dtype(OPinv) Minv_matvec = OPinv.matvec if M is None: M_matvec = None else: M = _aslinearoperator_with_dtype(M) M_matvec = M.matvec # buckling mode elif mode == 'buckling': mode = 4 if OPinv is None: Minv_matvec = get_OPinv_matvec(A, M, sigma, symmetric=True, tol=tol) else: Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec matvec = _aslinearoperator_with_dtype(A).matvec M_matvec = None # cayley-transform mode elif mode == 'cayley': mode = 5 matvec = _aslinearoperator_with_dtype(A).matvec if OPinv is None: Minv_matvec = get_OPinv_matvec(A, M, sigma, symmetric=True, tol=tol) else: Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec if M is None: M_matvec = None else: M_matvec = _aslinearoperator_with_dtype(M).matvec # unrecognized mode else: 
raise ValueError("unrecognized mode '%s'" % mode) params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode, M_matvec, Minv_matvec, sigma, ncv, v0, maxiter, which, tol) while not params.converged: params.iterate() return params.extract(return_eigenvectors) def _svds(A, k=6, ncv=None, tol=0): """Compute k singular values/vectors for a sparse matrix using ARPACK. Parameters ---------- A : sparse matrix Array to compute the SVD on k : int, optional Number of singular values and vectors to compute. ncv : integer The number of Lanczos vectors generated ncv must be greater than k+1 and smaller than n; it is recommended that ncv > 2*k tol : float, optional Tolerance for singular values. Zero (default) means machine precision. Notes ----- This is a naive implementation using an eigensolver on A.H * A or A * A.H, depending on which one is more efficient. """ if not (isinstance(A, np.ndarray) or isspmatrix(A)): A = np.asarray(A) n, m = A.shape if np.issubdtype(A.dtype, np.complexfloating): herm = lambda x: x.T.conjugate() eigensolver = eigs else: herm = lambda x: x.T eigensolver = eigsh if n > m: X = A XH = herm(A) else: XH = A X = herm(A) if hasattr(XH, 'dot'): def matvec_XH_X(x): return XH.dot(X.dot(x)) else: def matvec_XH_X(x): return np.dot(XH, np.dot(X, x)) XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype, shape=(X.shape[1], X.shape[1])) # Ignore deprecation warnings here: dot on matrices is deprecated, # but this code is a backport anyhow with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2) s = np.sqrt(eigvals) if n > m: v = eigvec if hasattr(X, 'dot'): u = X.dot(v) / s else: u = np.dot(X, v) / s vh = herm(v) else: u = eigvec if hasattr(X, 'dot'): vh = herm(X.dot(u) / s) else: vh = herm(np.dot(X, u) / s) return u, s, vh # check if backport is actually needed: if scipy.version.version >= LooseVersion('0.10'): from scipy.sparse.linalg import eigs, eigsh, svds else: eigs, eigsh, svds = _eigs, _eigsh, _svds
bsd-3-clause
minhlongdo/scipy
doc/source/tutorial/examples/newton_krylov_preconditioning.py
99
2489
import numpy as np
from scipy.optimize import root
from scipy.sparse import spdiags, kron
from scipy.sparse.linalg import spilu, LinearOperator
from numpy import cosh, zeros_like, mgrid, zeros, eye

# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)

P_left, P_right = 0, 0
P_top, P_bottom = 1, 0

def get_preconditioner():
    """Compute the preconditioner M"""
    diags_x = zeros((3, nx))
    diags_x[0,:] = 1/hx/hx
    diags_x[1,:] = -2/hx/hx
    diags_x[2,:] = 1/hx/hx
    Lx = spdiags(diags_x, [-1,0,1], nx, nx)

    diags_y = zeros((3, ny))
    diags_y[0,:] = 1/hy/hy
    diags_y[1,:] = -2/hy/hy
    diags_y[2,:] = 1/hy/hy
    Ly = spdiags(diags_y, [-1,0,1], ny, ny)

    J1 = kron(Lx, eye(ny)) + kron(eye(nx), Ly)

    # Now we have the matrix `J_1`. We need to find its inverse `M` --
    # however, since an approximate inverse is enough, we can use
    # the *incomplete LU* decomposition
    J1_ilu = spilu(J1)

    # This returns an object with a method .solve() that evaluates
    # the corresponding matrix-vector product. We need to wrap it into
    # a LinearOperator before it can be passed to the Krylov methods:
    M = LinearOperator(shape=(nx*ny, nx*ny), matvec=J1_ilu.solve)
    return M

def solve(preconditioning=True):
    """Compute the solution"""
    count = [0]

    def residual(P):
        count[0] += 1

        d2x = zeros_like(P)
        d2y = zeros_like(P)

        d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2])/hx/hx
        d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
        d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx

        d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
        d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
        d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy

        return d2x + d2y + 5*cosh(P).mean()**2

    # preconditioner
    if preconditioning:
        M = get_preconditioner()
    else:
        M = None

    # solve
    guess = zeros((nx, ny), float)

    sol = root(residual, guess, method='krylov',
               options={'disp': True,
                        'jac_options': {'inner_M': M}})
    print('Residual', abs(residual(sol.x)).max())
    print('Evaluations', count[0])

    return sol.x

def main():
    sol = solve(preconditioning=True)

    # visualize
    import matplotlib.pyplot as plt
    x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
    plt.clf()
    plt.pcolor(x, y, sol)
    plt.clim(0, 1)
    plt.colorbar()
    plt.show()

if __name__ == "__main__":
    main()
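The spilu/LinearOperator pattern used in get_preconditioner() is not specific to the Newton-Krylov driver; the sketch below (the toy matrix and names are illustrative, not part of the tutorial) applies the same approximate inverse as a preconditioner for a plain GMRES solve.

import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import spilu, LinearOperator, gmres

# Toy sparse system: 1-D Laplacian, chosen only to illustrate the pattern.
n = 200
A = diags([-1.0, 2.0, -1.0], offsets=[-1, 0, 1], shape=(n, n), format='csc')
b = np.ones(n)

# Incomplete LU gives an approximate inverse; wrap its solve() so that
# the Krylov method can apply M ~ A^-1 as a preconditioner.
ilu = spilu(A)
M = LinearOperator((n, n), matvec=ilu.solve)

x, info = gmres(A, b, M=M)
print("converged:", info == 0)  # info == 0 means the solver converged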
bsd-3-clause
wooga/airflow
tests/providers/apache/hive/hooks/test_hive.py
1
34850
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import datetime import itertools import os import unittest from collections import OrderedDict, namedtuple from unittest import mock import pandas as pd from hmsclient import HMSClient from airflow.exceptions import AirflowException from airflow.models.connection import Connection from airflow.models.dag import DAG from airflow.providers.apache.hive.hooks.hive import HiveMetastoreHook, HiveServer2Hook from airflow.secrets.environment_variables import CONN_ENV_PREFIX from airflow.utils import timezone from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING from tests.test_utils.asserts import assert_equal_ignore_multiple_spaces from tests.test_utils.mock_hooks import MockHiveCliHook, MockHiveServer2Hook from tests.test_utils.mock_process import MockSubProcess DEFAULT_DATE = timezone.datetime(2015, 1, 1) DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat() DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10] class TestHiveEnvironment(unittest.TestCase): def setUp(self): self.next_day = (DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()[:10] self.database = 'airflow' self.partition_by = 'ds' self.table = 'static_babynames_partitioned' with mock.patch('airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook.get_metastore_client' ) as get_metastore_mock: get_metastore_mock.return_value = mock.MagicMock() self.hook = HiveMetastoreHook() class TestHiveCliHook(unittest.TestCase): @mock.patch('tempfile.tempdir', '/tmp/') @mock.patch('tempfile._RandomNameSequence.__next__') @mock.patch('subprocess.Popen') def test_run_cli(self, mock_popen, mock_temp_dir): mock_subprocess = MockSubProcess() mock_popen.return_value = mock_subprocess mock_temp_dir.return_value = "test_run_cli" with mock.patch.dict('os.environ', { 'AIRFLOW_CTX_DAG_ID': 'test_dag_id', 'AIRFLOW_CTX_TASK_ID': 'test_task_id', 'AIRFLOW_CTX_EXECUTION_DATE': '2015-01-01T00:00:00+00:00', 'AIRFLOW_CTX_DAG_RUN_ID': '55', 'AIRFLOW_CTX_DAG_OWNER': 'airflow', 'AIRFLOW_CTX_DAG_EMAIL': 'test@airflow.com', }): hook = MockHiveCliHook() hook.run_cli("SHOW DATABASES") hive_cmd = ['beeline', '-u', '"jdbc:hive2://localhost:10000/default"', '-hiveconf', 'airflow.ctx.dag_id=test_dag_id', '-hiveconf', 'airflow.ctx.task_id=test_task_id', '-hiveconf', 'airflow.ctx.execution_date=2015-01-01T00:00:00+00:00', '-hiveconf', 'airflow.ctx.dag_run_id=55', '-hiveconf', 'airflow.ctx.dag_owner=airflow', '-hiveconf', 'airflow.ctx.dag_email=test@airflow.com', '-hiveconf', 'mapreduce.job.queuename=airflow', '-hiveconf', 'mapred.job.queue.name=airflow', '-hiveconf', 'tez.queue.name=airflow', '-f', '/tmp/airflow_hiveop_test_run_cli/tmptest_run_cli'] mock_popen.assert_called_with( hive_cmd, stdout=mock_subprocess.PIPE, stderr=mock_subprocess.STDOUT, cwd="/tmp/airflow_hiveop_test_run_cli", close_fds=True ) 
@mock.patch('subprocess.Popen') def test_run_cli_with_hive_conf(self, mock_popen): hql = "set key;\n" \ "set airflow.ctx.dag_id;\nset airflow.ctx.dag_run_id;\n" \ "set airflow.ctx.task_id;\nset airflow.ctx.execution_date;\n" dag_id_ctx_var_name = \ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID']['env_var_format'] task_id_ctx_var_name = \ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID']['env_var_format'] execution_date_ctx_var_name = \ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][ 'env_var_format'] dag_run_id_ctx_var_name = \ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][ 'env_var_format'] mock_output = ['Connecting to jdbc:hive2://localhost:10000/default', 'log4j:WARN No appenders could be found for logger (org.apache.hive.jdbc.Utils).', 'log4j:WARN Please initialize the log4j system properly.', 'log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.', 'Connected to: Apache Hive (version 1.2.1.2.3.2.0-2950)', 'Driver: Hive JDBC (version 1.2.1.spark2)', 'Transaction isolation: TRANSACTION_REPEATABLE_READ', '0: jdbc:hive2://localhost:10000/default> USE default;', 'No rows affected (0.37 seconds)', '0: jdbc:hive2://localhost:10000/default> set key;', '+------------+--+', '| set |', '+------------+--+', '| key=value |', '+------------+--+', '1 row selected (0.133 seconds)', '0: jdbc:hive2://localhost:10000/default> set airflow.ctx.dag_id;', '+---------------------------------+--+', '| set |', '+---------------------------------+--+', '| airflow.ctx.dag_id=test_dag_id |', '+---------------------------------+--+', '1 row selected (0.008 seconds)', '0: jdbc:hive2://localhost:10000/default> set airflow.ctx.dag_run_id;', '+-----------------------------------------+--+', '| set |', '+-----------------------------------------+--+', '| airflow.ctx.dag_run_id=test_dag_run_id |', '+-----------------------------------------+--+', '1 row selected (0.007 seconds)', '0: jdbc:hive2://localhost:10000/default> set airflow.ctx.task_id;', '+-----------------------------------+--+', '| set |', '+-----------------------------------+--+', '| airflow.ctx.task_id=test_task_id |', '+-----------------------------------+--+', '1 row selected (0.009 seconds)', '0: jdbc:hive2://localhost:10000/default> set airflow.ctx.execution_date;', '+-------------------------------------------------+--+', '| set |', '+-------------------------------------------------+--+', '| airflow.ctx.execution_date=test_execution_date |', '+-------------------------------------------------+--+', '1 row selected (0.006 seconds)', '0: jdbc:hive2://localhost:10000/default> ', '0: jdbc:hive2://localhost:10000/default> ', 'Closing: 0: jdbc:hive2://localhost:10000/default', ''] with mock.patch.dict('os.environ', { dag_id_ctx_var_name: 'test_dag_id', task_id_ctx_var_name: 'test_task_id', execution_date_ctx_var_name: 'test_execution_date', dag_run_id_ctx_var_name: 'test_dag_run_id', }): hook = MockHiveCliHook() mock_popen.return_value = MockSubProcess(output=mock_output) output = hook.run_cli(hql=hql, hive_conf={'key': 'value'}) process_inputs = " ".join(mock_popen.call_args_list[0][0][0]) self.assertIn('value', process_inputs) self.assertIn('test_dag_id', process_inputs) self.assertIn('test_task_id', process_inputs) self.assertIn('test_execution_date', process_inputs) self.assertIn('test_dag_run_id', process_inputs) self.assertIn('value', output) self.assertIn('test_dag_id', output) self.assertIn('test_task_id', output) self.assertIn('test_execution_date', output) 
self.assertIn('test_dag_run_id', output) @mock.patch('airflow.providers.apache.hive.hooks.hive.HiveCliHook.run_cli') def test_load_file_without_create_table(self, mock_run_cli): filepath = "/path/to/input/file" table = "output_table" hook = MockHiveCliHook() hook.load_file(filepath=filepath, table=table, create=False) query = ( "LOAD DATA LOCAL INPATH '{filepath}' " "OVERWRITE INTO TABLE {table} ;\n" .format(filepath=filepath, table=table) ) calls = [ mock.call(query) ] mock_run_cli.assert_has_calls(calls, any_order=True) @mock.patch('airflow.providers.apache.hive.hooks.hive.HiveCliHook.run_cli') def test_load_file_create_table(self, mock_run_cli): filepath = "/path/to/input/file" table = "output_table" field_dict = OrderedDict([("name", "string"), ("gender", "string")]) fields = ",\n ".join( ['`{k}` {v}'.format(k=k.strip('`'), v=v) for k, v in field_dict.items()]) hook = MockHiveCliHook() hook.load_file(filepath=filepath, table=table, field_dict=field_dict, create=True, recreate=True) create_table = ( "DROP TABLE IF EXISTS {table};\n" "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n" "ROW FORMAT DELIMITED\n" "FIELDS TERMINATED BY ','\n" "STORED AS textfile\n;".format(table=table, fields=fields) ) load_data = ( "LOAD DATA LOCAL INPATH '{filepath}' " "OVERWRITE INTO TABLE {table} ;\n" .format(filepath=filepath, table=table) ) calls = [ mock.call(create_table), mock.call(load_data) ] mock_run_cli.assert_has_calls(calls, any_order=True) @mock.patch('airflow.providers.apache.hive.hooks.hive.HiveCliHook.load_file') @mock.patch('pandas.DataFrame.to_csv') def test_load_df(self, mock_to_csv, mock_load_file): df = pd.DataFrame({"c": ["foo", "bar", "baz"]}) table = "t" delimiter = "," encoding = "utf-8" hook = MockHiveCliHook() hook.load_df(df=df, table=table, delimiter=delimiter, encoding=encoding) assert mock_to_csv.call_count == 1 kwargs = mock_to_csv.call_args[1] self.assertEqual(kwargs["header"], False) self.assertEqual(kwargs["index"], False) self.assertEqual(kwargs["sep"], delimiter) assert mock_load_file.call_count == 1 kwargs = mock_load_file.call_args[1] self.assertEqual(kwargs["delimiter"], delimiter) self.assertEqual(kwargs["field_dict"], {"c": "STRING"}) self.assertTrue(isinstance(kwargs["field_dict"], OrderedDict)) self.assertEqual(kwargs["table"], table) @mock.patch('airflow.providers.apache.hive.hooks.hive.HiveCliHook.load_file') @mock.patch('pandas.DataFrame.to_csv') def test_load_df_with_optional_parameters(self, mock_to_csv, mock_load_file): hook = MockHiveCliHook() bools = (True, False) for create, recreate in itertools.product(bools, bools): mock_load_file.reset_mock() hook.load_df(df=pd.DataFrame({"c": range(0, 10)}), table="t", create=create, recreate=recreate) assert mock_load_file.call_count == 1 kwargs = mock_load_file.call_args[1] self.assertEqual(kwargs["create"], create) self.assertEqual(kwargs["recreate"], recreate) @mock.patch('airflow.providers.apache.hive.hooks.hive.HiveCliHook.run_cli') def test_load_df_with_data_types(self, mock_run_cli): ord_dict = OrderedDict() ord_dict['b'] = [True] ord_dict['i'] = [-1] ord_dict['t'] = [1] ord_dict['f'] = [0.0] ord_dict['c'] = ['c'] ord_dict['M'] = [datetime.datetime(2018, 1, 1)] ord_dict['O'] = [object()] ord_dict['S'] = [b'STRING'] ord_dict['U'] = ['STRING'] ord_dict['V'] = [None] df = pd.DataFrame(ord_dict) hook = MockHiveCliHook() hook.load_df(df, 't') query = """ CREATE TABLE IF NOT EXISTS t ( `b` BOOLEAN, `i` BIGINT, `t` BIGINT, `f` DOUBLE, `c` STRING, `M` TIMESTAMP, `O` STRING, `S` STRING, `U` STRING, `V` STRING) ROW 
FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS textfile ; """ assert_equal_ignore_multiple_spaces( self, mock_run_cli.call_args_list[0][0][0], query) class TestHiveMetastoreHook(TestHiveEnvironment): VALID_FILTER_MAP = {'key2': 'value2'} def test_get_max_partition_from_empty_part_specs(self): max_partition = \ HiveMetastoreHook._get_max_partition_from_part_specs([], 'key1', self.VALID_FILTER_MAP) self.assertIsNone(max_partition) # @mock.patch('airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook', 'get_metastore_client') def test_get_max_partition_from_valid_part_specs_and_invalid_filter_map(self): with self.assertRaises(AirflowException): HiveMetastoreHook._get_max_partition_from_part_specs( [{'key1': 'value1', 'key2': 'value2'}, {'key1': 'value3', 'key2': 'value4'}], 'key1', {'key3': 'value5'}) def test_get_max_partition_from_valid_part_specs_and_invalid_partition_key(self): with self.assertRaises(AirflowException): HiveMetastoreHook._get_max_partition_from_part_specs( [{'key1': 'value1', 'key2': 'value2'}, {'key1': 'value3', 'key2': 'value4'}], 'key3', self.VALID_FILTER_MAP) def test_get_max_partition_from_valid_part_specs_and_none_partition_key(self): with self.assertRaises(AirflowException): HiveMetastoreHook._get_max_partition_from_part_specs( [{'key1': 'value1', 'key2': 'value2'}, {'key1': 'value3', 'key2': 'value4'}], None, self.VALID_FILTER_MAP) def test_get_max_partition_from_valid_part_specs_and_none_filter_map(self): max_partition = \ HiveMetastoreHook._get_max_partition_from_part_specs( [{'key1': 'value1', 'key2': 'value2'}, {'key1': 'value3', 'key2': 'value4'}], 'key1', None) # No partition will be filtered out. self.assertEqual(max_partition, b'value3') def test_get_max_partition_from_valid_part_specs(self): max_partition = \ HiveMetastoreHook._get_max_partition_from_part_specs( [{'key1': 'value1', 'key2': 'value2'}, {'key1': 'value3', 'key2': 'value4'}], 'key1', self.VALID_FILTER_MAP) self.assertEqual(max_partition, b'value1') @mock.patch("airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook.get_connection", return_value=[Connection(host="localhost", port="9802")]) @mock.patch("airflow.providers.apache.hive.hooks.hive.socket") def test_error_metastore_client(self, socket_mock, _find_valid_server_mock): socket_mock.socket.return_value.connect_ex.return_value = 0 self.hook.get_metastore_client() def test_get_conn(self): with mock.patch('airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook._find_valid_server' ) as find_valid_server: find_valid_server.return_value = mock.MagicMock(return_value={}) metastore_hook = HiveMetastoreHook() self.assertIsInstance(metastore_hook.get_conn(), HMSClient) def test_check_for_partition(self): # Check for existent partition. FakePartition = namedtuple('FakePartition', ['values']) fake_partition = FakePartition(['2015-01-01']) metastore = self.hook.metastore.__enter__() partition = "{p_by}='{date}'".format(date=DEFAULT_DATE_DS, p_by=self.partition_by) metastore.get_partitions_by_filter = mock.MagicMock( return_value=[fake_partition]) self.assertTrue( self.hook.check_for_partition(self.database, self.table, partition) ) metastore.get_partitions_by_filter( self.database, self.table, partition, 1) # Check for non-existent partition. 
missing_partition = "{p_by}='{date}'".format(date=self.next_day, p_by=self.partition_by) metastore.get_partitions_by_filter = mock.MagicMock(return_value=[]) self.assertFalse( self.hook.check_for_partition(self.database, self.table, missing_partition) ) metastore.get_partitions_by_filter.assert_called_with( self.database, self.table, missing_partition, 1) def test_check_for_named_partition(self): # Check for existing partition. partition = "{p_by}={date}".format(date=DEFAULT_DATE_DS, p_by=self.partition_by) self.hook.metastore.__enter__( ).check_for_named_partition = mock.MagicMock(return_value=True) self.assertTrue( self.hook.check_for_named_partition(self.database, self.table, partition)) self.hook.metastore.__enter__().check_for_named_partition.assert_called_with( self.database, self.table, partition) # Check for non-existent partition missing_partition = "{p_by}={date}".format(date=self.next_day, p_by=self.partition_by) self.hook.metastore.__enter__().check_for_named_partition = mock.MagicMock( return_value=False) self.assertFalse( self.hook.check_for_named_partition(self.database, self.table, missing_partition) ) self.hook.metastore.__enter__().check_for_named_partition.assert_called_with( self.database, self.table, missing_partition) def test_get_table(self): self.hook.metastore.__enter__().get_table = mock.MagicMock() self.hook.get_table(db=self.database, table_name=self.table) self.hook.metastore.__enter__().get_table.assert_called_with( dbname=self.database, tbl_name=self.table) def test_get_tables(self): # static_babynames_partitioned self.hook.metastore.__enter__().get_tables = mock.MagicMock( return_value=['static_babynames_partitioned']) self.hook.get_tables(db=self.database, pattern=self.table + "*") self.hook.metastore.__enter__().get_tables.assert_called_with( db_name='airflow', pattern='static_babynames_partitioned*') self.hook.metastore.__enter__().get_table_objects_by_name.assert_called_with( 'airflow', ['static_babynames_partitioned']) def test_get_databases(self): metastore = self.hook.metastore.__enter__() metastore.get_databases = mock.MagicMock() self.hook.get_databases(pattern='*') metastore.get_databases.assert_called_with('*') def test_get_partitions(self): FakeFieldSchema = namedtuple('FakeFieldSchema', ['name']) fake_schema = FakeFieldSchema('ds') FakeTable = namedtuple('FakeTable', ['partitionKeys']) fake_table = FakeTable([fake_schema]) FakePartition = namedtuple('FakePartition', ['values']) fake_partition = FakePartition(['2015-01-01']) metastore = self.hook.metastore.__enter__() metastore.get_table = mock.MagicMock(return_value=fake_table) metastore.get_partitions = mock.MagicMock( return_value=[fake_partition]) partitions = self.hook.get_partitions(schema=self.database, table_name=self.table) self.assertEqual(len(partitions), 1) self.assertEqual(partitions, [{self.partition_by: DEFAULT_DATE_DS}]) metastore.get_table.assert_called_with( dbname=self.database, tbl_name=self.table) metastore.get_partitions.assert_called_with( db_name=self.database, tbl_name=self.table, max_parts=HiveMetastoreHook.MAX_PART_COUNT) def test_max_partition(self): FakeFieldSchema = namedtuple('FakeFieldSchema', ['name']) fake_schema = FakeFieldSchema('ds') FakeTable = namedtuple('FakeTable', ['partitionKeys']) fake_table = FakeTable([fake_schema]) metastore = self.hook.metastore.__enter__() metastore.get_table = mock.MagicMock(return_value=fake_table) metastore.get_partition_names = mock.MagicMock( return_value=['ds=2015-01-01']) metastore.partition_name_to_spec = mock.MagicMock( 
return_value={'ds': '2015-01-01'}) filter_map = {self.partition_by: DEFAULT_DATE_DS} partition = self.hook.max_partition(schema=self.database, table_name=self.table, field=self.partition_by, filter_map=filter_map) self.assertEqual(partition, DEFAULT_DATE_DS.encode('utf-8')) metastore.get_table.assert_called_with( dbname=self.database, tbl_name=self.table) metastore.get_partition_names.assert_called_with( self.database, self.table, max_parts=HiveMetastoreHook.MAX_PART_COUNT) metastore.partition_name_to_spec.assert_called_with('ds=2015-01-01') def test_table_exists(self): # Test with existent table. self.hook.metastore.__enter__().get_table = mock.MagicMock(return_value=True) self.assertTrue(self.hook.table_exists(self.table, db=self.database)) self.hook.metastore.__enter__().get_table.assert_called_with( dbname='airflow', tbl_name='static_babynames_partitioned') # Test with non-existent table. self.hook.metastore.__enter__().get_table = mock.MagicMock(side_effect=Exception()) self.assertFalse( self.hook.table_exists("does-not-exist") ) self.hook.metastore.__enter__().get_table.assert_called_with( dbname='default', tbl_name='does-not-exist') class TestHiveServer2Hook(unittest.TestCase): def _upload_dataframe(self): df = pd.DataFrame({'a': [1, 2], 'b': [1, 2]}) self.local_path = '/tmp/TestHiveServer2Hook.csv' df.to_csv(self.local_path, header=False, index=False) def setUp(self): self._upload_dataframe() args = {'owner': 'airflow', 'start_date': DEFAULT_DATE} self.dag = DAG('test_dag_id', default_args=args) self.database = 'airflow' self.table = 'hive_server_hook' self.hql = """ CREATE DATABASE IF NOT EXISTS {{ params.database }}; USE {{ params.database }}; DROP TABLE IF EXISTS {{ params.table }}; CREATE TABLE IF NOT EXISTS {{ params.table }} ( a int, b int) ROW FORMAT DELIMITED FIELDS TERMINATED BY ','; LOAD DATA LOCAL INPATH '{{ params.csv_path }}' OVERWRITE INTO TABLE {{ params.table }}; """ self.columns = ['{}.a'.format(self.table), '{}.b'.format(self.table)] with mock.patch('airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook.get_metastore_client' ) as get_metastore_mock: get_metastore_mock.return_value = mock.MagicMock() self.hook = HiveMetastoreHook() def test_get_conn(self): hook = MockHiveServer2Hook() hook.get_conn() @mock.patch('pyhive.hive.connect') def test_get_conn_with_password(self, mock_connect): conn_id = "conn_with_password" conn_env = CONN_ENV_PREFIX + conn_id.upper() with mock.patch.dict( 'os.environ', {conn_env: "jdbc+hive2://conn_id:conn_pass@localhost:10000/default?authMechanism=LDAP"} ): HiveServer2Hook(hiveserver2_conn_id=conn_id).get_conn() mock_connect.assert_called_once_with( host='localhost', port=10000, auth='LDAP', kerberos_service_name=None, username='conn_id', password='conn_pass', database='default') def test_get_records(self): hook = MockHiveServer2Hook() query = "SELECT * FROM {}".format(self.table) with mock.patch.dict('os.environ', { 'AIRFLOW_CTX_DAG_ID': 'test_dag_id', 'AIRFLOW_CTX_TASK_ID': 'HiveHook_3835', 'AIRFLOW_CTX_EXECUTION_DATE': '2015-01-01T00:00:00+00:00', 'AIRFLOW_CTX_DAG_RUN_ID': '55', 'AIRFLOW_CTX_DAG_OWNER': 'airflow', 'AIRFLOW_CTX_DAG_EMAIL': 'test@airflow.com', }): results = hook.get_records(query, schema=self.database) self.assertListEqual(results, [(1, 1), (2, 2)]) hook.get_conn.assert_called_with(self.database) hook.mock_cursor.execute.assert_any_call( 'set airflow.ctx.dag_id=test_dag_id') hook.mock_cursor.execute.assert_any_call( 'set airflow.ctx.task_id=HiveHook_3835') hook.mock_cursor.execute.assert_any_call( 'set 
airflow.ctx.execution_date=2015-01-01T00:00:00+00:00') hook.mock_cursor.execute.assert_any_call( 'set airflow.ctx.dag_run_id=55') hook.mock_cursor.execute.assert_any_call( 'set airflow.ctx.dag_owner=airflow') hook.mock_cursor.execute.assert_any_call( 'set airflow.ctx.dag_email=test@airflow.com') def test_get_pandas_df(self): hook = MockHiveServer2Hook() query = "SELECT * FROM {}".format(self.table) with mock.patch.dict('os.environ', { 'AIRFLOW_CTX_DAG_ID': 'test_dag_id', 'AIRFLOW_CTX_TASK_ID': 'HiveHook_3835', 'AIRFLOW_CTX_EXECUTION_DATE': '2015-01-01T00:00:00+00:00', 'AIRFLOW_CTX_DAG_RUN_ID': '55', 'AIRFLOW_CTX_DAG_OWNER': 'airflow', 'AIRFLOW_CTX_DAG_EMAIL': 'test@airflow.com', }): df = hook.get_pandas_df(query, schema=self.database) self.assertEqual(len(df), 2) self.assertListEqual(df["hive_server_hook.a"].values.tolist(), [1, 2]) hook.get_conn.assert_called_with(self.database) hook.mock_cursor.execute.assert_any_call( 'set airflow.ctx.dag_id=test_dag_id') hook.mock_cursor.execute.assert_any_call( 'set airflow.ctx.task_id=HiveHook_3835') hook.mock_cursor.execute.assert_any_call( 'set airflow.ctx.execution_date=2015-01-01T00:00:00+00:00') hook.mock_cursor.execute.assert_any_call( 'set airflow.ctx.dag_run_id=55') hook.mock_cursor.execute.assert_any_call( 'set airflow.ctx.dag_owner=airflow') hook.mock_cursor.execute.assert_any_call( 'set airflow.ctx.dag_email=test@airflow.com') def test_get_results_header(self): hook = MockHiveServer2Hook() query = "SELECT * FROM {}".format(self.table) results = hook.get_results(query, schema=self.database) self.assertListEqual([col[0] for col in results['header']], self.columns) def test_get_results_data(self): hook = MockHiveServer2Hook() query = "SELECT * FROM {}".format(self.table) results = hook.get_results(query, schema=self.database) self.assertListEqual(results['data'], [(1, 1), (2, 2)]) def test_to_csv(self): hook = MockHiveServer2Hook() hook._get_results = mock.MagicMock(return_value=iter([ [ ('hive_server_hook.a', 'INT_TYPE', None, None, None, None, True), ('hive_server_hook.b', 'INT_TYPE', None, None, None, None, True) ], (1, 1), (2, 2) ])) query = "SELECT * FROM {}".format(self.table) csv_filepath = 'query_results.csv' hook.to_csv(query, csv_filepath, schema=self.database, delimiter=',', lineterminator='\n', output_header=True, fetch_size=2) df = pd.read_csv(csv_filepath, sep=',') self.assertListEqual(df.columns.tolist(), self.columns) self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2]) self.assertEqual(len(df), 2) def test_multi_statements(self): sqls = [ "CREATE TABLE IF NOT EXISTS test_multi_statements (i INT)", "SELECT * FROM {}".format(self.table), "DROP TABLE test_multi_statements", ] hook = MockHiveServer2Hook() with mock.patch.dict('os.environ', { 'AIRFLOW_CTX_DAG_ID': 'test_dag_id', 'AIRFLOW_CTX_TASK_ID': 'HiveHook_3835', 'AIRFLOW_CTX_EXECUTION_DATE': '2015-01-01T00:00:00+00:00', 'AIRFLOW_CTX_DAG_RUN_ID': '55', 'AIRFLOW_CTX_DAG_OWNER': 'airflow', 'AIRFLOW_CTX_DAG_EMAIL': 'test@airflow.com', }): # df = hook.get_pandas_df(query, schema=self.database) results = hook.get_records(sqls, schema=self.database) self.assertListEqual(results, [(1, 1), (2, 2)]) # self.assertEqual(len(df), 2) # self.assertListEqual(df["hive_server_hook.a"].values.tolist(), [1, 2]) hook.get_conn.assert_called_with(self.database) hook.mock_cursor.execute.assert_any_call( 'CREATE TABLE IF NOT EXISTS test_multi_statements (i INT)') hook.mock_cursor.execute.assert_any_call( 'SELECT * FROM {}'.format(self.table)) hook.mock_cursor.execute.assert_any_call( 
'DROP TABLE test_multi_statements') hook.mock_cursor.execute.assert_any_call( 'set airflow.ctx.dag_id=test_dag_id') hook.mock_cursor.execute.assert_any_call( 'set airflow.ctx.task_id=HiveHook_3835') hook.mock_cursor.execute.assert_any_call( 'set airflow.ctx.execution_date=2015-01-01T00:00:00+00:00') hook.mock_cursor.execute.assert_any_call( 'set airflow.ctx.dag_run_id=55') hook.mock_cursor.execute.assert_any_call( 'set airflow.ctx.dag_owner=airflow') hook.mock_cursor.execute.assert_any_call( 'set airflow.ctx.dag_email=test@airflow.com') def test_get_results_with_hive_conf(self): hql = ["set key", "set airflow.ctx.dag_id", "set airflow.ctx.dag_run_id", "set airflow.ctx.task_id", "set airflow.ctx.execution_date"] dag_id_ctx_var_name = \ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID']['env_var_format'] task_id_ctx_var_name = \ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID']['env_var_format'] execution_date_ctx_var_name = \ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][ 'env_var_format'] dag_run_id_ctx_var_name = \ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][ 'env_var_format'] with mock.patch.dict('os.environ', { dag_id_ctx_var_name: 'test_dag_id', task_id_ctx_var_name: 'test_task_id', execution_date_ctx_var_name: 'test_execution_date', dag_run_id_ctx_var_name: 'test_dag_run_id', }): hook = MockHiveServer2Hook() hook._get_results = mock.MagicMock(return_value=iter( ["header", ("value", "test"), ("test_dag_id", "test"), ("test_task_id", "test"), ("test_execution_date", "test"), ("test_dag_run_id", "test")] )) output = '\n'.join(res_tuple[0] for res_tuple in hook.get_results( hql=hql, hive_conf={'key': 'value'})['data']) self.assertIn('value', output) self.assertIn('test_dag_id', output) self.assertIn('test_task_id', output) self.assertIn('test_execution_date', output) self.assertIn('test_dag_run_id', output) class TestHiveCli(unittest.TestCase): def setUp(self): self.nondefault_schema = "nondefault" os.environ["AIRFLOW__CORE__SECURITY"] = "kerberos" def tearDown(self): del os.environ["AIRFLOW__CORE__SECURITY"] def test_get_proxy_user_value(self): hook = MockHiveCliHook() returner = mock.MagicMock() returner.extra_dejson = {'proxy_user': 'a_user_proxy'} hook.use_beeline = True hook.conn = returner # Run result = hook._prepare_cli_cmd() # Verify self.assertIn('hive.server2.proxy.user=a_user_proxy', result[2])
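A condensed, self-contained sketch of the patching pattern these tests rely on; the hook function and command here are made-up stand-ins rather than Airflow APIs.

import subprocess
from unittest import mock

def run_query(sql):
    # Made-up stand-in for a hook's run_cli: build a command and spawn it.
    cmd = ['beeline', '-e', sql]
    return subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()

@mock.patch('subprocess.Popen')
def test_run_query_builds_expected_command(mock_popen):
    mock_popen.return_value.communicate.return_value = (b'', b'')
    with mock.patch.dict('os.environ', {'AIRFLOW_CTX_DAG_ID': 'test_dag_id'}):
        run_query('SHOW DATABASES')
    mock_popen.assert_called_once_with(['beeline', '-e', 'SHOW DATABASES'],
                                       stdout=subprocess.PIPE)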
apache-2.0
joernhees/scikit-learn
examples/exercises/plot_iris_exercise.py
31
1622
""" ================================ SVM Exercise ================================ A tutorial exercise for using different SVM kernels. This exercise is used in the :ref:`using_kernels_tut` part of the :ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import datasets, svm iris = datasets.load_iris() X = iris.data y = iris.target X = X[y != 0, :2] y = y[y != 0] n_sample = len(X) np.random.seed(0) order = np.random.permutation(n_sample) X = X[order] y = y[order].astype(np.float) X_train = X[:int(.9 * n_sample)] y_train = y[:int(.9 * n_sample)] X_test = X[int(.9 * n_sample):] y_test = y[int(.9 * n_sample):] # fit the model for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')): clf = svm.SVC(kernel=kernel, gamma=10) clf.fit(X_train, y_train) plt.figure(fig_num) plt.clf() plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired) # Circle out the test data plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10) plt.axis('tight') x_min = X[:, 0].min() x_max = X[:, 0].max() y_min = X[:, 1].min() y_max = X[:, 1].max() XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j] Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()]) # Put the result into a color plot Z = Z.reshape(XX.shape) plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired) plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'], levels=[-.5, 0, .5]) plt.title(kernel) plt.show()
bsd-3-clause
evanbiederstedt/CMBintheLikeHoodz
patchwork/Patchwork_nside512_ex1.py
1
22119
from __future__ import (division, print_function, absolute_import) # In[2]: #get_ipython().magic(u'matplotlib inline') import math import matplotlib.pyplot as plt import numpy as np import healpy as hp import pyfits as pf import astropy as ap import os from scipy.special import eval_legendre ##special scipy function os.getcwd() os.chdir('/Users/evanbiederstedt/downloads') # In[3]: # Pixel 42 : [ 0.612372, 0.612372, 0.500000 ] # Pixel 57 : [ 0.783917, 0.523797, 0.333333 ] # Pixel 58 : [ 0.523797, 0.783917, 0.333333 ] # Pixel 74 : [ 0.697217, 0.697217, 0.166667 ] # In[4]: # # We define this pixel patch # # Note: We have to put into IDL format for 3D vectors, i.e. # HDIL> query_polygon, 512L, [[0.612372, 0.783917, 0.523797, 0.697217], # [0.612372, 0.523797, 0.783917, 0.697217], [0.500000, 0.333333, 0.333333, 0.166667]], listpix3, nlist3 # # In[5]: # # Now, save IDL .sav file of listpix3 # Import into Python and run # # In[6]: # http://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.io.readsav.html # http://www.astrobetter.com/blog/2009/11/24/read-idl-save-files-into-python/ # In[7]: import scipy # In[8]: # # scipy.io.readsav # # scipy.io.readsav(file_name, idict=None, python_dict=False, uncompressed_file_name=None, verbose=False)[source] # # Read an IDL .sav file # # # In[10]: import scipy.io # In[11]: patch_file = scipy.io.readsav('patch_listpix3.sav') # In[12]: type(patch_file) # In[13]: arr3 = patch_file['listpix3'] print(arr3) # In[14]: type(arr3) # In[15]: print(len(arr3)) # pixels total 12476 # In[16]: camb_map512 = "camb_map_nside512.fits" # In[17]: camb_map512 # In[18]: nside=512 npix = 12*(nside**2) #total number of pixels, npix LMAX = ((2*nside)) #maximum l of the power spectrum C_l heal_npix = hp.nside2npix(nside) # Healpix calculated npix print("The total number of pixels is " + str(npix)) print("The maximum ell of the power spectrum C_l set to lmax = 2*nside " +str(LMAX)) print("Healpix tells me total number of pixels npix is equal to " + str(heal_npix)) # In[19]: mapread_camb512 = hp.read_map(camb_map512) #hp.mollview(mapread_camb512) # In[20]: # rename array for convenience tempval = mapread_camb512 #print tempval # Data: # tempval # the array of pixel values, (3145728,) # In[21]: print(len(tempval)) print(tempval.shape) # In[22]: # # We only wish to use the pixels defined in our patch # These pixel indices are listed in arr3 such that total number pixels total 12476 # # arr3: this defines pixel indices within patch # # To access pixel indices within array of CMB pixels, just use tempval[arr3] # # In[23]: print(len(tempval[arr3])) # In[24]: # The log-likelihood # # -2lnL \propto m^T C^-1 m + ln det C + N ln (2pi) # # First term, m^T C^-1 m is the "model fit term" # Second term, lndetC is the "complexity penalty" # Third term, N ln 2pi, a constant # # m = tempval # C = Sij # In[25]: m = tempval[arr3] # In[26]: # Next, create the matrix, n_i /cdot n_j # solely using Healpy routines, i.e. 
taking the dot product of the vectors # The result is "dotproductmatrix" # In[27]: npix # In[28]: nside # In[29]: ## healpy.pixelfunc.pix2vec(nside, ipix, nest=False) ## ## will give three arrays ## arrays of all x values, all y values, all z values ## RING scheme default # len()=3 # type()=tuple # In[30]: vecval = hp.pix2vec(nside, arr3) #Nside = 512, type()=tuple # In[31]: len(vecval) # In[32]: vecvalx = vecval[0] #len() = 12476 vecvaly = vecval[1] vecvalz = vecval[2] # In[33]: # First arrange arrays vertically # numpy.vstack = Stack arrays in sequence vertically (row wise), input sequence of arrays totalvecval = np.vstack((vecvalx, vecvaly, vecvalz)) #type()=numpy.ndarray # In[34]: trans = totalvecval.T #transpose # In[35]: dotproductmatrix = trans.dot(totalvecval) #take the dot product # dotproductmatrix.shape = (npix, npix) = (12476, 12476) # type(dotproductmatrix) = np.ndarray # In[36]: # # The following procedure is for the angular power spectrum, C^th_ell # However, we are using some cosmological parameter, /alpha # # # ========================================================= # ========================================================= # # \Sum_l (2*l + 1)/4pi C^th_l P_l (dotproductmatrix) # sum from l=2 to l=lmax # # arrays l = [2 3 4 .... lmax] # C_l = [C_2 C_3 .... C_lmax] # # The correct way to do the summation: # # Step 1: calculate the matrix # M = dotproductmatrix # # Step 2: evaluate the function P_l(x) for each entry of the matrix # OUTPUT: [P_2(M) P_3(M) P_4(M) .... P_lmax(M) ] # # Step 3: (2*l +1)/4pi from l=2 to l=lmax # [5/4pi 7/4pi 9/4pi 11/4pi .... 65/4pi ] # # Step 4: multiply # [5/4pi*P_2(M) + 7/4pi*P_3(M) +...... + 65/4pi*P_32(M)] # # # Step 5: multiply by theoretical CAMB values, [C_2 C_3 C_31 C_32] # [5/4pi**C_2* P_2(M) + 7/4pi*C_3* P_3(M) +...... + 65/4pi*C_32* P_32(M)] # # Step 6: This is an array of S_ij for each theory C_l, l=2 to l=32 # # # # ========================================================= # ========================================================= # In[37]: print(dotproductmatrix) # In[38]: # # Let's first just take l_max = nside # so, that's lmax = 512 # # In[39]: # For lmax = 512, we must create an array of ell values, i.e. [0 1 2 3....31 32] ell = np.arange(513) print(ell) # # Subtract the monopole and dipole, l=0, l=1 ellval = ell[2:] print(ellval) # In[40]: # The correct way to do the summation: # # Step 1: calculate the matrix # M = dotproductmatrix # # Step 2: evaluate the function P_l(x) for each entry of the matrix # OUTPUT: [P_2(M) P_3(M) P_4(M) .... P_lmax(M) ] # # Step 3: (2*l +1)/4pi from l=2 to l=lmax # [5/4pi 7/4pi 9/4pi 11/4pi .... 65/4pi ] # # Step 4: multiply # [5/4pi*P_2(M) + 7/4pi*P_3(M) +...... + 65/4pi*P_32(M)] # # # Step 5: multiply by theoretical CAMB values, [C_2 C_3 C_31 C_32] # [5/4pi**C_2* P_2(M) + 7/4pi*C_3* P_3(M) +...... + 65/4pi*C_32* P_32(M)] # # Step 6: This is an array of S_ij for each theory C_l, l=2 to l=32 # # In[41]: dotproductmatrix.shape # In[42]: # Step 1: calculate the matrix M = dotproductmatrix # In[43]: # Step 2: evaluate the function P_l(x) for each entry of the matrix # OUTPUT: [P_2(M) P_3(M) P_4(M) .... P_lmax(M) ] # In[44]: # CODE BOTTLENECK! # # Evaluate Legendre from l=2 to l=lmax for each matrix entry # [P_2(M) P_3(M) P_4(M) .... 
P_lmax(M) ] # # WITHOUT BROADCASTING, one would do something like # PlMat = [] # for i in ellval: # PlMat.append( eval_legendre(i, dotproductmatrix) ) # # # With broadcasting, we use # PlMat = eval_legendre(ellval[:, None, None], dotproductmatrix) # PlMat = [P_2(M) P_3(M) P_4(M) .... P_lmax(M) ] # PlMat is an array, len()=31 of 31 3072 by 3072 matrices # PlMat.shape = (31, 3072, 3072) # In[45]: #This doesn't run for lmax=512 #So, split 'ellval' into ten arrays and then sum afterwards splitell = np.array_split(ellval, 150) splitell[0] # In[46]: PlMat1 = eval_legendre(splitell[0][:, None, None], dotproductmatrix) # In[47]: PlMat2 = eval_legendre(splitell[1][:, None, None], dotproductmatrix) # In[48]: PlMat3 = eval_legendre(splitell[2][:, None, None], dotproductmatrix) # In[49]: PlMat4 = eval_legendre(splitell[3][:, None, None], dotproductmatrix) # In[50]: PlMat5 = eval_legendre(splitell[4][:, None, None], dotproductmatrix) # In[51]: PlMat6 = eval_legendre(splitell[5][:, None, None], dotproductmatrix) # In[ ]: PlMat7 = eval_legendre(splitell[6][:, None, None], dotproductmatrix) # In[ ]: PlMat8 = eval_legendre(splitell[7][:, None, None], dotproductmatrix) # In[ ]: PlMat9 = eval_legendre(splitell[8][:, None, None], dotproductmatrix) # In[ ]: # In[ ]: PlMat10 = eval_legendre(splitell[9][:, None, None], dotproductmatrix) # In[ ]: PlMat11 = eval_legendre(splitell[10][:, None, None], dotproductmatrix) # In[ ]: PlMat12 = eval_legendre(splitell[11][:, None, None], dotproductmatrix) # In[ ]: PlMat13 = eval_legendre(splitell[12][:, None, None], dotproductmatrix) # In[ ]: PlMat14 = eval_legendre(splitell[13][:, None, None], dotproductmatrix) # In[ ]: PlMat15 = eval_legendre(splitell[14][:, None, None], dotproductmatrix) # In[ ]: PlMat16 = eval_legendre(splitell[15][:, None, None], dotproductmatrix) # In[ ]: PlMat17 = eval_legendre(splitell[16][:, None, None], dotproductmatrix) # In[ ]: PlMat18 = eval_legendre(splitell[17][:, None, None], dotproductmatrix) # In[ ]: PlMat19 = eval_legendre(splitell[18][:, None, None], dotproductmatrix) # In[ ]: PlMat20 = eval_legendre(splitell[19][:, None, None], dotproductmatrix) # In[ ]: PlMat21 = eval_legendre(splitell[20][:, None, None], dotproductmatrix) # In[ ]: PlMat22 = eval_legendre(splitell[21][:, None, None], dotproductmatrix) # In[ ]: PlMat23 = eval_legendre(splitell[22][:, None, None], dotproductmatrix) # In[ ]: PlMat24 = eval_legendre(splitell[23][:, None, None], dotproductmatrix) # In[ ]: PlMat25 = eval_legendre(splitell[24][:, None, None], dotproductmatrix) # In[ ]: PlMat26 = eval_legendre(splitell[25][:, None, None], dotproductmatrix) # In[ ]: PlMat27 = eval_legendre(splitell[26][:, None, None], dotproductmatrix) # In[ ]: PlMat28 = eval_legendre(splitell[27][:, None, None], dotproductmatrix) # In[ ]: PlMat29 = eval_legendre(splitell[28][:, None, None], dotproductmatrix) # In[ ]: PlMat30 = eval_legendre(splitell[29][:, None, None], dotproductmatrix) # In[ ]: PlMat31 = eval_legendre(splitell[30][:, None, None], dotproductmatrix) # In[ ]: PlMat32 = eval_legendre(splitell[31][:, None, None], dotproductmatrix) # In[ ]: PlMat33 = eval_legendre(splitell[32][:, None, None], dotproductmatrix) # In[ ]: PlMat34 = eval_legendre(splitell[33][:, None, None], dotproductmatrix) # In[ ]: PlMat35 = eval_legendre(splitell[34][:, None, None], dotproductmatrix) # In[ ]: PlMat36 = eval_legendre(splitell[35][:, None, None], dotproductmatrix) # In[ ]: PlMat37 = eval_legendre(splitell[36][:, None, None], dotproductmatrix) # In[ ]: PlMat38 = eval_legendre(splitell[37][:, None, 
None], dotproductmatrix) # In[ ]: PlMat39 = eval_legendre(splitell[38][:, None, None], dotproductmatrix) # In[ ]: PlMat40 = eval_legendre(splitell[39][:, None, None], dotproductmatrix) # In[ ]: PlMat41 = eval_legendre(splitell[40][:, None, None], dotproductmatrix) # In[ ]: PlMat42 = eval_legendre(splitell[41][:, None, None], dotproductmatrix) # In[ ]: PlMat43 = eval_legendre(splitell[42][:, None, None], dotproductmatrix) # In[ ]: PlMat44 = eval_legendre(splitell[43][:, None, None], dotproductmatrix) # In[ ]: PlMat45 = eval_legendre(splitell[44][:, None, None], dotproductmatrix) # In[ ]: PlMat46 = eval_legendre(splitell[45][:, None, None], dotproductmatrix) # In[ ]: PlMat47 = eval_legendre(splitell[46][:, None, None], dotproductmatrix) # In[ ]: PlMat48 = eval_legendre(splitell[47][:, None, None], dotproductmatrix) # In[ ]: PlMat49 = eval_legendre(splitell[48][:, None, None], dotproductmatrix) # In[ ]: PlMat50 = eval_legendre(splitell[49][:, None, None], dotproductmatrix) # In[ ]: PlMat51 = eval_legendre(splitell[50][:, None, None], dotproductmatrix) # In[ ]: PlMat52 = eval_legendre(splitell[51][:, None, None], dotproductmatrix) # In[ ]: PlMat53 = eval_legendre(splitell[52][:, None, None], dotproductmatrix) # In[ ]: PlMat54 = eval_legendre(splitell[53][:, None, None], dotproductmatrix) # In[ ]: PlMat55 = eval_legendre(splitell[54][:, None, None], dotproductmatrix) # In[ ]: PlMat56 = eval_legendre(splitell[55][:, None, None], dotproductmatrix) # In[ ]: PlMat57 = eval_legendre(splitell[56][:, None, None], dotproductmatrix) # In[ ]: PlMat58 = eval_legendre(splitell[57][:, None, None], dotproductmatrix) # In[ ]: PlMat59 = eval_legendre(splitell[58][:, None, None], dotproductmatrix) # In[ ]: PlMat60 = eval_legendre(splitell[59][:, None, None], dotproductmatrix) # In[ ]: PlMat61 = eval_legendre(splitell[60][:, None, None], dotproductmatrix) # In[ ]: PlMat62 = eval_legendre(splitell[61][:, None, None], dotproductmatrix) # In[ ]: PlMat63 = eval_legendre(splitell[62][:, None, None], dotproductmatrix) # In[ ]: PlMat64 = eval_legendre(splitell[63][:, None, None], dotproductmatrix) # In[ ]: PlMat65 = eval_legendre(splitell[64][:, None, None], dotproductmatrix) # In[ ]: PlMat66 = eval_legendre(splitell[65][:, None, None], dotproductmatrix) # In[ ]: PlMat67 = eval_legendre(splitell[66][:, None, None], dotproductmatrix) # In[ ]: PlMat68 = eval_legendre(splitell[67][:, None, None], dotproductmatrix) # In[ ]: PlMat69 = eval_legendre(splitell[68][:, None, None], dotproductmatrix) # In[ ]: PlMat70 = eval_legendre(splitell[69][:, None, None], dotproductmatrix) # In[ ]: PlMat71 = eval_legendre(splitell[70][:, None, None], dotproductmatrix) # In[ ]: PlMat72 = eval_legendre(splitell[71][:, None, None], dotproductmatrix) # In[ ]: PlMat73 = eval_legendre(splitell[72][:, None, None], dotproductmatrix) # In[ ]: PlMat74 = eval_legendre(splitell[73][:, None, None], dotproductmatrix) # In[ ]: PlMat75 = eval_legendre(splitell[74][:, None, None], dotproductmatrix) # In[ ]: PlMat76 = eval_legendre(splitell[75][:, None, None], dotproductmatrix) # In[ ]: PlMat77 = eval_legendre(splitell[76][:, None, None], dotproductmatrix) # In[ ]: PlMat78 = eval_legendre(splitell[77][:, None, None], dotproductmatrix) # In[ ]: PlMat79 = eval_legendre(splitell[78][:, None, None], dotproductmatrix) # In[ ]: PlMat80 = eval_legendre(splitell[79][:, None, None], dotproductmatrix) # In[ ]: PlMat81 = eval_legendre(splitell[80][:, None, None], dotproductmatrix) # In[ ]: PlMat82 = eval_legendre(splitell[81][:, None, None], dotproductmatrix) # In[ ]: 
PlMat83 = eval_legendre(splitell[82][:, None, None], dotproductmatrix) # In[ ]: PlMat84 = eval_legendre(splitell[83][:, None, None], dotproductmatrix) # In[ ]: PlMat85 = eval_legendre(splitell[84][:, None, None], dotproductmatrix) # In[ ]: PlMat86 = eval_legendre(splitell[85][:, None, None], dotproductmatrix) # In[ ]: PlMat87 = eval_legendre(splitell[86][:, None, None], dotproductmatrix) # In[ ]: PlMat88 = eval_legendre(splitell[87][:, None, None], dotproductmatrix) # In[ ]: PlMat89 = eval_legendre(splitell[88][:, None, None], dotproductmatrix) # In[ ]: PlMat90 = eval_legendre(splitell[89][:, None, None], dotproductmatrix) # In[ ]: PlMat91 = eval_legendre(splitell[90][:, None, None], dotproductmatrix) # In[ ]: PlMat92 = eval_legendre(splitell[91][:, None, None], dotproductmatrix) # In[ ]: PlMat93 = eval_legendre(splitell[92][:, None, None], dotproductmatrix) # In[ ]: PlMat94 = eval_legendre(splitell[93][:, None, None], dotproductmatrix) # In[ ]: PlMat95 = eval_legendre(splitell[94][:, None, None], dotproductmatrix) # In[ ]: PlMat96 = eval_legendre(splitell[95][:, None, None], dotproductmatrix) # In[ ]: PlMat97 = eval_legendre(splitell[96][:, None, None], dotproductmatrix) # In[ ]: PlMat98 = eval_legendre(splitell[97][:, None, None], dotproductmatrix) # In[ ]: PlMat99 = eval_legendre(splitell[98][:, None, None], dotproductmatrix) # In[ ]: PlMat100 = eval_legendre(splitell[99][:, None, None], dotproductmatrix) # In[ ]: PlMat101 = eval_legendre(splitell[100][:, None, None], dotproductmatrix) # In[ ]: PlMat102 = eval_legendre(splitell[101][:, None, None], dotproductmatrix) # In[ ]: PlMat103 = eval_legendre(splitell[102][:, None, None], dotproductmatrix) # In[ ]: PlMat104 = eval_legendre(splitell[103][:, None, None], dotproductmatrix) # In[ ]: PlMat105 = eval_legendre(splitell[104][:, None, None], dotproductmatrix) # In[ ]: PlMat106 = eval_legendre(splitell[105][:, None, None], dotproductmatrix) # In[ ]: PlMat107 = eval_legendre(splitell[106][:, None, None], dotproductmatrix) # In[ ]: PlMat108 = eval_legendre(splitell[107][:, None, None], dotproductmatrix) # In[ ]: PlMat109 = eval_legendre(splitell[108][:, None, None], dotproductmatrix) # In[ ]: PlMat110 = eval_legendre(splitell[109][:, None, None], dotproductmatrix) # In[ ]: PlMat111 = eval_legendre(splitell[110][:, None, None], dotproductmatrix) # In[ ]: PlMat112 = eval_legendre(splitell[111][:, None, None], dotproductmatrix) # In[ ]: PlMat113 = eval_legendre(splitell[112][:, None, None], dotproductmatrix) # In[ ]: PlMat114 = eval_legendre(splitell[113][:, None, None], dotproductmatrix) # In[ ]: PlMat115 = eval_legendre(splitell[114][:, None, None], dotproductmatrix) # In[ ]: PlMat116 = eval_legendre(splitell[115][:, None, None], dotproductmatrix) # In[ ]: PlMat117 = eval_legendre(splitell[116][:, None, None], dotproductmatrix) # In[ ]: PlMat118 = eval_legendre(splitell[117][:, None, None], dotproductmatrix) # In[ ]: PlMat119 = eval_legendre(splitell[118][:, None, None], dotproductmatrix) # In[ ]: PlMat120 = eval_legendre(splitell[119][:, None, None], dotproductmatrix) # In[ ]: PlMat121 = eval_legendre(splitell[120][:, None, None], dotproductmatrix) # In[ ]: PlMat122 = eval_legendre(splitell[121][:, None, None], dotproductmatrix) # In[ ]: PlMat123 = eval_legendre(splitell[122][:, None, None], dotproductmatrix) # In[ ]: PlMat124 = eval_legendre(splitell[123][:, None, None], dotproductmatrix) # In[ ]: PlMat125 = eval_legendre(splitell[124][:, None, None], dotproductmatrix) # In[ ]: PlMat126 = eval_legendre(splitell[125][:, None, None], 
dotproductmatrix) # In[ ]: PlMat127 = eval_legendre(splitell[126][:, None, None], dotproductmatrix) # In[ ]: PlMat128 = eval_legendre(splitell[127][:, None, None], dotproductmatrix) # In[ ]: PlMat129 = eval_legendre(splitell[128][:, None, None], dotproductmatrix) # In[ ]: PlMat130 = eval_legendre(splitell[129][:, None, None], dotproductmatrix) # In[ ]: PlMat131 = eval_legendre(splitell[130][:, None, None], dotproductmatrix) # In[ ]: PlMat132 = eval_legendre(splitell[131][:, None, None], dotproductmatrix) # In[ ]: PlMat133 = eval_legendre(splitell[132][:, None, None], dotproductmatrix) # In[ ]: PlMat134 = eval_legendre(splitell[133][:, None, None], dotproductmatrix) # In[ ]: PlMat135 = eval_legendre(splitell[134][:, None, None], dotproductmatrix) # In[ ]: PlMat136 = eval_legendre(splitell[135][:, None, None], dotproductmatrix) # In[ ]: PlMat137 = eval_legendre(splitell[136][:, None, None], dotproductmatrix) # In[ ]: PlMat138 = eval_legendre(splitell[137][:, None, None], dotproductmatrix) # In[ ]: PlMat139 = eval_legendre(splitell[138][:, None, None], dotproductmatrix) # In[ ]: PlMat140 = eval_legendre(splitell[139][:, None, None], dotproductmatrix) # In[ ]: PlMat141 = eval_legendre(splitell[140][:, None, None], dotproductmatrix) # In[ ]: PlMat142 = eval_legendre(splitell[141][:, None, None], dotproductmatrix) # In[ ]: PlMat143 = eval_legendre(splitell[142][:, None, None], dotproductmatrix) # In[ ]: PlMat144 = eval_legendre(splitell[143][:, None, None], dotproductmatrix) # In[ ]: PlMat145 = eval_legendre(splitell[144][:, None, None], dotproductmatrix) # In[ ]: PlMat146 = eval_legendre(splitell[145][:, None, None], dotproductmatrix) # In[ ]: PlMat147 = eval_legendre(splitell[146][:, None, None], dotproductmatrix) # In[ ]: PlMat148 = eval_legendre(splitell[147][:, None, None], dotproductmatrix) # In[ ]: PlMat149 = eval_legendre(splitell[148][:, None, None], dotproductmatrix) # In[ ]: PlMat150 = eval_legendre(splitell[149][:, None, None], dotproductmatrix) # In[ ]: splitell[49] # In[ ]: PlMat_total = np.concatenate((PlMat1, PlMat2, PlMat3, PlMat4, PlMat5, PlMat6, PlMat7, PlMat8, PlMat9, PlMat10, PlMat11, PlMat12, PlMat13, PlMat14, PlMat15, PlMat16, PlMat17, PlMat18, PlMat19, PlMat20, PlMat21, PlMat22, PlMat23, PlMat24, PlMat25, PlMat26, PlMat27, PlMat28, PlMat29, PlMat30, PlMat31, PlMat32, PlMat33, PlMat34, PlMat35, PlMat36, PlMat37, PlMat38, PlMat39, PlMat40, PlMat41, PlMat42, PlMat43, PlMat44, PlMat45, PlMat46, PlMat47, PlMat48, PlMat49, PlMat50, PlMat51, PlMat52, PlMat53, PlMat54, PlMat55, PlMat56, PlMat57, PlMat58, PlMat59, PlMat60, PlMat61, PlMat62, PlMat63, PlMat64, PlMat65, PlMat66, PlMat67, PlMat68, PlMat69, PlMat70, PlMat71, PlMat72, PlMat73, PlMat74, PlMat75, PlMat76, PlMat77, PlMat78, PlMat79, PlMat80, PlMat81, PlMat82, PlMat83, PlMat84, PlMat85, PlMat86, PlMat87, PlMat88, PlMat89, PlMat90, PlMat91, PlMat92, PlMat93, PlMat94, PlMat95, PlMat96, PlMat97, PlMat98, PlMat99, PlMat100, PlMat101, PlMat102, PlMat103, PlMat104, PlMat105, PlMat106, PlMat107, PlMat108, PlMat109, PlMat110, PlMat111, PlMat112, PlMat113, PlMat114, PlMat115, PlMat116, PlMat117, PlMat118, PlMat119, PlMat120, PlMat121, PlMat122, PlMat123, PlMat124, PlMat125, PlMat126, PlMat127, PlMat128, PlMat129, PlMat130, PlMat131, PlMat132, PlMat133, PlMat134, PlMat135, PlMat136, PlMat137, PlMat138, PlMat139, PlMat140, PlMat141, PlMat142, PlMat143, PlMat144, PlMat145, PlMat146, PlMat147, PlMat148, PlMat149, PlMat150)) # In[ ]: import cPickle as pickle file_Name = "testfileNov18" # open the file for writing fileObject = 
open(file_Name,'wb') # this writes the object a to the # file named 'testfile' pickle.dump(PlMat_total, fileObject) # here we close the fileObject fileObject.close() # In[ ]: # Step 3: (2*l +1)/4pi from l=2 to l=lmax # [5/4pi 7/4pi 9/4pi 11/4pi .... 65/4pi ] #norm = ((2*ellval + 1))/(4*math.pi) #print(norm)
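The 150 hand-unrolled eval_legendre blocks above can be collapsed into a single accumulation loop. The sketch below is one way to do it, assuming a theory spectrum array cl_theory (a placeholder, not computed in this script): it builds S_ij = sum_l (2l+1)/(4 pi) C_l P_l(n_i . n_j) a few multipoles at a time, so no full stack of P_l matrices is ever stored or pickled.

import numpy as np
from scipy.special import eval_legendre

def covariance_from_cls(cl_theory, dotproductmatrix, lmin=2, chunk=8):
    """Accumulate S_ij = sum_l (2l+1)/(4*pi) * C_l * P_l(n_i . n_j).

    cl_theory : 1-D array of theory C_l values indexed by l (placeholder input).
    dotproductmatrix : (npix, npix) matrix of n_i . n_j values.
    """
    lmax = len(cl_theory) - 1
    S = np.zeros_like(dotproductmatrix)
    for l_start in range(lmin, lmax + 1, chunk):
        ells = np.arange(l_start, min(l_start + chunk, lmax + 1))
        # P_l evaluated on every matrix entry, a few multipoles at a time
        P = eval_legendre(ells[:, None, None], dotproductmatrix)
        weights = (2 * ells + 1) / (4 * np.pi) * cl_theory[ells]
        S += np.tensordot(weights, P, axes=1)
    return S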
mit
mhdella/scikit-learn
examples/linear_model/lasso_dense_vs_sparse_data.py
348
1862
""" ============================== Lasso on dense and sparse data ============================== We show that linear_model.Lasso provides the same results for dense and sparse data and that in the case of sparse data the speed is improved. """ print(__doc__) from time import time from scipy import sparse from scipy import linalg from sklearn.datasets.samples_generator import make_regression from sklearn.linear_model import Lasso ############################################################################### # The two Lasso implementations on Dense data print("--- Dense matrices") X, y = make_regression(n_samples=200, n_features=5000, random_state=0) X_sp = sparse.coo_matrix(X) alpha = 1 sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000) dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000) t0 = time() sparse_lasso.fit(X_sp, y) print("Sparse Lasso done in %fs" % (time() - t0)) t0 = time() dense_lasso.fit(X, y) print("Dense Lasso done in %fs" % (time() - t0)) print("Distance between coefficients : %s" % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_)) ############################################################################### # The two Lasso implementations on Sparse data print("--- Sparse matrices") Xs = X.copy() Xs[Xs < 2.5] = 0.0 Xs = sparse.coo_matrix(Xs) Xs = Xs.tocsc() print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100)) alpha = 0.1 sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000) dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000) t0 = time() sparse_lasso.fit(Xs, y) print("Sparse Lasso done in %fs" % (time() - t0)) t0 = time() dense_lasso.fit(Xs.toarray(), y) print("Dense Lasso done in %fs" % (time() - t0)) print("Distance between coefficients : %s" % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
bsd-3-clause
xiaoxiamii/scikit-learn
sklearn/utils/__init__.py
79
14202
""" The :mod:`sklearn.utils` module includes various utilities. """ from collections import Sequence import numpy as np from scipy.sparse import issparse import warnings from .murmurhash import murmurhash3_32 from .validation import (as_float_array, assert_all_finite, check_random_state, column_or_1d, check_array, check_consistent_length, check_X_y, indexable, check_symmetric, DataConversionWarning) from .class_weight import compute_class_weight, compute_sample_weight from ..externals.joblib import cpu_count __all__ = ["murmurhash3_32", "as_float_array", "assert_all_finite", "check_array", "check_random_state", "compute_class_weight", "compute_sample_weight", "column_or_1d", "safe_indexing", "check_consistent_length", "check_X_y", 'indexable', "check_symmetric"] class deprecated(object): """Decorator to mark a function or class as deprecated. Issue a warning when the function is called/the class is instantiated and adds a warning to the docstring. The optional extra argument will be appended to the deprecation message and the docstring. Note: to use this with the default value for extra, put in an empty of parentheses: >>> from sklearn.utils import deprecated >>> deprecated() # doctest: +ELLIPSIS <sklearn.utils.deprecated object at ...> >>> @deprecated() ... def some_function(): pass """ # Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary, # but with many changes. def __init__(self, extra=''): """ Parameters ---------- extra: string to be added to the deprecation messages """ self.extra = extra def __call__(self, obj): if isinstance(obj, type): return self._decorate_class(obj) else: return self._decorate_fun(obj) def _decorate_class(self, cls): msg = "Class %s is deprecated" % cls.__name__ if self.extra: msg += "; %s" % self.extra # FIXME: we should probably reset __new__ for full generality init = cls.__init__ def wrapped(*args, **kwargs): warnings.warn(msg, category=DeprecationWarning) return init(*args, **kwargs) cls.__init__ = wrapped wrapped.__name__ = '__init__' wrapped.__doc__ = self._update_doc(init.__doc__) wrapped.deprecated_original = init return cls def _decorate_fun(self, fun): """Decorate function fun""" msg = "Function %s is deprecated" % fun.__name__ if self.extra: msg += "; %s" % self.extra def wrapped(*args, **kwargs): warnings.warn(msg, category=DeprecationWarning) return fun(*args, **kwargs) wrapped.__name__ = fun.__name__ wrapped.__dict__ = fun.__dict__ wrapped.__doc__ = self._update_doc(fun.__doc__) return wrapped def _update_doc(self, olddoc): newdoc = "DEPRECATED" if self.extra: newdoc = "%s: %s" % (newdoc, self.extra) if olddoc: newdoc = "%s\n\n%s" % (newdoc, olddoc) return newdoc def safe_mask(X, mask): """Return a mask which is safe to use on X. Parameters ---------- X : {array-like, sparse matrix} Data on which to apply mask. mask: array Mask to be used on X. Returns ------- mask """ mask = np.asarray(mask) if np.issubdtype(mask.dtype, np.int): return mask if hasattr(X, "toarray"): ind = np.arange(mask.shape[0]) mask = ind[mask] return mask def safe_indexing(X, indices): """Return items or rows from X using indices. Allows simple indexing of lists or arrays. Parameters ---------- X : array-like, sparse-matrix, list. Data from which to sample rows or items. indices : array-like, list Indices according to which X will be subsampled. """ if hasattr(X, "iloc"): # Pandas Dataframes and Series try: return X.iloc[indices] except ValueError: # Cython typed memoryviews internally used in pandas do not support # readonly buffers. 
warnings.warn("Copying input dataframe for slicing.", DataConversionWarning) return X.copy().iloc[indices] elif hasattr(X, "shape"): if hasattr(X, 'take') and (hasattr(indices, 'dtype') and indices.dtype.kind == 'i'): # This is often substantially faster than X[indices] return X.take(indices, axis=0) else: return X[indices] else: return [X[idx] for idx in indices] def resample(*arrays, **options): """Resample arrays or sparse matrices in a consistent way The default strategy implements one step of the bootstrapping procedure. Parameters ---------- *arrays : sequence of indexable data-structures Indexable data-structures can be arrays, lists, dataframes or scipy sparse matrices with consistent first dimension. replace : boolean, True by default Implements resampling with replacement. If False, this will implement (sliced) random permutations. n_samples : int, None by default Number of samples to generate. If left to None this is automatically set to the first dimension of the arrays. random_state : int or RandomState instance Control the shuffling for reproducible behavior. Returns ------- resampled_arrays : sequence of indexable data-structures Sequence of resampled views of the collections. The original arrays are not impacted. Examples -------- It is possible to mix sparse and dense arrays in the same run:: >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) >>> y = np.array([0, 1, 2]) >>> from scipy.sparse import coo_matrix >>> X_sparse = coo_matrix(X) >>> from sklearn.utils import resample >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0) >>> X array([[ 1., 0.], [ 2., 1.], [ 1., 0.]]) >>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE <3x2 sparse matrix of type '<... 'numpy.float64'>' with 4 stored elements in Compressed Sparse Row format> >>> X_sparse.toarray() array([[ 1., 0.], [ 2., 1.], [ 1., 0.]]) >>> y array([0, 1, 0]) >>> resample(y, n_samples=2, random_state=0) array([0, 1]) See also -------- :func:`sklearn.utils.shuffle` """ random_state = check_random_state(options.pop('random_state', None)) replace = options.pop('replace', True) max_n_samples = options.pop('n_samples', None) if options: raise ValueError("Unexpected kw arguments: %r" % options.keys()) if len(arrays) == 0: return None first = arrays[0] n_samples = first.shape[0] if hasattr(first, 'shape') else len(first) if max_n_samples is None: max_n_samples = n_samples if max_n_samples > n_samples: raise ValueError("Cannot sample %d out of arrays with dim %d" % ( max_n_samples, n_samples)) check_consistent_length(*arrays) if replace: indices = random_state.randint(0, n_samples, size=(max_n_samples,)) else: indices = np.arange(n_samples) random_state.shuffle(indices) indices = indices[:max_n_samples] # convert sparse matrices to CSR for row-based indexing arrays = [a.tocsr() if issparse(a) else a for a in arrays] resampled_arrays = [safe_indexing(a, indices) for a in arrays] if len(resampled_arrays) == 1: # syntactic sugar for the unit argument case return resampled_arrays[0] else: return resampled_arrays def shuffle(*arrays, **options): """Shuffle arrays or sparse matrices in a consistent way This is a convenience alias to ``resample(*arrays, replace=False)`` to do random permutations of the collections. Parameters ---------- *arrays : sequence of indexable data-structures Indexable data-structures can be arrays, lists, dataframes or scipy sparse matrices with consistent first dimension. random_state : int or RandomState instance Control the shuffling for reproducible behavior. 
n_samples : int, None by default Number of samples to generate. If left to None this is automatically set to the first dimension of the arrays. Returns ------- shuffled_arrays : sequence of indexable data-structures Sequence of shuffled views of the collections. The original arrays are not impacted. Examples -------- It is possible to mix sparse and dense arrays in the same run:: >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) >>> y = np.array([0, 1, 2]) >>> from scipy.sparse import coo_matrix >>> X_sparse = coo_matrix(X) >>> from sklearn.utils import shuffle >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0) >>> X array([[ 0., 0.], [ 2., 1.], [ 1., 0.]]) >>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE <3x2 sparse matrix of type '<... 'numpy.float64'>' with 3 stored elements in Compressed Sparse Row format> >>> X_sparse.toarray() array([[ 0., 0.], [ 2., 1.], [ 1., 0.]]) >>> y array([2, 1, 0]) >>> shuffle(y, n_samples=2, random_state=0) array([0, 1]) See also -------- :func:`sklearn.utils.resample` """ options['replace'] = False return resample(*arrays, **options) def safe_sqr(X, copy=True): """Element wise squaring of array-likes and sparse matrices. Parameters ---------- X : array like, matrix, sparse matrix copy : boolean, optional, default True Whether to create a copy of X and operate on it or to perform inplace computation (default behaviour). Returns ------- X ** 2 : element wise square """ X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False) if issparse(X): if copy: X = X.copy() X.data **= 2 else: if copy: X = X ** 2 else: X **= 2 return X def gen_batches(n, batch_size): """Generator to create slices containing batch_size elements, from 0 to n. The last slice may contain less than batch_size elements, when batch_size does not divide n. Examples -------- >>> from sklearn.utils import gen_batches >>> list(gen_batches(7, 3)) [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)] >>> list(gen_batches(6, 3)) [slice(0, 3, None), slice(3, 6, None)] >>> list(gen_batches(2, 3)) [slice(0, 2, None)] """ start = 0 for _ in range(int(n // batch_size)): end = start + batch_size yield slice(start, end) start = end if start < n: yield slice(start, n) def gen_even_slices(n, n_packs, n_samples=None): """Generator to create n_packs slices going up to n. Pass n_samples when the slices are to be used for sparse matrix indexing; slicing off-the-end raises an exception, while it works for NumPy arrays. Examples -------- >>> from sklearn.utils import gen_even_slices >>> list(gen_even_slices(10, 1)) [slice(0, 10, None)] >>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS [slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)] >>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)] >>> list(gen_even_slices(10, 3)) [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)] """ start = 0 if n_packs < 1: raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs) for pack_num in range(n_packs): this_n = n // n_packs if pack_num < n % n_packs: this_n += 1 if this_n > 0: end = start + this_n if n_samples is not None: end = min(n_samples, end) yield slice(start, end, None) start = end def _get_n_jobs(n_jobs): """Get number of jobs for the computation. This function reimplements the logic of joblib to determine the actual number of jobs depending on the cpu count. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. 
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. Parameters ---------- n_jobs : int Number of jobs stated in joblib convention. Returns ------- n_jobs : int The actual number of jobs as positive integer. Examples -------- >>> from sklearn.utils import _get_n_jobs >>> _get_n_jobs(4) 4 >>> jobs = _get_n_jobs(-2) >>> assert jobs == max(cpu_count() - 1, 1) >>> _get_n_jobs(0) Traceback (most recent call last): ... ValueError: Parameter n_jobs == 0 has no meaning. """ if n_jobs < 0: return max(cpu_count() + 1 + n_jobs, 1) elif n_jobs == 0: raise ValueError('Parameter n_jobs == 0 has no meaning.') else: return n_jobs def tosequence(x): """Cast iterable x to a Sequence, avoiding a copy if possible.""" if isinstance(x, np.ndarray): return np.asarray(x) elif isinstance(x, Sequence): return x else: return list(x) class ConvergenceWarning(UserWarning): """Custom warning to capture convergence problems""" class DataDimensionalityWarning(UserWarning): """Custom warning to notify potential issues with data dimensionality"""
bsd-3-clause
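The utilities above (resample, shuffle, gen_batches) are easiest to follow with a short usage sketch. A minimal example, assuming a scikit-learn installation that exposes this sklearn.utils module; the toy arrays are illustrative and not taken from the file:

import numpy as np
from sklearn.utils import resample, shuffle, gen_batches

X = np.arange(10).reshape(5, 2)
y = np.arange(5)

# One bootstrap step: rows of X and y stay aligned (replace=True by default).
X_boot, y_boot = resample(X, y, random_state=0)

# Consistent random permutation, i.e. resample(..., replace=False).
X_perm, y_perm = shuffle(X, y, random_state=0)

# Mini-batch slices covering all 5 samples in batches of 2;
# the last slice is shorter when batch_size does not divide n.
for batch in gen_batches(5, 2):
    print(X[batch], y[batch])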
tectronics/pmtk3
python/demos/linregDemo1.py
26
1104
#!/usr/bin/python2.4

import numpy
import scipy.stats
import matplotlib.pyplot as plt

def main():
    # true parameters
    w = 2
    w0 = 3
    sigma = 2

    # make data
    numpy.random.seed(1)
    Ntrain = 20
    xtrain = numpy.linspace(0, 10, Ntrain)
    ytrain = w*xtrain + w0 + numpy.random.random(Ntrain)*sigma
    Ntest = 100
    xtest = numpy.linspace(0, 10, Ntest)
    ytest = w*xtest + w0 + numpy.random.random(Ntest)*sigma

    # from http://www2.warwick.ac.uk/fac/sci/moac/students/peter_cock/python/lin_reg/
    # fit
    west, w0est, r_value, p_value, std_err = scipy.stats.linregress(xtrain, ytrain)

    # display
    print "Param \t True \t Est"
    print "w0 \t %5.3f \t %5.3f" % (w0, w0est)
    print "w \t %5.3f \t %5.3f" % (w, west)

    # plot
    plt.close()
    plt.plot(xtrain, ytrain, 'ro')
    plt.hold(True)
    #plt.plot(xtest, ytest, 'ka-')
    ytestPred = west*xtest + w0est
    #ndx = range(0, Ntest, 10)
    #h = plt.plot(xtest[ndx], ytestPred[ndx], 'b*')
    h = plt.plot(xtest, ytestPred, 'b-')
    plt.setp(h, 'markersize', 12)

if __name__ == '__main__':
    main()
mit
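The demo above fits a straight line with scipy.stats.linregress, which returns the slope, intercept, correlation coefficient, p-value and standard error of the fit. A minimal Python 3 sketch of the same kind of fit (illustrative data, not from the pmtk3 demo):

import numpy as np
from scipy import stats

rng = np.random.RandomState(1)
x = np.linspace(0, 10, 20)
y = 2 * x + 3 + rng.random_sample(20) * 2   # true slope 2, intercept 3

slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
print("slope=%.3f  intercept=%.3f  r^2=%.3f" % (slope, intercept, r_value ** 2))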
tarashor/vibrations
py/main.py
1
9937
import fem.model import fem.mesh import fem.solver import fem.geometry as g import utils import matplotlib.pyplot as plt from matplotlib import cm from mpl_toolkits.mplot3d import Axes3D import numpy as np def solve(width, curvature, corrugation_amplitude, corrugation_frequency, layers, N, M): geometry = g.CorrugatedCylindricalPlate(width, curvature, corrugation_amplitude, corrugation_frequency) model = fem.model.Model(geometry, layers, fem.model.Model.FIXED_BOTTOM_LEFT_RIGHT_POINTS) mesh = fem.mesh.Mesh.generate(model.geometry.width, layers, N, M, model.boundary_conditions) return fem.solver.solve(model, mesh) # return fem.solver.solve_nonlinearity(model, mesh) def get_lowest_freq(width, thickness, curvature, corrugation_amplitude, corrugation_frequency, layers_count, N, M): layer_top = thickness / 2 layer_thickness = thickness / layers_count layers = set() for i in range(layers_count): layer = fem.model.Layer(layer_top - layer_thickness, layer_top, fem.model.Material.steel(), i) layers.add(layer) layer_top -= layer_thickness return solve(width, curvature, corrugation_amplitude, corrugation_frequency, layers, N, M) def plot_displacement_norm(v1, v2, nodes, layers_count, N, M): x = set() y = set() list_nodes = sorted(nodes, key=lambda n: n.index) v = np.zeros((layers_count * M + 1, N + 1)) for n in list_nodes: x.add(n.x) y.add(n.y) i = n.index // (N + 1) j = n.index % (N + 1) norm = np.sqrt(v1[n.index] * v1[n.index] + v2[n.index] * v2[n.index]) v[i, j] = norm # v[i, j] = v2[n.index] x = sorted(x) y = sorted(y) (X, Y) = np.meshgrid(x, y) surf = plt.contourf(X, Y, v, cmap=cm.rainbow) plt.colorbar(surf) plt.show() def plot_strain_norm(result, layers_count, freq_index, M, N): x = set() y = set() list_nodes = sorted(result.get_nodes(), key=lambda n: n.index) v = np.zeros((layers_count * M + 1, N + 1)) for n in list_nodes: x.add(n.x) y.add(n.y) i = n.index // (N + 1) j = n.index % (N + 1) v1 = result.get_strain(freq_index, n.x, n.y)[5] #norm = np.sqrt(v1[n.index] * v1[n.index] + v2[n.index] * v2[n.index]) v[i, j] = v1 # v[i, j] = v2[n.index] x = sorted(x) y = sorted(y) (X, Y) = np.meshgrid(x, y) surf = plt.contourf(X, Y, v, cmap=cm.rainbow) plt.colorbar(surf) plt.show() def plot_init_geometry(width, thickness, curvature, corrugation_amplitude, corrugation_frequency, layers_count, N, M): result = get_lowest_freq(width, thickness, curvature, corrugation_amplitude, corrugation_frequency, layers_count, N, M) plot_strain_norm(result, layers_count, 0, M, N) l, v1, v2, nodes = result.get_result_min() print("Min freq = {}".format(l)) lnodes = sorted(nodes, key=lambda n: n.index) X_init = [] Y_init = [] X_deformed = [] Y_deformed = [] for i in range(N + 1): ind = (layers_count * M) * (N + 1) + i # ind = i x = lnodes[ind].x y = lnodes[ind].y if (curvature > 0): ar = (np.pi + curvature * width) / 2 - x * curvature x = (1 / curvature + y) * np.cos(ar) y = (1 / curvature + y) * np.sin(ar) X_init.append(x) Y_init.append(y) x = lnodes[ind].x + v1[lnodes[ind].index] y = lnodes[ind].y + v2[lnodes[ind].index] if (curvature > 0): ar = (np.pi + curvature * width) / 2 - x * curvature x = (1 / curvature + y) * np.cos(ar) y = (1 / curvature + y) * np.sin(ar) X_deformed.append(x) Y_deformed.append(y) for i in range(N + 1): ind = N - i x = lnodes[ind].x y = lnodes[ind].y if (curvature > 0): ar = (np.pi + curvature * width) / 2 - x * curvature x = (1 / curvature + y) * np.cos(ar) y = (1 / curvature + y) * np.sin(ar) X_init.append(x) Y_init.append(y) x = lnodes[ind].x + v1[lnodes[ind].index] y = lnodes[ind].y + 
v2[lnodes[ind].index] if (curvature > 0): ar = (np.pi + curvature * width) / 2 - x * curvature x = (1 / curvature + y) * np.cos(ar) y = (1 / curvature + y) * np.sin(ar) X_deformed.append(x) Y_deformed.append(y) X_init.append(X_init[0]) Y_init.append(Y_init[0]) X_deformed.append(X_deformed[0]) Y_deformed.append(Y_deformed[0]) plt.plot(X_init, Y_init, label="початкова конфігурація") plt.plot(X_deformed, Y_deformed, label="поточна конфігурація") plt.title("Деформації при {}-ій власній частоті".format(3)) # plt.title(r"Форма панелі з такими параметрами $l={}, h={}, K={}, g_A={}, g_v={}$".format(width, thickness, curvature, corrugation_amplitude, corrugation_frequency)) # plt.axis([-1, 1, 3, 5]) plt.legend(loc='best') plt.grid() plt.show() def plot_sample(width, thickness, curvature, corrugation_amplitude, corrugation_frequency, layers_count, N, M): l, v1, v2, nodes = get_lowest_freq(width, thickness, curvature, corrugation_amplitude, corrugation_frequency, layers_count, N, M) print(l) plot_displacement_norm(v1, v2, nodes, layers_count, N, M) def plot_freq_from_corrugation_frequency(width, thickness, curvature, corrugation_amplitude, corrugation_frequencies, layers_count, N, M): cfs = [] ls = [] for cf in corrugation_frequencies: l, v1, v2, nodes = get_lowest_freq(width, thickness, curvature, corrugation_amplitude, cf, layers_count, N, M) # print("{},{}".format(cf, l)) # data.append([cf, l]) cfs.append(cf) ls.append(l) plt.plot(cfs, ls, 'o-') plt.xlabel(r"$g_v$") plt.ylabel(r"$\omega_{min}$") plt.title(r"Залежність $\omega_{min}$ від $g_v$" + r"($N={}, M={}$)".format(N, M)) plt.grid() plt.show() def calculate_data_freq_from_NxM(width, thickness, curvature, corrugation_amplitude, corrugation_frequency, layers_count, N_max, M_max): data = [] for n in range(40, N_max + 1, 40): for m in range(4, M_max + 1, 4): l, v1, v2, nodes = get_lowest_freq(width, thickness, curvature, corrugation_amplitude, corrugation_frequency, layers_count, n, m) print("{},{},{}".format(n, m, l)) data.append([n, m, l]) return data def calculate_data_freq_from_layers_count(width, thickness, curvature, corrugation_amplitude, corrugation_frequency, N, M, layers_count_max): data = [] for lc in range(layers_count_max + 1): l, v1, v2, nodes = get_lowest_freq(width, thickness, curvature, corrugation_amplitude, corrugation_frequency, lc, N, M) data.append([lc, l]) return data def plot_freq_from_NxM(data): # Make data. print(data) n = set() m = set() for line in data: n.add(int(line[0])) m.add(int(line[1])) n = sorted(n) m = sorted(m) z = np.zeros((len(n), len(m))) for i in range(len(n)): for j in range(len(m)): freq = 0 for item in data: if (int(item[0]) == n[i] and int(item[1]) == m[j]): freq = float(item[2]) break z[i, j] = freq fig = plt.figure() ax = fig.gca(projection='3d') m, n = np.meshgrid(m, n) print(n) print(m) print(z) surf = ax.plot_surface(n, m, z, cmap=cm.rainbow) ax.set_yticks(np.arange(4, 17, 2)) ax.set_xlabel(r'$N$', fontsize=14) ax.set_ylabel(r'$M$', fontsize=14) ax.set_zlabel(r'$\omega_{min} $', fontsize=14) ax.zaxis.set_rotate_label(False) ax.title.set_text(r'Залежність $\omega_{min}$ від к-сті елементів по товщині $M$ і по довжині $N$ (к-сть шарів = 1)') # Add a color bar which maps values to colors. 
fig.colorbar(surf, shrink=0.5, aspect=5) plt.show() def plot_freq_from_layers_count(data): x = [] y = [] for line in data: x.append(int(line[0])) y.append(float(line[1])) plt.plot(x, y) plt.xlabel("Кількість шарів") plt.ylabel(r"$\omega_{min}$") plt.title(r"Залежність $\omega_{min}$ від к-сті шарів сталі ($M=70, N=10, h=0.05$)") plt.grid() plt.show() width = 2 curvature = 0.08 thickness = 0.05 corrugation_amplitude = 0.03 corrugation_frequency = 20 freq_from_NM_file = "freq_from_NxM" freq_from_layers_file = "freq_from_layers_count" freq_from_NM_file_done = "freq_from_NxM_done" freq_from_layers_file_done = "freq_from_layers_count_done" layers_count_default = 1 N_default = 40 M_default = 4 # 1 # data = calculate_data_freq_from_NxM(width, thickness, curvature, corrugation_amplitude, corrugation_frequency, layers_count_default, 240, 16) # utils.save_in_file(freq_from_NM_file, data) # 2 # data = utils.read_from_file(freq_from_NM_file_done) # plot_freq_from_NxM(data) # 3 # data = calculate_data_freq_from_layers_count(width, thickness, curvature, corrugation_amplitude, corrugation_frequency, N_default, M_default, 5) # utils.save_in_file(freq_from_layers_file, data) # 4 # data = utils.read_from_file(freq_from_layers_file_done) # plot_freq_from_layers_count(data) # 5 # plot_sample(width, thickness, curvature, corrugation_amplitude, corrugation_frequency, layers_count_default, N_default, M_default) # 6 plot_init_geometry(width, thickness, curvature, corrugation_amplitude, corrugation_frequency, layers_count_default, N_default, M_default) # 7 # plot_freq_from_corrugation_frequency(width, thickness, curvature, corrugation_amplitude, [2,4,6,8,10,12,16,20,26,50,80,100], layers_count_default, N_default, M_default)
mit
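plot_displacement_norm and plot_strain_norm above share one plotting pattern: nodal values assembled on a regular (x, y) grid and rendered with meshgrid/contourf plus a colorbar. A minimal sketch of that pattern alone, independent of the custom fem package (the grid sizes and the field are made-up stand-ins):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm

x = np.linspace(0.0, 2.0, 41)        # N + 1 points along the width
y = np.linspace(-0.025, 0.025, 5)    # layers_count * M + 1 points through the thickness
X, Y = np.meshgrid(x, y)
V = np.sqrt(np.sin(np.pi * X) ** 2 + Y ** 2)  # stand-in for the displacement norm

surf = plt.contourf(X, Y, V, cmap=cm.rainbow)
plt.colorbar(surf)
plt.show()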
kcompher/FreeDiscovUI
freediscovery/utils.py
1
7763
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import os import sys import os.path from contextlib import contextmanager import pandas as pd import numpy as np import uuid try: # sklearn v0.17 from sklearn.exceptions import UndefinedMetricWarning except ImportError: # v0.18 from sklearn.metrics.base import UndefinedMetricWarning from .exceptions import (DatasetNotFound, ModelNotFound, InitException, WrongParameter) @contextmanager def _silent(stream='stderr'): stderr = getattr(sys, stream) fh = open(os.devnull, 'w') sys.stderr = fh yield setattr(sys, stream, stderr) INT_NAN = -99999 def categorization_score(idx_ref, Y_ref, idx, Y): """ Calculate the efficiency scores """ # This function should be deprecated # An equivalent functionally should be achieved with a # more general freediscovery.metrics module import warnings from sklearn.metrics import (precision_score, recall_score, f1_score, roc_auc_score, average_precision_score) threshold = 0.0 idx = np.asarray(idx, dtype='int') idx_ref = np.asarray(idx_ref, dtype='int') Y = np.asarray(Y) Y_ref = np.asarray(Y_ref) idx_out = np.intersect1d(idx_ref, idx) if not len(idx_out): return {"recall_score": -1, "precision_score": -1, 'f1': -1, 'auc_roc': -1, 'average_precision': -1} # sort values by index order_ref = idx_ref.argsort() idx_ref = idx_ref[order_ref] Y_ref = Y_ref[order_ref] order = idx.argsort() idx = idx[order] Y = Y[order] # find indices that are in both the reference and the test dataset mask_ref = np.in1d(idx_ref, idx_out) mask = np.in1d(idx, idx_out) Y_ref = Y_ref[mask_ref] Y = Y[mask] Y_bin = (Y > threshold) with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UndefinedMetricWarning) m_recall_score = recall_score(Y_ref, Y_bin) m_precision_score = precision_score(Y_ref, Y_bin) m_f1_score = f1_score(Y_ref, Y_bin) if len(np.unique(Y_ref)) == 2: m_roc_auc = roc_auc_score(Y_ref, Y) else: m_roc_auc = np.nan # ROC not defined in this case m_average_precision = average_precision_score(Y_ref, Y) return {"recall": m_recall_score, "precision": m_precision_score, "f1": m_f1_score, 'roc_auc': m_roc_auc, 'average_precision': m_average_precision } def _rename_main_thread(): """ This aims to address the fact that joblib wrongly detects uWSGI workers as running in the non main thread even when they are not see https://github.com/joblib/joblib/issues/180 """ import threading if isinstance(threading.current_thread(), threading._MainThread) and \ threading.current_thread().name != 'MainThread': print('Warning: joblib: renaming current thread {} to "MainThread".'.format(threading.current_thread().name)) threading.current_thread().name = 'MainThread' def _count_duplicates(x): """Return y an array of the same shape as x with the number of duplicates for each element""" _, indices, counts = np.unique(x, return_counts=True, return_inverse=True) return counts[indices] def generate_uuid(size=16): """ Generate a unique id for the model """ sl = slice(size) return uuid.uuid4().hex[sl] # a new random id def setup_model(base_path): """ Generate a unique model id and create the corresponding folder for storing results """ mid = generate_uuid() mid_dir = os.path.join(base_path, mid) # hash collision; should not happen if os.path.exists(mid_dir): os.remove(mid_dir) # removing the old folder nevertheless os.mkdir(mid_dir) return mid, mid_dir def _docstring_description(docstring): """ Given a function docstring, return only 
the text prior to the "Parameters" section""" res = [] for line in docstring.splitlines(): if line.strip() == 'Parameters': break res.append(line) return '\n'.join(res) def _query_features(vect, X, indices, n_top_words=10, remove_stop_words=False): """ Query the features with most weight Parameters ---------- vect : TfidfVectorizer the vectorizer object X : ndarray the document term tfidf array indices : list or ndarray indices for the subcluster n_top_words : int the number of workds to return remove_stop_words : bool remove stop words """ from .cluster.base import select_top_words # this should raise a warning when used with wrong weights X = X[indices] centroid = X.sum(axis=0).view(type=np.ndarray)[0] / len(indices) order_centroid = centroid.argsort()[::-1] terms = vect.get_feature_names() out = [] for ridx, idx in enumerate(order_centroid): if len(out) >= n_top_words: break if remove_stop_words: out += select_top_words([terms[idx]]) else: out.append(terms[idx]) return out def dict2type(d, collapse_lists=False, max_depth=10): """Recursively walk though the object and replace all dict values by their type Parameters ---------- collapse_lists : bool collapse a list to a single element max_depth : bool maximum depth on which the typing would be computed """ if max_depth == 0: res = type(d).__name__ if res == 'unicode': res = 'str' return res if isinstance(d, dict): res = {} for key, val in d.items(): res[key] = dict2type(val, collapse_lists, max_depth - 1) return res elif isinstance(d, list): res = [dict2type(el, collapse_lists, max_depth - 1) for el in d] if collapse_lists: res = list(set(res)) return res else: res = type(d).__name__ if res == 'unicode': res = 'str' return res def sdict_keys(x): """Sorted dictionary keys of x""" return list(sorted(x.keys())) def assert_equal_dict_keys(d1, d2, path=''): """ Recursively check that all dict keys are the same between dictionary a and b """ #Adapted from: https://stackoverflow.com/a/27266178/1791279 error_msg = [] if sdict_keys(d1) != sdict_keys(d2): error_msg.append('Key at {} do not match {} != {}'.format( path, sdict_keys(d1), sdict_keys(d2))) for key, val in d1.items(): if isinstance(val, dict): if path == "": path = key else: path = path + "->" + key if isinstance(d2[key], dict): error_msg += assert_equal_dict_keys(d1[key], d2[key], path) else: error_msg.append(path + ' differ') elif isinstance(val, list) and key in d2 and isinstance(d2[key], list) \ and len(d1[key]) > 0 and len(d2[key]) > 0 \ and isinstance(d1[key][0], dict) and isinstance(d2[key][0], dict): d1_list_keys = set(["_".join(sdict_keys(el)) for el in d1[key]]) d2_list_keys = set(["_".join(sdict_keys(el)) for el in d2[key]]) if len(d1_list_keys) == 1 and len(d2_list_keys) == 1: if path == "": path = key else: path = path + "->" + key error_msg += assert_equal_dict_keys(d1[key][0], d2[key][0], path) if error_msg: assert False, "\n".join(error_msg) else: return error_msg
bsd-3-clause
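categorization_score above aligns two indexed label sets and then delegates to standard sklearn.metrics calls with a fixed 0.0 decision threshold. A minimal sketch of just that scoring step (toy labels and scores, not from the package):

import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score

y_true = np.array([1, 0, 1, 1, 0])
scores = np.array([0.8, -0.2, 0.3, 0.9, -0.5])  # decision-function style outputs
y_pred = scores > 0.0                           # same threshold as in categorization_score

print("recall   ", recall_score(y_true, y_pred))
print("precision", precision_score(y_true, y_pred))
print("f1       ", f1_score(y_true, y_pred))
print("roc_auc  ", roc_auc_score(y_true, scores))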
nmayorov/scikit-learn
examples/linear_model/plot_ard.py
29
2828
""" ================================================== Automatic Relevance Determination Regression (ARD) ================================================== Fit regression model with Bayesian Ridge Regression. See :ref:`bayesian_ridge_regression` for more information on the regressor. Compared to the OLS (ordinary least squares) estimator, the coefficient weights are slightly shifted toward zeros, which stabilises them. The histogram of the estimated weights is very peaked, as a sparsity-inducing prior is implied on the weights. The estimation of the model is done by iteratively maximizing the marginal log-likelihood of the observations. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from scipy import stats from sklearn.linear_model import ARDRegression, LinearRegression ############################################################################### # Generating simulated data with Gaussian weights # Parameters of the example np.random.seed(0) n_samples, n_features = 100, 100 # Create Gaussian data X = np.random.randn(n_samples, n_features) # Create weights with a precision lambda_ of 4. lambda_ = 4. w = np.zeros(n_features) # Only keep 10 weights of interest relevant_features = np.random.randint(0, n_features, 10) for i in relevant_features: w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_)) # Create noite with a precision alpha of 50. alpha_ = 50. noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples) # Create the target y = np.dot(X, w) + noise ############################################################################### # Fit the ARD Regression clf = ARDRegression(compute_score=True) clf.fit(X, y) ols = LinearRegression() ols.fit(X, y) ############################################################################### # Plot the true weights, the estimated weights and the histogram of the # weights plt.figure(figsize=(6, 5)) plt.title("Weights of the model") plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2, label="ARD estimate") plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2, label="OLS estimate") plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth") plt.xlabel("Features") plt.ylabel("Values of the weights") plt.legend(loc=1) plt.figure(figsize=(6, 5)) plt.title("Histogram of the weights") plt.hist(clf.coef_, bins=n_features, color='navy', log=True) plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)), color='gold', marker='o', label="Relevant features") plt.ylabel("Features") plt.xlabel("Values of the weights") plt.legend(loc=1) plt.figure(figsize=(6, 5)) plt.title("Marginal log-likelihood") plt.plot(clf.scores_, color='navy', linewidth=2) plt.ylabel("Score") plt.xlabel("Iterations") plt.show()
bsd-3-clause
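The sparsity the example describes can also be checked numerically: with only a handful of informative features, ARD drives most coefficients to (near) zero while OLS does not. A minimal sketch reusing the same simulated-data recipe (dimensions and noise level chosen purely for illustration):

import numpy as np
from sklearn.linear_model import ARDRegression, LinearRegression

rng = np.random.RandomState(0)
X = rng.randn(100, 100)
w = np.zeros(100)
w[rng.randint(0, 100, 10)] = rng.randn(10)   # roughly 10 informative weights
y = X.dot(w) + 0.1 * rng.randn(100)

ard = ARDRegression().fit(X, y)
ols = LinearRegression().fit(X, y)
print("ARD coefficients near zero:", np.sum(np.abs(ard.coef_) < 1e-3))
print("OLS coefficients near zero:", np.sum(np.abs(ols.coef_) < 1e-3))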
pratapvardhan/pandas
asv_bench/benchmarks/algorithms.py
3
3286
import warnings from importlib import import_module import numpy as np import pandas as pd from pandas.util import testing as tm for imp in ['pandas.util', 'pandas.tools.hashing']: try: hashing = import_module(imp) break except: pass from .pandas_vb_common import setup # noqa class Factorize(object): goal_time = 0.2 params = [True, False] param_names = ['sort'] def setup(self, sort): N = 10**5 self.int_idx = pd.Int64Index(np.arange(N).repeat(5)) self.float_idx = pd.Float64Index(np.random.randn(N).repeat(5)) self.string_idx = tm.makeStringIndex(N) def time_factorize_int(self, sort): self.int_idx.factorize(sort=sort) def time_factorize_float(self, sort): self.float_idx.factorize(sort=sort) def time_factorize_string(self, sort): self.string_idx.factorize(sort=sort) class Duplicated(object): goal_time = 0.2 params = ['first', 'last', False] param_names = ['keep'] def setup(self, keep): N = 10**5 self.int_idx = pd.Int64Index(np.arange(N).repeat(5)) self.float_idx = pd.Float64Index(np.random.randn(N).repeat(5)) self.string_idx = tm.makeStringIndex(N) def time_duplicated_int(self, keep): self.int_idx.duplicated(keep=keep) def time_duplicated_float(self, keep): self.float_idx.duplicated(keep=keep) def time_duplicated_string(self, keep): self.string_idx.duplicated(keep=keep) class DuplicatedUniqueIndex(object): goal_time = 0.2 def setup(self): N = 10**5 self.idx_int_dup = pd.Int64Index(np.arange(N * 5)) # cache is_unique self.idx_int_dup.is_unique def time_duplicated_unique_int(self): self.idx_int_dup.duplicated() class Match(object): goal_time = 0.2 def setup(self): self.uniques = tm.makeStringIndex(1000).values self.all = self.uniques.repeat(10) def time_match_string(self): with warnings.catch_warnings(record=True): pd.match(self.all, self.uniques) class Hashing(object): goal_time = 0.2 def setup_cache(self): N = 10**5 df = pd.DataFrame( {'strings': pd.Series(tm.makeStringIndex(10000).take( np.random.randint(0, 10000, size=N))), 'floats': np.random.randn(N), 'ints': np.arange(N), 'dates': pd.date_range('20110101', freq='s', periods=N), 'timedeltas': pd.timedelta_range('1 day', freq='s', periods=N)}) df['categories'] = df['strings'].astype('category') df.iloc[10:20] = np.nan return df def time_frame(self, df): hashing.hash_pandas_object(df) def time_series_int(self, df): hashing.hash_pandas_object(df['ints']) def time_series_string(self, df): hashing.hash_pandas_object(df['strings']) def time_series_float(self, df): hashing.hash_pandas_object(df['floats']) def time_series_categorical(self, df): hashing.hash_pandas_object(df['categories']) def time_series_timedeltas(self, df): hashing.hash_pandas_object(df['timedeltas']) def time_series_dates(self, df): hashing.hash_pandas_object(df['dates'])
bsd-3-clause
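The operations these benchmarks time are ordinary pandas calls. A minimal sketch of the factorize, duplicated and hashing cases on tiny inputs (values are illustrative; hash_pandas_object lives in pandas.util in recent pandas, with pandas.tools.hashing as the older fallback the benchmark also tries):

import numpy as np
import pandas as pd
from pandas.util import hash_pandas_object

idx = pd.Index(np.arange(5).repeat(2))     # integer index with duplicates

codes, uniques = idx.factorize()           # what time_factorize_int measures
dup_mask = idx.duplicated(keep='first')    # what time_duplicated_int measures
hashes = hash_pandas_object(pd.Series(['a', 'b', 'c']))  # what time_series_string measures

print(codes, uniques, dup_mask, hashes, sep="\n")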
aejax/KerasRL
test.py
1
8131
from agent import * from value import * from keras.optimizers import * import keras.backend as K import gym import timeit import argparse import cPickle as pkl import matplotlib #Force matplotlib to use any Xwindows backend matplotlib.use('Agg') import matplotlib.pyplot as plt import simple_dqn import atari_dqn import matching_q import ec def yntotf(s): if s.lower() == 'y': return True elif s.lower() == 'n': return False else: print '\'{}\' cannot be converted to True or False.'.format(s) return None def run(env, agent, n_episode, tMax, log_freq, render, monitor, plot, s_dir, save, save_freq, time): returns = [] losses = [] if monitor: env.monitor.start('{}/{}'.format(s_dir, agent.name), force=True) if hasattr(agent, 'frame_count'): count_steps = agent.frame_count else: count_steps = 0 try: for episode in xrange(n_episode): observation = env.reset() done = False l_sum = agent.observe(observation, 0, done) count_steps += 1 r_sum = 0 timer = 0 s = timeit.default_timer() while not done: count_steps += 1 if render and (episode+1)%log_freq == 0: env.render(mode='human') action = agent.act(n=count_steps-agent.random_start) observation, reward, done, info = env.step(action) # End the episode at tMax timer += 1 if timer == tMax: done = True loss = agent.observe(observation, reward, done) l_sum += loss r_sum += reward if timer == tMax: done = True if (episode+1)%log_freq == 0: print 'Episode {} finished with return of {}.'.format(episode+1,r_sum) e = timeit.default_timer() if time: print 'Training step time: ', (e - s) / timer print 'Training steps: ', timer if hasattr(agent, 'times'): agent.times() returns.append(r_sum) losses.append(l_sum / timer) # save the agent if save and (episode+1)%save_freq == 0: print 'Saving agent to {}'.format(s_dir) begin = timeit.default_timer() agent.save(s_dir) end = timeit.default_timer() dt = end - begin print 'Save time: {:}min {:.3}s'.format(dt // 60, dt % 60) except KeyboardInterrupt: pass if monitor: env.monitor.close() def movingaverage(values, window): weights = np.repeat(1.0, window)/window sma = np.convolve(values, weights, 'valid') return sma window_size = 100 rMVA = movingaverage(returns,window_size) lMVA = movingaverage(losses,window_size) ave_r = reduce(lambda x, y: x+y, returns) / n_episode print 'Average Reward: {}'.format(ave_r) print 'Max 100 Episode Average Reward: {}'.format(rMVA.max()) print 'Number of environment steps: {}'.format(count_steps) results = {'losses': losses, 'returns': returns} pkl.dump(results, open('{}/{}_results.pkl'.format(s_dir,agent.name), 'w')) #print sorted(agent.Q_ec.table.keys()) #for k in agent.Q_ec.table: # p = np.random.random(1) # if p < 0.01: # print agent.Q_ec.table[k] #print len(agent.Q_ec.table) #for s in agent.Q_ec.states: # p = np.random.random(1) # if p < 0.001: # print s #print agent.Q_ec.i #for i in xrange(agent.A.n): # print len(agent.Q_ec.action_tables[i][0]), if plot: plt.figure() plt.subplot(121) plt.plot(rMVA) plt.title('Rewards') plt.xlabel('Average rewards per {} episodes'.format(window_size)) plt.subplot(122) plt.plot(lMVA) plt.title('Loss') plt.xlabel('Average losses per {} episodes'.format(window_size)) plt.savefig('{}/{}.png'.format(s_dir, agent.name), format='png') plt.close('all') def test_session(env_name, agent_name, n_episode, log_freq, interactive, l_dir, s_dir, save_freq): from gym.envs.toy_text.frozen_lake import FrozenLakeEnv env = FrozenLakeEnv(is_slippery=False) #env = gym.make(env_name) S = env.observation_space A = env.action_space #set defaults if s_dir == '': s_dir = env_name if 
l_dir == '': l_dir = s_dir render = False monitor = False plot = False save = False load = False time = True if interactive: time = yntotf(raw_input('Time? [y/n] ')) render = yntotf(raw_input('Render? [y/n] ')) monitor = yntotf(raw_input('Monitor? [y/n] ')) plot = yntotf(raw_input('Plot? [y/n] ')) save = yntotf(raw_input('Save? [y/n] ')) if save: tmp = raw_input('Save directory? (default is {}): '.format(s_dir)) s_dir = tmp if tmp != '' else s_dir print s_dir load = yntotf(raw_input('Load? [y/n] ')) if load: tmp = raw_input('Load directory? (default is {}): '.format(l_dir)) l_dir = tmp if tmp != '' else l_dir print l_dir # define the save and load directory import os import os.path file_path = './' + s_dir if not os.path.exists(file_path): os.mkdir(s_dir) file_path = './' + l_dir if not os.path.exists(file_path): os.mkdir(l_dir) #define run length n_episode = n_episode #tMax = env.spec.timestep_limit tMax = 100 log_freq = log_freq save_freq = save_freq #define agent if agent_name == 'simple_dqn': if load: agent = simple_dqn.load(l_dir, env) else: agent = simple_dqn.get_agent(env) elif agent_name == 'atari_dqn': if load: agent = atari_dqn.load(l_dir, env) else: agent = atari_dqn.get_agent(env) elif agent_name == 'matching_q': if load: agent = matching_q.load(l_dir, env) else: agent = matching_q.get_agent(env) elif agent_name == 'ec': if load: agent = ec.load(l_dir, env) else: agent = ec.get_agent(env) else: raise ValueError, '{} is not a valid agent name.'.format(agent_name) #knn = KNNQ(S, A, n_neighbors=5, memory_size=100000, memory_fit=100, lr=1.0, weights='distance') #agent = QLearning(S, A, Q=knn, name='KNN-1', random_start=random_start) # Perform test print 'Beginning training for {} episodes.'.format(n_episode) begin = timeit.default_timer() run(env, agent, n_episode, tMax, log_freq, render, monitor, plot, s_dir, save, save_freq, time) end = timeit.default_timer() dt = end - begin print 'Run time: {:}min {:.3}s'.format(dt // 60, dt % 60) if save: print 'Saving agent to {}'.format(s_dir) begin = timeit.default_timer() agent.save(s_dir) end = timeit.default_timer() dt = end - begin print 'Save time: {:}min {:.3}s'.format(dt // 60, dt % 60) if __name__ == '__main__': parser = argparse.ArgumentParser(description='RL testing script.') parser.add_argument('-e', '--environment', type=str, default='FrozenLake-v0') parser.add_argument('-a', '--agent', type=str, default='simple_dqn') parser.add_argument('-n', '--n_episode', type=int, default=100) parser.add_argument('--log', type=int, default=10) parser.add_argument('--save_freq', type=int, default=100) parser.add_argument('-l', '--load_dir', type=str, default='') parser.add_argument('-s', '--save_dir', type=str, default='') parser.add_argument('-i', '--interactive', action='store_true') args = parser.parse_args() test_session(args.environment, args.agent, args.n_episode, args.log, args.interactive, args.load_dir, args.save_dir, args.save_freq)
gpl-3.0
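The movingaverage helper inside run() is a plain convolution with a uniform kernel; a minimal standalone sketch (toy return values, not from an actual training run):

import numpy as np

def movingaverage(values, window):
    # Uniform-kernel moving average; 'valid' drops the partially covered edges,
    # so the output has len(values) - window + 1 points.
    weights = np.repeat(1.0, window) / window
    return np.convolve(values, weights, 'valid')

returns = np.array([0., 1., 0., 1., 1., 1., 0., 1.])
print(movingaverage(returns, 4))   # [0.5, 0.75, 0.75, 0.75, 0.75]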
DucQuang1/py-earth
pyearth/test/test_earth.py
1
13869
''' Created on Feb 24, 2013 @author: jasonrudy ''' import pickle import copy import os from nose.tools import (assert_equal, assert_not_equal, assert_true, assert_almost_equal, assert_list_equal, assert_raises) import numpy from scipy.sparse import csr_matrix from sklearn.utils.validation import NotFittedError from pyearth._basis import (Basis, ConstantBasisFunction, HingeBasisFunction, LinearBasisFunction) from pyearth import Earth from .testing_utils import (if_statsmodels, if_pandas, if_patsy, if_environ_has, assert_list_almost_equal, assert_list_almost_equal_value) numpy.random.seed(0) basis = Basis(10) constant = ConstantBasisFunction() basis.append(constant) bf1 = HingeBasisFunction(constant, 0.1, 10, 1, False, 'x1') bf2 = HingeBasisFunction(constant, 0.1, 10, 1, True, 'x1') bf3 = LinearBasisFunction(bf1, 2, 'x2') basis.append(bf1) basis.append(bf2) basis.append(bf3) X = numpy.random.normal(size=(100, 10)) B = numpy.empty(shape=(100, 4), dtype=numpy.float64) basis.transform(X, B) beta = numpy.random.normal(size=4) y = numpy.empty(shape=100, dtype=numpy.float64) y[:] = numpy.dot(B, beta) + numpy.random.normal(size=100) default_params = {"penalty": 1} def test_get_params(): assert_equal( Earth().get_params(), {'penalty': None, 'min_search_points': None, 'endspan_alpha': None, 'check_every': None, 'max_terms': None, 'max_degree': None, 'minspan_alpha': None, 'thresh': None, 'minspan': None, 'endspan': None, 'allow_linear': None, 'smooth': None, 'enable_pruning': True}) assert_equal( Earth( max_degree=3).get_params(), {'penalty': None, 'min_search_points': None, 'endspan_alpha': None, 'check_every': None, 'max_terms': None, 'max_degree': 3, 'minspan_alpha': None, 'thresh': None, 'minspan': None, 'endspan': None, 'allow_linear': None, 'smooth': None, 'enable_pruning': True}) @if_statsmodels def test_linear_fit(): from statsmodels.regression.linear_model import GLS, OLS earth = Earth(**default_params) earth.fit(X, y) earth._Earth__linear_fit(X, y) soln = OLS(y, earth.transform(X)).fit().params assert_almost_equal(numpy.mean((earth.coef_ - soln) ** 2), 0.0) sample_weight = 1.0 / (numpy.random.normal(size=y.shape) ** 2) earth.fit(X, y) earth._Earth__linear_fit(X, y, sample_weight) soln = GLS(y, earth.transform( X), 1.0 / sample_weight).fit().params assert_almost_equal(numpy.mean((earth.coef_ - soln) ** 2), 0.0) def test_sample_weight(): group = numpy.random.binomial(1, .5, size=1000) == 1 sample_weight = 1 / (group * 100 + 1.0) x = numpy.random.uniform(-10, 10, size=1000) y = numpy.abs(x) y[group] = numpy.abs(x[group] - 5) y += numpy.random.normal(0, 1, size=1000) model = Earth().fit(x[:, numpy.newaxis], y, sample_weight=sample_weight) # Check that the model fits better for the more heavily weighted group assert_true(model.score(x[group], y[group]) < model.score( x[numpy.logical_not(group)], y[numpy.logical_not(group)])) # Make sure that the score function gives the same answer as the trace pruning_trace = model.pruning_trace() rsq_trace = pruning_trace.rsq(model.pruning_trace().get_selected()) assert_almost_equal(model.score(x, y, sample_weight=sample_weight), rsq_trace) # Uncomment below to see what this test situation looks like # from matplotlib import pyplot # print model.summary() # print model.score(x,y,sample_weight = sample_weight) # pyplot.figure() # pyplot.plot(x,y,'b.') # pyplot.plot(x,model.predict(x),'r.') # pyplot.show() def test_fit(): earth = Earth(**default_params) earth.fit(X, y) res = str(earth.trace()) + '\n' + earth.summary() filename = 
os.path.join(os.path.dirname(__file__), 'earth_regress.txt') with open(filename, 'r') as fl: prev = fl.read() assert_equal(res, prev) def test_smooth(): model = Earth(penalty=1, smooth=True) model.fit(X, y) res = str(model.trace()) + '\n' + model.summary() filename = os.path.join(os.path.dirname(__file__), 'earth_regress_smooth.txt') with open(filename, 'r') as fl: prev = fl.read() assert_equal(res, prev) def test_linvars(): earth = Earth(**default_params) earth.fit(X, y, linvars=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) res = str(earth.trace()) + '\n' + earth.summary() filename = os.path.join(os.path.dirname(__file__), 'earth_linvars_regress.txt') with open(filename, 'r') as fl: prev = fl.read() assert_equal(res, prev) def test_linvars_coefs(): nb_vars = 11 coefs = numpy.random.uniform(size=(nb_vars,)) X = numpy.random.uniform(size=(100, nb_vars)) bias = 1 y = numpy.dot(X, coefs[:, numpy.newaxis]) + bias earth = Earth(max_terms=nb_vars * 2, max_degree=1, enable_pruning=False, check_every=1, thresh=0, minspan=1, endspan=1).fit(X, y, linvars=range(nb_vars)) earth_bias = earth.coef_[0] earth_coefs = sorted(earth.coef_[1:]) assert_almost_equal(earth_bias, bias) assert_list_almost_equal(earth_coefs, sorted(coefs)) def test_score(): earth = Earth(**default_params) model = earth.fit(X, y) record = model.pruning_trace() rsq = record.rsq(record.get_selected()) assert_almost_equal(rsq, model.score(X, y)) @if_pandas @if_environ_has('test_pathological_cases') def test_pathological_cases(): import pandas directory = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'pathological_data') cases = {'issue_44': {}, 'issue_50': {'penalty': 0.5, 'minspan': 1, 'allow_linear': False, 'endspan': 1, 'check_every': 1, 'sample_weight': 'issue_50_weight.csv'}} for case, settings in cases.iteritems(): data = pandas.read_csv(os.path.join(directory, case + '.csv')) y = data['y'] del data['y'] X = data if 'sample_weight' in settings: filename = os.path.join(directory, settings['sample_weight']) sample_weight = pandas.read_csv(filename)['sample_weight'] del settings['sample_weight'] else: sample_weight = None model = Earth(**settings) model.fit(X, y, sample_weight=sample_weight) with open(os.path.join(directory, case + '.txt'), 'r') as infile: correct = infile.read() assert_equal(model.summary(), correct) @if_pandas def test_pandas_compatibility(): import pandas X_df = pandas.DataFrame(X) y_df = pandas.DataFrame(y) colnames = ['xx' + str(i) for i in range(X.shape[1])] X_df.columns = colnames earth = Earth(**default_params) model = earth.fit(X_df, y_df) assert_list_equal( colnames, model.forward_trace()._getstate()['xlabels']) @if_patsy @if_pandas def test_patsy_compatibility(): import pandas import patsy X_df = pandas.DataFrame(X) y_df = pandas.DataFrame(y) colnames = ['xx' + str(i) for i in range(X.shape[1])] X_df.columns = colnames X_df['y'] = y y_df, X_df = patsy.dmatrices( 'y ~ xx0 + xx1 + xx2 + xx3 + xx4 + xx5 + xx6 + xx7 + xx8 + xx9 - 1', data=X_df) model = Earth(**default_params).fit(X_df, y_df) assert_list_equal( colnames, model.forward_trace()._getstate()['xlabels']) def test_pickle_compatibility(): earth = Earth(**default_params) model = earth.fit(X, y) model_copy = pickle.loads(pickle.dumps(model)) assert_true(model_copy == model) assert_true( numpy.all(model.predict(X) == model_copy.predict(X))) assert_true(model.basis_[0] is model.basis_[1]._get_root()) assert_true(model_copy.basis_[0] is model_copy.basis_[1]._get_root()) def test_copy_compatibility(): model = Earth(**default_params).fit(X, y) model_copy = 
copy.copy(model) assert_true(model_copy == model) assert_true( numpy.all(model.predict(X) == model_copy.predict(X))) assert_true(model.basis_[0] is model.basis_[1]._get_root()) assert_true(model_copy.basis_[0] is model_copy.basis_[1]._get_root()) def test_exhaustive_search(): model = Earth(max_terms=13, enable_pruning=False, check_every=1, thresh=0, minspan=1, endspan=1) model.fit(X, y) assert_equal(len(model.basis_), len(model.coef_)) assert_equal(model.transform(X).shape[1], len(model.basis_)) def test_nb_terms(): for max_terms in (1, 3, 12, 13): model = Earth(max_terms=max_terms) model.fit(X, y) assert_true(len(model.basis_) <= max_terms) assert_true(len(model.coef_) <= len(model.basis_)) assert_true(len(model.coef_) >= 1) if max_terms == 1: assert_list_almost_equal_value(model.predict(X), y.mean()) def test_nb_degrees(): for max_degree in (1, 2, 12, 13): model = Earth(max_terms=10, max_degree=max_degree, enable_pruning=False, check_every=1, thresh=0, minspan=1, endspan=1) model.fit(X, y) for basis in model.basis_: assert_true(basis.degree() >= 0) assert_true(basis.degree() <= max_degree) def test_eq(): model1 = Earth(**default_params) model2 = Earth(**default_params) assert_equal(model1, model2) assert_not_equal(model1, 5) params = {} params.update(default_params) params["penalty"] = 15 model2 = Earth(**params) assert_not_equal(model1, model2) model3 = Earth(**default_params) model3.unknown_parameter = 5 assert_not_equal(model1, model3) def test_sparse(): X_sparse = csr_matrix(X) model = Earth(**default_params) assert_raises(TypeError, model.fit, X_sparse, y) model = Earth(**default_params) model.fit(X, y) assert_raises(TypeError, model.predict, X_sparse) assert_raises(TypeError, model.predict_deriv, X_sparse) assert_raises(TypeError, model.transform, X_sparse) assert_raises(TypeError, model.score, X_sparse) model = Earth(**default_params) sample_weight = csr_matrix([1.] * X.shape[0]) assert_raises(TypeError, model.fit, X, y, sample_weight) def test_shape(): model = Earth(**default_params) model.fit(X, y) X_reduced = X[:, 0:5] assert_raises(ValueError, model.predict, X_reduced) assert_raises(ValueError, model.predict_deriv, X_reduced) assert_raises(ValueError, model.transform, X_reduced) assert_raises(ValueError, model.score, X_reduced) model = Earth(**default_params) X_subsampled = X[0:10] assert_raises(ValueError, model.fit, X_subsampled, y) model = Earth(**default_params) y_subsampled = X[0:10] assert_raises(ValueError, model.fit, X, y_subsampled) model = Earth(**default_params) sample_weights = numpy.array([1.] 
* len(X)) sample_weights_subsampled = sample_weights[0:10] assert_raises(ValueError, model.fit, X, y, sample_weights_subsampled) def test_deriv(): model = Earth(**default_params) model.fit(X, y) assert_equal(X.shape, model.predict_deriv(X).shape) assert_equal((X.shape[0], 1), model.predict_deriv(X, variables=0).shape) assert_equal((X.shape[0], 1), model.predict_deriv(X, variables='x0').shape) assert_equal((X.shape[0], 3), model.predict_deriv(X, variables=[1, 5, 7]).shape) assert_equal((X.shape[0], 0), model.predict_deriv(X, variables=[]).shape) res_deriv = model.predict_deriv(X, variables=['x2', 'x7', 'x0', 'x1']) assert_equal((X.shape[0], 4), res_deriv.shape) res_deriv = model.predict_deriv(X, variables=['x0']) assert_equal((X.shape[0], 1), res_deriv.shape) assert_equal((X.shape[0], 1), model.predict_deriv(X, variables=[0]).shape) def test_xlabels(): model = Earth(**default_params) assert_raises(ValueError, model.fit, X[:, 0:5], y, xlabels=['var1', 'var2']) model = Earth(**default_params) model.fit(X[:, 0:3], y, xlabels=['var1', 'var2', 'var3']) model = Earth(**default_params) model.fit(X[:, 0:3], y, xlabels=['var1', 'var2', 'var3']) def test_untrained(): model = Earth(**default_params) assert_raises(NotFittedError, model.predict, X) assert_raises(NotFittedError, model.transform, X) assert_raises(NotFittedError, model.predict_deriv, X) assert_raises(NotFittedError, model.score, X) # the following should be changed to raise NotFittedError assert_equal(model.forward_trace(), None) assert_equal(model.pruning_trace(), None) assert_equal(model.summary(), "Untrained Earth Model")
bsd-3-clause
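The tests above exercise the py-earth estimator through its scikit-learn-style API. A minimal fit/score sketch using the same constructor parameters the tests rely on (the toy data is illustrative):

import numpy
from pyearth import Earth

numpy.random.seed(0)
X = numpy.random.normal(size=(100, 10))
y = numpy.abs(X[:, 0]) + numpy.random.normal(scale=0.1, size=100)

model = Earth(max_degree=1, penalty=1)
model.fit(X, y)
print(model.summary())
print(model.score(X, y))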
Srisai85/scikit-learn
examples/neighbors/plot_species_kde.py
282
4059
""" ================================================ Kernel Density Estimate of Species Distributions ================================================ This shows an example of a neighbors-based query (in particular a kernel density estimate) on geospatial data, using a Ball Tree built upon the Haversine distance metric -- i.e. distances over points in latitude/longitude. The dataset is provided by Phillips et. al. (2006). If available, the example uses `basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_ to plot the coast lines and national boundaries of South America. This example does not perform any learning over the data (see :ref:`example_applications_plot_species_distribution_modeling.py` for an example of classification based on the attributes in this dataset). It simply shows the kernel density estimate of observed data points in geospatial coordinates. The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References ---------- * `"Maximum entropy modeling of species geographic distributions" <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. """ # Author: Jake Vanderplas <jakevdp@cs.washington.edu> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import fetch_species_distributions from sklearn.datasets.species_distributions import construct_grids from sklearn.neighbors import KernelDensity # if basemap is available, we'll use it. # otherwise, we'll improvise later... try: from mpl_toolkits.basemap import Basemap basemap = True except ImportError: basemap = False # Get matrices/arrays of species IDs and locations data = fetch_species_distributions() species_names = ['Bradypus Variegatus', 'Microryzomys Minutus'] Xtrain = np.vstack([data['train']['dd lat'], data['train']['dd long']]).T ytrain = np.array([d.decode('ascii').startswith('micro') for d in data['train']['species']], dtype='int') Xtrain *= np.pi / 180. # Convert lat/long to radians # Set up the data grid for the contour plot xgrid, ygrid = construct_grids(data) X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1]) land_reference = data.coverages[6][::5, ::5] land_mask = (land_reference > -9999).ravel() xy = np.vstack([Y.ravel(), X.ravel()]).T xy = xy[land_mask] xy *= np.pi / 180. 
# Plot map of South America with distributions of each species fig = plt.figure() fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05) for i in range(2): plt.subplot(1, 2, i + 1) # construct a kernel density estimate of the distribution print(" - computing KDE in spherical coordinates") kde = KernelDensity(bandwidth=0.04, metric='haversine', kernel='gaussian', algorithm='ball_tree') kde.fit(Xtrain[ytrain == i]) # evaluate only on the land: -9999 indicates ocean Z = -9999 + np.zeros(land_mask.shape[0]) Z[land_mask] = np.exp(kde.score_samples(xy)) Z = Z.reshape(X.shape) # plot contours of the density levels = np.linspace(0, Z.max(), 25) plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds) if basemap: print(" - plot coastlines using basemap") m = Basemap(projection='cyl', llcrnrlat=Y.min(), urcrnrlat=Y.max(), llcrnrlon=X.min(), urcrnrlon=X.max(), resolution='c') m.drawcoastlines() m.drawcountries() else: print(" - plot coastlines from coverage") plt.contour(X, Y, land_reference, levels=[-9999], colors="k", linestyles="solid") plt.xticks([]) plt.yticks([]) plt.title(species_names[i]) plt.show()
bsd-3-clause
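Stripped of the map drawing, the core of the example is a KernelDensity estimator with the haversine metric on (latitude, longitude) pairs expressed in radians. A minimal sketch of that core (three made-up observation points):

import numpy as np
from sklearn.neighbors import KernelDensity

latlon_deg = np.array([[-10.0, -60.0], [-10.5, -60.5], [-9.5, -59.5]])
X = latlon_deg * np.pi / 180.   # haversine expects radians, as in the example

kde = KernelDensity(bandwidth=0.04, metric='haversine',
                    kernel='gaussian', algorithm='ball_tree')
kde.fit(X)
print(np.exp(kde.score_samples(X)))   # score_samples returns log-density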
zhenv5/scikit-learn
sklearn/linear_model/bayes.py
220
15248
""" Various bayesian regression """ from __future__ import print_function # Authors: V. Michel, F. Pedregosa, A. Gramfort # License: BSD 3 clause from math import log import numpy as np from scipy import linalg from .base import LinearModel from ..base import RegressorMixin from ..utils.extmath import fast_logdet, pinvh from ..utils import check_X_y ############################################################################### # BayesianRidge regression class BayesianRidge(LinearModel, RegressorMixin): """Bayesian ridge regression Fit a Bayesian ridge model and optimize the regularization parameters lambda (precision of the weights) and alpha (precision of the noise). Read more in the :ref:`User Guide <bayesian_regression>`. Parameters ---------- n_iter : int, optional Maximum number of iterations. Default is 300. tol : float, optional Stop the algorithm if w has converged. Default is 1.e-3. alpha_1 : float, optional Hyper-parameter : shape parameter for the Gamma distribution prior over the alpha parameter. Default is 1.e-6 alpha_2 : float, optional Hyper-parameter : inverse scale parameter (rate parameter) for the Gamma distribution prior over the alpha parameter. Default is 1.e-6. lambda_1 : float, optional Hyper-parameter : shape parameter for the Gamma distribution prior over the lambda parameter. Default is 1.e-6. lambda_2 : float, optional Hyper-parameter : inverse scale parameter (rate parameter) for the Gamma distribution prior over the lambda parameter. Default is 1.e-6 compute_score : boolean, optional If True, compute the objective function at each step of the model. Default is False fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). Default is True. normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. verbose : boolean, optional, default False Verbose mode when fitting the model. Attributes ---------- coef_ : array, shape = (n_features) Coefficients of the regression model (mean of distribution) alpha_ : float estimated precision of the noise. lambda_ : array, shape = (n_features) estimated precisions of the weights. scores_ : float if computed, value of the objective function (to be maximized) Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.BayesianRidge() >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2]) ... # doctest: +NORMALIZE_WHITESPACE BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False, copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06, n_iter=300, normalize=False, tol=0.001, verbose=False) >>> clf.predict([[1, 1]]) array([ 1.]) Notes ----- See examples/linear_model/plot_bayesian_ridge.py for an example. 
""" def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6, lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False, fit_intercept=True, normalize=False, copy_X=True, verbose=False): self.n_iter = n_iter self.tol = tol self.alpha_1 = alpha_1 self.alpha_2 = alpha_2 self.lambda_1 = lambda_1 self.lambda_2 = lambda_2 self.compute_score = compute_score self.fit_intercept = fit_intercept self.normalize = normalize self.copy_X = copy_X self.verbose = verbose def fit(self, X, y): """Fit the model Parameters ---------- X : numpy array of shape [n_samples,n_features] Training data y : numpy array of shape [n_samples] Target values Returns ------- self : returns an instance of self. """ X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True) X, y, X_mean, y_mean, X_std = self._center_data( X, y, self.fit_intercept, self.normalize, self.copy_X) n_samples, n_features = X.shape ### Initialization of the values of the parameters alpha_ = 1. / np.var(y) lambda_ = 1. verbose = self.verbose lambda_1 = self.lambda_1 lambda_2 = self.lambda_2 alpha_1 = self.alpha_1 alpha_2 = self.alpha_2 self.scores_ = list() coef_old_ = None XT_y = np.dot(X.T, y) U, S, Vh = linalg.svd(X, full_matrices=False) eigen_vals_ = S ** 2 ### Convergence loop of the bayesian ridge regression for iter_ in range(self.n_iter): ### Compute mu and sigma # sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X) # coef_ = sigma_^-1 * XT * y if n_samples > n_features: coef_ = np.dot(Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, None]) coef_ = np.dot(coef_, XT_y) if self.compute_score: logdet_sigma_ = - np.sum( np.log(lambda_ + alpha_ * eigen_vals_)) else: coef_ = np.dot(X.T, np.dot( U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T)) coef_ = np.dot(coef_, y) if self.compute_score: logdet_sigma_ = lambda_ * np.ones(n_features) logdet_sigma_[:n_samples] += alpha_ * eigen_vals_ logdet_sigma_ = - np.sum(np.log(logdet_sigma_)) ### Update alpha and lambda rmse_ = np.sum((y - np.dot(X, coef_)) ** 2) gamma_ = (np.sum((alpha_ * eigen_vals_) / (lambda_ + alpha_ * eigen_vals_))) lambda_ = ((gamma_ + 2 * lambda_1) / (np.sum(coef_ ** 2) + 2 * lambda_2)) alpha_ = ((n_samples - gamma_ + 2 * alpha_1) / (rmse_ + 2 * alpha_2)) ### Compute the objective function if self.compute_score: s = lambda_1 * log(lambda_) - lambda_2 * lambda_ s += alpha_1 * log(alpha_) - alpha_2 * alpha_ s += 0.5 * (n_features * log(lambda_) + n_samples * log(alpha_) - alpha_ * rmse_ - (lambda_ * np.sum(coef_ ** 2)) - logdet_sigma_ - n_samples * log(2 * np.pi)) self.scores_.append(s) ### Check for convergence if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol: if verbose: print("Convergence after ", str(iter_), " iterations") break coef_old_ = np.copy(coef_) self.alpha_ = alpha_ self.lambda_ = lambda_ self.coef_ = coef_ self._set_intercept(X_mean, y_mean, X_std) return self ############################################################################### # ARD (Automatic Relevance Determination) regression class ARDRegression(LinearModel, RegressorMixin): """Bayesian ARD regression. Fit the weights of a regression model, using an ARD prior. The weights of the regression model are assumed to be in Gaussian distributions. Also estimate the parameters lambda (precisions of the distributions of the weights) and alpha (precision of the distribution of the noise). The estimation is done by an iterative procedures (Evidence Maximization) Read more in the :ref:`User Guide <bayesian_regression>`. 
Parameters ---------- n_iter : int, optional Maximum number of iterations. Default is 300 tol : float, optional Stop the algorithm if w has converged. Default is 1.e-3. alpha_1 : float, optional Hyper-parameter : shape parameter for the Gamma distribution prior over the alpha parameter. Default is 1.e-6. alpha_2 : float, optional Hyper-parameter : inverse scale parameter (rate parameter) for the Gamma distribution prior over the alpha parameter. Default is 1.e-6. lambda_1 : float, optional Hyper-parameter : shape parameter for the Gamma distribution prior over the lambda parameter. Default is 1.e-6. lambda_2 : float, optional Hyper-parameter : inverse scale parameter (rate parameter) for the Gamma distribution prior over the lambda parameter. Default is 1.e-6. compute_score : boolean, optional If True, compute the objective function at each step of the model. Default is False. threshold_lambda : float, optional threshold for removing (pruning) weights with high precision from the computation. Default is 1.e+4. fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). Default is True. normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. copy_X : boolean, optional, default True. If True, X will be copied; else, it may be overwritten. verbose : boolean, optional, default False Verbose mode when fitting the model. Attributes ---------- coef_ : array, shape = (n_features) Coefficients of the regression model (mean of distribution) alpha_ : float estimated precision of the noise. lambda_ : array, shape = (n_features) estimated precisions of the weights. sigma_ : array, shape = (n_features, n_features) estimated variance-covariance matrix of the weights scores_ : float if computed, value of the objective function (to be maximized) Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.ARDRegression() >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2]) ... # doctest: +NORMALIZE_WHITESPACE ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False, copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06, n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001, verbose=False) >>> clf.predict([[1, 1]]) array([ 1.]) Notes -------- See examples/linear_model/plot_ard.py for an example. """ def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6, lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False, threshold_lambda=1.e+4, fit_intercept=True, normalize=False, copy_X=True, verbose=False): self.n_iter = n_iter self.tol = tol self.fit_intercept = fit_intercept self.normalize = normalize self.alpha_1 = alpha_1 self.alpha_2 = alpha_2 self.lambda_1 = lambda_1 self.lambda_2 = lambda_2 self.compute_score = compute_score self.threshold_lambda = threshold_lambda self.copy_X = copy_X self.verbose = verbose def fit(self, X, y): """Fit the ARDRegression model according to the given training data and parameters. Iterative procedure to maximize the evidence Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. y : array, shape = [n_samples] Target values (integers) Returns ------- self : returns an instance of self. 
""" X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True) n_samples, n_features = X.shape coef_ = np.zeros(n_features) X, y, X_mean, y_mean, X_std = self._center_data( X, y, self.fit_intercept, self.normalize, self.copy_X) ### Launch the convergence loop keep_lambda = np.ones(n_features, dtype=bool) lambda_1 = self.lambda_1 lambda_2 = self.lambda_2 alpha_1 = self.alpha_1 alpha_2 = self.alpha_2 verbose = self.verbose ### Initialization of the values of the parameters alpha_ = 1. / np.var(y) lambda_ = np.ones(n_features) self.scores_ = list() coef_old_ = None ### Iterative procedure of ARDRegression for iter_ in range(self.n_iter): ### Compute mu and sigma (using Woodbury matrix identity) sigma_ = pinvh(np.eye(n_samples) / alpha_ + np.dot(X[:, keep_lambda] * np.reshape(1. / lambda_[keep_lambda], [1, -1]), X[:, keep_lambda].T)) sigma_ = np.dot(sigma_, X[:, keep_lambda] * np.reshape(1. / lambda_[keep_lambda], [1, -1])) sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) * X[:, keep_lambda].T, sigma_) sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda] coef_[keep_lambda] = alpha_ * np.dot( sigma_, np.dot(X[:, keep_lambda].T, y)) ### Update alpha and lambda rmse_ = np.sum((y - np.dot(X, coef_)) ** 2) gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_) lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) / ((coef_[keep_lambda]) ** 2 + 2. * lambda_2)) alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) / (rmse_ + 2. * alpha_2)) ### Prune the weights with a precision over a threshold keep_lambda = lambda_ < self.threshold_lambda coef_[~keep_lambda] = 0 ### Compute the objective function if self.compute_score: s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum() s += alpha_1 * log(alpha_) - alpha_2 * alpha_ s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) + np.sum(np.log(lambda_))) s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum()) self.scores_.append(s) ### Check for convergence if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol: if verbose: print("Converged after %s iterations" % iter_) break coef_old_ = np.copy(coef_) self.coef_ = coef_ self.alpha_ = alpha_ self.sigma_ = sigma_ self.lambda_ = lambda_ self._set_intercept(X_mean, y_mean, X_std) return self
bsd-3-clause
OpringaoDoTurno/airflow
docs/conf.py
23
8948
# -*- coding: utf-8 -*- # # Airflow documentation build configuration file, created by # sphinx-quickstart on Thu Oct 9 20:50:01 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys import mock MOCK_MODULES = [ 'apiclient', 'apiclient.discovery', 'apiclient.http', 'mesos', 'mesos.interface', 'mesos.native', 'oauth2client.service_account', 'pandas.io.gbq', ] for mod_name in MOCK_MODULES: sys.modules[mod_name] = mock.Mock() # Hack to allow changing for piece of the code to behave differently while # the docs are being built. The main objective was to alter the # behavior of the utils.apply_default that was hiding function headers os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE' from airflow import settings # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinxarg.ext', ] viewcode_import = True # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Airflow' #copyright = u'' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. #version = '1.0.0' # The full version, including alpha/beta/rc tags. #release = '1.0.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. 
#keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] import sphinx_rtd_theme html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = "Airflow Documentation" # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = "" # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = False # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Airflowdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ ('index', 'Airflow.tex', u'Airflow Documentation', u'Apache Airflow', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'airflow', u'Airflow Documentation', [u'Apache Airflow'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [( 'index', 'Airflow', u'Airflow Documentation', u'Apache Airflow', 'Airflow', 'Airflow is a system to programmaticaly author, schedule and monitor data pipelines.', 'Miscellaneous' ),] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
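Standalone sketch of the dependency-mocking trick used at the top of this conf.py; it is not part of the file itself, and the module name 'heavy_optional_backend' is made up for illustration. Registering Mock objects in sys.modules lets autodoc import project modules whose optional dependencies are absent on the docs build host.

import sys
import mock

# Pretend these optional dependencies are installed so that module-level
# imports inside the documented package do not fail during autodoc.
for mod_name in ['heavy_optional_backend', 'heavy_optional_backend.client']:
    sys.modules[mod_name] = mock.Mock()

import heavy_optional_backend                      # resolves to the Mock registered above
client = heavy_optional_backend.client.Client()    # attribute access and calls also yield Mocks
print(client)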
apache-2.0
Aasmi/scikit-learn
examples/exercises/plot_iris_exercise.py
323
1602
""" ================================ SVM Exercise ================================ A tutorial exercise for using different SVM kernels. This exercise is used in the :ref:`using_kernels_tut` part of the :ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import datasets, svm iris = datasets.load_iris() X = iris.data y = iris.target X = X[y != 0, :2] y = y[y != 0] n_sample = len(X) np.random.seed(0) order = np.random.permutation(n_sample) X = X[order] y = y[order].astype(np.float) X_train = X[:.9 * n_sample] y_train = y[:.9 * n_sample] X_test = X[.9 * n_sample:] y_test = y[.9 * n_sample:] # fit the model for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')): clf = svm.SVC(kernel=kernel, gamma=10) clf.fit(X_train, y_train) plt.figure(fig_num) plt.clf() plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired) # Circle out the test data plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10) plt.axis('tight') x_min = X[:, 0].min() x_max = X[:, 0].max() y_min = X[:, 1].min() y_max = X[:, 1].max() XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j] Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()]) # Put the result into a color plot Z = Z.reshape(XX.shape) plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired) plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'], levels=[-.5, 0, .5]) plt.title(kernel) plt.show()
bsd-3-clause
mwv/scikit-learn
sklearn/decomposition/tests/test_truncated_svd.py
240
6055
"""Test truncated SVD transformer.""" import numpy as np import scipy.sparse as sp from sklearn.decomposition import TruncatedSVD from sklearn.utils import check_random_state from sklearn.utils.testing import (assert_array_almost_equal, assert_equal, assert_raises, assert_greater, assert_array_less) # Make an X that looks somewhat like a small tf-idf matrix. # XXX newer versions of SciPy have scipy.sparse.rand for this. shape = 60, 55 n_samples, n_features = shape rng = check_random_state(42) X = rng.randint(-100, 20, np.product(shape)).reshape(shape) X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64) X.data[:] = 1 + np.log(X.data) Xdense = X.A def test_algorithms(): svd_a = TruncatedSVD(30, algorithm="arpack") svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42) Xa = svd_a.fit_transform(X)[:, :6] Xr = svd_r.fit_transform(X)[:, :6] assert_array_almost_equal(Xa, Xr) comp_a = np.abs(svd_a.components_) comp_r = np.abs(svd_r.components_) # All elements are equal, but some elements are more equal than others. assert_array_almost_equal(comp_a[:9], comp_r[:9]) assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3) def test_attributes(): for n_components in (10, 25, 41): tsvd = TruncatedSVD(n_components).fit(X) assert_equal(tsvd.n_components, n_components) assert_equal(tsvd.components_.shape, (n_components, n_features)) def test_too_many_components(): for algorithm in ["arpack", "randomized"]: for n_components in (n_features, n_features+1): tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm) assert_raises(ValueError, tsvd.fit, X) def test_sparse_formats(): for fmt in ("array", "csr", "csc", "coo", "lil"): Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)() tsvd = TruncatedSVD(n_components=11) Xtrans = tsvd.fit_transform(Xfmt) assert_equal(Xtrans.shape, (n_samples, 11)) Xtrans = tsvd.transform(Xfmt) assert_equal(Xtrans.shape, (n_samples, 11)) def test_inverse_transform(): for algo in ("arpack", "randomized"): # We need a lot of components for the reconstruction to be "almost # equal" in all positions. XXX Test means or sums instead? 
tsvd = TruncatedSVD(n_components=52, random_state=42) Xt = tsvd.fit_transform(X) Xinv = tsvd.inverse_transform(Xt) assert_array_almost_equal(Xinv, Xdense, decimal=1) def test_integers(): Xint = X.astype(np.int64) tsvd = TruncatedSVD(n_components=6) Xtrans = tsvd.fit_transform(Xint) assert_equal(Xtrans.shape, (n_samples, tsvd.n_components)) def test_explained_variance(): # Test sparse data svd_a_10_sp = TruncatedSVD(10, algorithm="arpack") svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42) svd_a_20_sp = TruncatedSVD(20, algorithm="arpack") svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42) X_trans_a_10_sp = svd_a_10_sp.fit_transform(X) X_trans_r_10_sp = svd_r_10_sp.fit_transform(X) X_trans_a_20_sp = svd_a_20_sp.fit_transform(X) X_trans_r_20_sp = svd_r_20_sp.fit_transform(X) # Test dense data svd_a_10_de = TruncatedSVD(10, algorithm="arpack") svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42) svd_a_20_de = TruncatedSVD(20, algorithm="arpack") svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42) X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray()) X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray()) X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray()) X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray()) # helper arrays for tests below svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de, svd_r_10_de, svd_a_20_de, svd_r_20_de) svds_trans = ( (svd_a_10_sp, X_trans_a_10_sp), (svd_r_10_sp, X_trans_r_10_sp), (svd_a_20_sp, X_trans_a_20_sp), (svd_r_20_sp, X_trans_r_20_sp), (svd_a_10_de, X_trans_a_10_de), (svd_r_10_de, X_trans_r_10_de), (svd_a_20_de, X_trans_a_20_de), (svd_r_20_de, X_trans_r_20_de), ) svds_10_v_20 = ( (svd_a_10_sp, svd_a_20_sp), (svd_r_10_sp, svd_r_20_sp), (svd_a_10_de, svd_a_20_de), (svd_r_10_de, svd_r_20_de), ) svds_sparse_v_dense = ( (svd_a_10_sp, svd_a_10_de), (svd_a_20_sp, svd_a_20_de), (svd_r_10_sp, svd_r_10_de), (svd_r_20_sp, svd_r_20_de), ) # Assert the 1st component is equal for svd_10, svd_20 in svds_10_v_20: assert_array_almost_equal( svd_10.explained_variance_ratio_, svd_20.explained_variance_ratio_[:10], decimal=5, ) # Assert that 20 components has higher explained variance than 10 for svd_10, svd_20 in svds_10_v_20: assert_greater( svd_20.explained_variance_ratio_.sum(), svd_10.explained_variance_ratio_.sum(), ) # Assert that all the values are greater than 0 for svd in svds: assert_array_less(0.0, svd.explained_variance_ratio_) # Assert that total explained variance is less than 1 for svd in svds: assert_array_less(svd.explained_variance_ratio_.sum(), 1.0) # Compare sparse vs. dense for svd_sparse, svd_dense in svds_sparse_v_dense: assert_array_almost_equal(svd_sparse.explained_variance_ratio_, svd_dense.explained_variance_ratio_) # Test that explained_variance is correct for svd, transformed in svds_trans: total_variance = np.var(X.toarray(), axis=0).sum() variances = np.var(transformed, axis=0) true_explained_variance_ratio = variances / total_variance assert_array_almost_equal( svd.explained_variance_ratio_, true_explained_variance_ratio, )
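Minimal usage sketch, separate from the test module above: TruncatedSVD accepts scipy.sparse input directly, which is the main reason to prefer it over PCA for tf-idf-like matrices. The matrix shape and density below are arbitrary illustrative values.

import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD

rng = np.random.RandomState(0)
counts = sp.rand(100, 500, density=0.02, random_state=rng)  # fake sparse term counts

svd = TruncatedSVD(n_components=10, algorithm="randomized", random_state=42)
X_reduced = svd.fit_transform(counts)

print(X_reduced.shape)                          # (100, 10)
print(svd.explained_variance_ratio_.sum())      # fraction of variance retained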
bsd-3-clause
hwroitzsch/BikersLifeSaver
lib/python3.5/site-packages/numpy/core/tests/test_multiarray.py
2
220131
from __future__ import division, absolute_import, print_function import collections import tempfile import sys import shutil import warnings import operator import io import itertools if sys.version_info[0] >= 3: import builtins else: import __builtin__ as builtins from decimal import Decimal import numpy as np from nose import SkipTest from numpy.compat import asbytes, getexception, strchar, unicode, sixu from test_print import in_foreign_locale from numpy.core.multiarray_tests import ( test_neighborhood_iterator, test_neighborhood_iterator_oob, test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end, test_inplace_increment, get_buffer_info, test_as_c_array ) from numpy.testing import ( TestCase, run_module_suite, assert_, assert_raises, assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, assert_allclose, assert_array_less, runstring, dec ) # Need to test an object that does not fully implement math interface from datetime import timedelta if sys.version_info[:2] > (3, 2): # In Python 3.3 the representation of empty shape, strides and suboffsets # is an empty tuple instead of None. # http://docs.python.org/dev/whatsnew/3.3.html#api-changes EMPTY = () else: EMPTY = None class TestFlags(TestCase): def setUp(self): self.a = np.arange(10) def test_writeable(self): mydict = locals() self.a.flags.writeable = False self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict) self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict) self.a.flags.writeable = True self.a[0] = 5 self.a[0] = 0 def test_otherflags(self): assert_equal(self.a.flags.carray, True) assert_equal(self.a.flags.farray, False) assert_equal(self.a.flags.behaved, True) assert_equal(self.a.flags.fnc, False) assert_equal(self.a.flags.forc, True) assert_equal(self.a.flags.owndata, True) assert_equal(self.a.flags.writeable, True) assert_equal(self.a.flags.aligned, True) assert_equal(self.a.flags.updateifcopy, False) def test_string_align(self): a = np.zeros(4, dtype=np.dtype('|S4')) assert_(a.flags.aligned) # not power of two are accessed bytewise and thus considered aligned a = np.zeros(5, dtype=np.dtype('|S4')) assert_(a.flags.aligned) def test_void_align(self): a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")])) assert_(a.flags.aligned) class TestHash(TestCase): # see #3793 def test_int(self): for st, ut, s in [(np.int8, np.uint8, 8), (np.int16, np.uint16, 16), (np.int32, np.uint32, 32), (np.int64, np.uint64, 64)]: for i in range(1, s): assert_equal(hash(st(-2**i)), hash(-2**i), err_msg="%r: -2**%d" % (st, i)) assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)), err_msg="%r: 2**%d" % (st, i - 1)) assert_equal(hash(st(2**i - 1)), hash(2**i - 1), err_msg="%r: 2**%d - 1" % (st, i)) i = max(i - 1, 1) assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)), err_msg="%r: 2**%d" % (ut, i - 1)) assert_equal(hash(ut(2**i - 1)), hash(2**i - 1), err_msg="%r: 2**%d - 1" % (ut, i)) class TestAttributes(TestCase): def setUp(self): self.one = np.arange(10) self.two = np.arange(20).reshape(4, 5) self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) def test_attributes(self): assert_equal(self.one.shape, (10,)) assert_equal(self.two.shape, (4, 5)) assert_equal(self.three.shape, (2, 5, 6)) self.three.shape = (10, 3, 2) assert_equal(self.three.shape, (10, 3, 2)) self.three.shape = (2, 5, 6) assert_equal(self.one.strides, (self.one.itemsize,)) num = self.two.itemsize assert_equal(self.two.strides, (5*num, num)) num = self.three.itemsize assert_equal(self.three.strides, 
(30*num, 6*num, num)) assert_equal(self.one.ndim, 1) assert_equal(self.two.ndim, 2) assert_equal(self.three.ndim, 3) num = self.two.itemsize assert_equal(self.two.size, 20) assert_equal(self.two.nbytes, 20*num) assert_equal(self.two.itemsize, self.two.dtype.itemsize) assert_equal(self.two.base, np.arange(20)) def test_dtypeattr(self): assert_equal(self.one.dtype, np.dtype(np.int_)) assert_equal(self.three.dtype, np.dtype(np.float_)) assert_equal(self.one.dtype.char, 'l') assert_equal(self.three.dtype.char, 'd') self.assertTrue(self.three.dtype.str[0] in '<>') assert_equal(self.one.dtype.str[1], 'i') assert_equal(self.three.dtype.str[1], 'f') def test_int_subclassing(self): # Regression test for https://github.com/numpy/numpy/pull/3526 numpy_int = np.int_(0) if sys.version_info[0] >= 3: # On Py3k int_ should not inherit from int, because it's not fixed-width anymore assert_equal(isinstance(numpy_int, int), False) else: # Otherwise, it should inherit from int... assert_equal(isinstance(numpy_int, int), True) # ... and fast-path checks on C-API level should also work from numpy.core.multiarray_tests import test_int_subclass assert_equal(test_int_subclass(numpy_int), True) def test_stridesattr(self): x = self.one def make_array(size, offset, strides): return np.ndarray(size, buffer=x, dtype=int, offset=offset*x.itemsize, strides=strides*x.itemsize) assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) self.assertRaises(ValueError, make_array, 4, 4, -2) self.assertRaises(ValueError, make_array, 4, 2, -1) self.assertRaises(ValueError, make_array, 8, 3, 1) assert_equal(make_array(8, 3, 0), np.array([3]*8)) # Check behavior reported in gh-2503: self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3])) make_array(0, 0, 10) def test_set_stridesattr(self): x = self.one def make_array(size, offset, strides): try: r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize) except: raise RuntimeError(getexception()) r.strides = strides = strides*x.itemsize return r assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9])) self.assertRaises(ValueError, make_array, 4, 4, -2) self.assertRaises(ValueError, make_array, 4, 2, -1) self.assertRaises(RuntimeError, make_array, 8, 3, 1) # Check that the true extent of the array is used. # Test relies on as_strided base not exposing a buffer. x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) def set_strides(arr, strides): arr.strides = strides self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize)) # Test for offset calculations: x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], shape=(10,), strides=(-1,)) self.assertRaises(ValueError, set_strides, x[::-1], -1) a = x[::-1] a.strides = 1 a[::2].strides = 2 def test_fill(self): for t in "?bhilqpBHILQPfdgFDGO": x = np.empty((3, 2, 1), t) y = np.empty((3, 2, 1), t) x.fill(1) y[...] = 1 assert_equal(x, y) def test_fill_max_uint64(self): x = np.empty((3, 2, 1), dtype=np.uint64) y = np.empty((3, 2, 1), dtype=np.uint64) value = 2**64 - 1 y[...] 
= value x.fill(value) assert_array_equal(x, y) def test_fill_struct_array(self): # Filling from a scalar x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8') x.fill(x[0]) assert_equal(x['f1'][1], x['f1'][0]) # Filling from a tuple that can be converted # to a scalar x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')]) x.fill((3.5, -2)) assert_array_equal(x['a'], [3.5, 3.5]) assert_array_equal(x['b'], [-2, -2]) class TestArrayConstruction(TestCase): def test_array(self): d = np.ones(6) r = np.array([d, d]) assert_equal(r, np.ones((2, 6))) d = np.ones(6) tgt = np.ones((2, 6)) r = np.array([d, d]) assert_equal(r, tgt) tgt[1] = 2 r = np.array([d, d + 1]) assert_equal(r, tgt) d = np.ones(6) r = np.array([[d, d]]) assert_equal(r, np.ones((1, 2, 6))) d = np.ones(6) r = np.array([[d, d], [d, d]]) assert_equal(r, np.ones((2, 2, 6))) d = np.ones((6, 6)) r = np.array([d, d]) assert_equal(r, np.ones((2, 6, 6))) d = np.ones((6, )) r = np.array([[d, d + 1], d + 2]) assert_equal(len(r), 2) assert_equal(r[0], [d, d + 1]) assert_equal(r[1], d + 2) tgt = np.ones((2, 3), dtype=np.bool) tgt[0, 2] = False tgt[1, 0:2] = False r = np.array([[True, True, False], [False, False, True]]) assert_equal(r, tgt) r = np.array([[True, False], [True, False], [False, True]]) assert_equal(r, tgt.T) def test_array_empty(self): assert_raises(TypeError, np.array) def test_array_copy_false(self): d = np.array([1, 2, 3]) e = np.array(d, copy=False) d[1] = 3 assert_array_equal(e, [1, 3, 3]) e = np.array(d, copy=False, order='F') d[1] = 4 assert_array_equal(e, [1, 4, 3]) e[2] = 7 assert_array_equal(d, [1, 4, 7]) def test_array_copy_true(self): d = np.array([[1,2,3], [1, 2, 3]]) e = np.array(d, copy=True) d[0, 1] = 3 e[0, 2] = -7 assert_array_equal(e, [[1, 2, -7], [1, 2, 3]]) assert_array_equal(d, [[1, 3, 3], [1, 2, 3]]) e = np.array(d, copy=True, order='F') d[0, 1] = 5 e[0, 2] = 7 assert_array_equal(e, [[1, 3, 7], [1, 2, 3]]) assert_array_equal(d, [[1, 5, 3], [1,2,3]]) def test_array_cont(self): d = np.ones(10)[::2] assert_(np.ascontiguousarray(d).flags.c_contiguous) assert_(np.ascontiguousarray(d).flags.f_contiguous) assert_(np.asfortranarray(d).flags.c_contiguous) assert_(np.asfortranarray(d).flags.f_contiguous) d = np.ones((10, 10))[::2,::2] assert_(np.ascontiguousarray(d).flags.c_contiguous) assert_(np.asfortranarray(d).flags.f_contiguous) class TestAssignment(TestCase): def test_assignment_broadcasting(self): a = np.arange(6).reshape(2, 3) # Broadcasting the input to the output a[...] = np.arange(3) assert_equal(a, [[0, 1, 2], [0, 1, 2]]) a[...] = np.arange(2).reshape(2, 1) assert_equal(a, [[0, 0, 0], [1, 1, 1]]) # For compatibility with <= 1.5, a limited version of broadcasting # the output to the input. # # This behavior is inconsistent with NumPy broadcasting # in general, because it only uses one of the two broadcasting # rules (adding a new "1" dimension to the left of the shape), # applied to the output instead of an input. In NumPy 2.0, this kind # of broadcasting assignment will likely be disallowed. a[...] = np.arange(6)[::-1].reshape(1, 2, 3) assert_equal(a, [[5, 4, 3], [2, 1, 0]]) # The other type of broadcasting would require a reduction operation. def assign(a, b): a[...] 
= b assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3)) def test_assignment_errors(self): # Address issue #2276 class C: pass a = np.zeros(1) def assign(v): a[0] = v assert_raises((AttributeError, TypeError), assign, C()) assert_raises(ValueError, assign, [1]) class TestDtypedescr(TestCase): def test_construction(self): d1 = np.dtype('i4') assert_equal(d1, np.dtype(np.int32)) d2 = np.dtype('f8') assert_equal(d2, np.dtype(np.float64)) def test_byteorders(self): self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4')) self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')])) class TestZeroRank(TestCase): def setUp(self): self.d = np.array(0), np.array('x', object) def test_ellipsis_subscript(self): a, b = self.d self.assertEqual(a[...], 0) self.assertEqual(b[...], 'x') self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9. self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9. def test_empty_subscript(self): a, b = self.d self.assertEqual(a[()], 0) self.assertEqual(b[()], 'x') self.assertTrue(type(a[()]) is a.dtype.type) self.assertTrue(type(b[()]) is str) def test_invalid_subscript(self): a, b = self.d self.assertRaises(IndexError, lambda x: x[0], a) self.assertRaises(IndexError, lambda x: x[0], b) self.assertRaises(IndexError, lambda x: x[np.array([], int)], a) self.assertRaises(IndexError, lambda x: x[np.array([], int)], b) def test_ellipsis_subscript_assignment(self): a, b = self.d a[...] = 42 self.assertEqual(a, 42) b[...] = '' self.assertEqual(b.item(), '') def test_empty_subscript_assignment(self): a, b = self.d a[()] = 42 self.assertEqual(a, 42) b[()] = '' self.assertEqual(b.item(), '') def test_invalid_subscript_assignment(self): a, b = self.d def assign(x, i, v): x[i] = v self.assertRaises(IndexError, assign, a, 0, 42) self.assertRaises(IndexError, assign, b, 0, '') self.assertRaises(ValueError, assign, a, (), '') def test_newaxis(self): a, b = self.d self.assertEqual(a[np.newaxis].shape, (1,)) self.assertEqual(a[..., np.newaxis].shape, (1,)) self.assertEqual(a[np.newaxis, ...].shape, (1,)) self.assertEqual(a[..., np.newaxis].shape, (1,)) self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1)) self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1)) self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1)) self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10) def test_invalid_newaxis(self): a, b = self.d def subscript(x, i): x[i] self.assertRaises(IndexError, subscript, a, (np.newaxis, 0)) self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50) def test_constructor(self): x = np.ndarray(()) x[()] = 5 self.assertEqual(x[()], 5) y = np.ndarray((), buffer=x) y[()] = 6 self.assertEqual(x[()], 6) def test_output(self): x = np.array(2) self.assertRaises(ValueError, np.add, x, [1], x) class TestScalarIndexing(TestCase): def setUp(self): self.d = np.array([0, 1])[0] def test_ellipsis_subscript(self): a = self.d self.assertEqual(a[...], 0) self.assertEqual(a[...].shape, ()) def test_empty_subscript(self): a = self.d self.assertEqual(a[()], 0) self.assertEqual(a[()].shape, ()) def test_invalid_subscript(self): a = self.d self.assertRaises(IndexError, lambda x: x[0], a) self.assertRaises(IndexError, lambda x: x[np.array([], int)], a) def test_invalid_subscript_assignment(self): a = self.d def assign(x, i, v): x[i] = v self.assertRaises(TypeError, assign, a, 0, 42) def test_newaxis(self): a = self.d self.assertEqual(a[np.newaxis].shape, (1,)) self.assertEqual(a[..., np.newaxis].shape, (1,)) self.assertEqual(a[np.newaxis, 
...].shape, (1,)) self.assertEqual(a[..., np.newaxis].shape, (1,)) self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1)) self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1)) self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1)) self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10) def test_invalid_newaxis(self): a = self.d def subscript(x, i): x[i] self.assertRaises(IndexError, subscript, a, (np.newaxis, 0)) self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50) def test_overlapping_assignment(self): # With positive strides a = np.arange(4) a[:-1] = a[1:] assert_equal(a, [1, 2, 3, 3]) a = np.arange(4) a[1:] = a[:-1] assert_equal(a, [0, 0, 1, 2]) # With positive and negative strides a = np.arange(4) a[:] = a[::-1] assert_equal(a, [3, 2, 1, 0]) a = np.arange(6).reshape(2, 3) a[::-1,:] = a[:, ::-1] assert_equal(a, [[5, 4, 3], [2, 1, 0]]) a = np.arange(6).reshape(2, 3) a[::-1, ::-1] = a[:, ::-1] assert_equal(a, [[3, 4, 5], [0, 1, 2]]) # With just one element overlapping a = np.arange(5) a[:3] = a[2:] assert_equal(a, [2, 3, 4, 3, 4]) a = np.arange(5) a[2:] = a[:3] assert_equal(a, [0, 1, 0, 1, 2]) a = np.arange(5) a[2::-1] = a[2:] assert_equal(a, [4, 3, 2, 3, 4]) a = np.arange(5) a[2:] = a[2::-1] assert_equal(a, [0, 1, 2, 1, 0]) a = np.arange(5) a[2::-1] = a[:1:-1] assert_equal(a, [2, 3, 4, 3, 4]) a = np.arange(5) a[:1:-1] = a[2::-1] assert_equal(a, [0, 1, 0, 1, 2]) class TestCreation(TestCase): def test_from_attribute(self): class x(object): def __array__(self, dtype=None): pass self.assertRaises(ValueError, np.array, x()) def test_from_string(self): types = np.typecodes['AllInteger'] + np.typecodes['Float'] nstr = ['123', '123'] result = np.array([123, 123], dtype=int) for type in types: msg = 'String conversion for %s' % type assert_equal(np.array(nstr, dtype=type), result, err_msg=msg) def test_void(self): arr = np.array([], dtype='V') assert_equal(arr.dtype.kind, 'V') def test_zeros(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] for dt in types: d = np.zeros((13,), dtype=dt) assert_equal(np.count_nonzero(d), 0) # true for ieee floats assert_equal(d.sum(), 0) assert_(not d.any()) d = np.zeros(2, dtype='(2,4)i4') assert_equal(np.count_nonzero(d), 0) assert_equal(d.sum(), 0) assert_(not d.any()) d = np.zeros(2, dtype='4i4') assert_equal(np.count_nonzero(d), 0) assert_equal(d.sum(), 0) assert_(not d.any()) d = np.zeros(2, dtype='(2,4)i4, (2,4)i4') assert_equal(np.count_nonzero(d), 0) @dec.slow def test_zeros_big(self): # test big array as they might be allocated different by the sytem types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] for dt in types: d = np.zeros((30 * 1024**2,), dtype=dt) assert_(not d.any()) def test_zeros_obj(self): # test initialization from PyLong(0) d = np.zeros((13,), dtype=object) assert_array_equal(d, [0] * 13) assert_equal(np.count_nonzero(d), 0) def test_zeros_obj_obj(self): d = np.zeros(10, dtype=[('k', object, 2)]) assert_array_equal(d['k'], 0) def test_zeros_like_like_zeros(self): # test zeros_like returns the same as zeros for c in np.typecodes['All']: if c == 'V': continue d = np.zeros((3,3), dtype=c) assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) # explicitly check some special cases d = np.zeros((3,3), dtype='S5') assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3,3), dtype='U5') assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3,3), dtype='<i4') 
assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3,3), dtype='>i4') assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3,3), dtype='<M8[s]') assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3,3), dtype='>M8[s]') assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3,3), dtype='f4,f4') assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) def test_empty_unicode(self): # don't throw decode errors on garbage memory for i in range(5, 100, 5): d = np.empty(i, dtype='U') str(d) def test_sequence_non_homogenous(self): assert_equal(np.array([4, 2**80]).dtype, np.object) assert_equal(np.array([4, 2**80, 4]).dtype, np.object) assert_equal(np.array([2**80, 4]).dtype, np.object) assert_equal(np.array([2**80] * 3).dtype, np.object) assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex) assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex) assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex) @dec.skipif(sys.version_info[0] >= 3) def test_sequence_long(self): assert_equal(np.array([long(4), long(4)]).dtype, np.long) assert_equal(np.array([long(4), 2**80]).dtype, np.object) assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object) assert_equal(np.array([2**80, long(4)]).dtype, np.object) def test_non_sequence_sequence(self): """Should not segfault. Class Fail breaks the sequence protocol for new style classes, i.e., those derived from object. Class Map is a mapping type indicated by raising a ValueError. At some point we may raise a warning instead of an error in the Fail case. """ class Fail(object): def __len__(self): return 1 def __getitem__(self, index): raise ValueError() class Map(object): def __len__(self): return 1 def __getitem__(self, index): raise KeyError() a = np.array([Map()]) assert_(a.shape == (1,)) assert_(a.dtype == np.dtype(object)) assert_raises(ValueError, np.array, [Fail()]) def test_no_len_object_type(self): # gh-5100, want object array from iterable object without len() class Point2: def __init__(self): pass def __getitem__(self, ind): if ind in [0, 1]: return ind else: raise IndexError() d = np.array([Point2(), Point2(), Point2()]) assert_equal(d.dtype, np.dtype(object)) class TestStructured(TestCase): def test_subarray_field_access(self): a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))]) a['a'] = np.arange(60).reshape(3, 5, 2, 2) # Since the subarray is always in C-order, a transpose # does not swap the subarray: assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3)) # In Fortran order, the subarray gets appended # like in all other cases, not prepended as a special case b = a.copy(order='F') assert_equal(a['a'].shape, b['a'].shape) assert_equal(a.T['a'].shape, a.T.copy()['a'].shape) def test_subarray_comparison(self): # Check that comparisons between record arrays with # multi-dimensional field types work properly a = np.rec.fromrecords( [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])], dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))]) b = a.copy() assert_equal(a == b, [True, True]) assert_equal(a != b, [False, False]) b[1].b = 'c' assert_equal(a == b, [True, False]) assert_equal(a != b, [False, True]) for i in range(3): b[0].a = a[0].a b[0].a[i] = 5 assert_equal(a == b, [False, False]) assert_equal(a != b, [True, True]) for i in range(2): for j in range(2): b = 
a.copy() b[0].c[i, j] = 10 assert_equal(a == b, [False, True]) assert_equal(a != b, [True, False]) # Check that broadcasting with a subarray works a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')]) b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')]) assert_equal(a == b, [[True, True, False], [False, False, True]]) assert_equal(b == a, [[True, True, False], [False, False, True]]) a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))]) b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))]) assert_equal(a == b, [[True, True, False], [False, False, True]]) assert_equal(b == a, [[True, True, False], [False, False, True]]) a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))]) b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) assert_equal(a == b, [[True, False, False], [False, False, True]]) assert_equal(b == a, [[True, False, False], [False, False, True]]) # Check that broadcasting Fortran-style arrays with a subarray work a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F') b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) assert_equal(a == b, [[True, False, False], [False, False, True]]) assert_equal(b == a, [[True, False, False], [False, False, True]]) # Check that incompatible sub-array shapes don't result to broadcasting x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')]) y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) # This comparison invokes deprecated behaviour, and will probably # start raising an error eventually. What we really care about in this # test is just that it doesn't return True. with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) assert_equal(x == y, False) x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')]) y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) # This comparison invokes deprecated behaviour, and will probably # start raising an error eventually. What we really care about in this # test is just that it doesn't return True. 
with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) assert_equal(x == y, False) # Check that structured arrays that are different only in # byte-order work a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')]) b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')]) assert_equal(a == b, [False, True]) def test_casting(self): # Check that casting a structured array to change its byte order # works a = np.array([(1,)], dtype=[('a', '<i4')]) assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe')) b = a.astype([('a', '>i4')]) assert_equal(b, a.byteswap().newbyteorder()) assert_equal(a['a'][0], b['a'][0]) # Check that equality comparison works on structured arrays if # they are 'equiv'-castable a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')]) b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')]) assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) assert_equal(a == b, [True, True]) # Check that 'equiv' casting can reorder fields and change byte # order assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) c = a.astype(b.dtype, casting='equiv') assert_equal(a == c, [True, True]) # Check that 'safe' casting can change byte order and up-cast # fields t = [('a', '<i8'), ('b', '>f8')] assert_(np.can_cast(a.dtype, t, casting='safe')) c = a.astype(t, casting='safe') assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)), [True, True]) # Check that 'same_kind' casting can change byte order and # change field widths within a "kind" t = [('a', '<i4'), ('b', '>f4')] assert_(np.can_cast(a.dtype, t, casting='same_kind')) c = a.astype(t, casting='same_kind') assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)), [True, True]) # Check that casting fails if the casting rule should fail on # any of the fields t = [('a', '>i8'), ('b', '<f4')] assert_(not np.can_cast(a.dtype, t, casting='safe')) assert_raises(TypeError, a.astype, t, casting='safe') t = [('a', '>i2'), ('b', '<f8')] assert_(not np.can_cast(a.dtype, t, casting='equiv')) assert_raises(TypeError, a.astype, t, casting='equiv') t = [('a', '>i8'), ('b', '<i2')] assert_(not np.can_cast(a.dtype, t, casting='same_kind')) assert_raises(TypeError, a.astype, t, casting='same_kind') assert_(not np.can_cast(a.dtype, b.dtype, casting='no')) assert_raises(TypeError, a.astype, b.dtype, casting='no') # Check that non-'unsafe' casting can't change the set of field names for casting in ['no', 'safe', 'equiv', 'same_kind']: t = [('a', '>i4')] assert_(not np.can_cast(a.dtype, t, casting=casting)) t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')] assert_(not np.can_cast(a.dtype, t, casting=casting)) def test_objview(self): # https://github.com/numpy/numpy/issues/3286 a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')]) a[['a', 'b']] # TypeError? # https://github.com/numpy/numpy/issues/3253 dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')]) dat2[['B', 'A']] # TypeError? 
def test_setfield(self): # https://github.com/numpy/numpy/issues/3126 struct_dt = np.dtype([('elem', 'i4', 5),]) dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)]) x = np.zeros(1, dt) x[0]['field'] = np.ones(10, dtype='i4') x[0]['struct'] = np.ones(1, dtype=struct_dt) assert_equal(x[0]['field'], np.ones(10, dtype='i4')) def test_setfield_object(self): # make sure object field assignment with ndarray value # on void scalar mimics setitem behavior b = np.zeros(1, dtype=[('x', 'O')]) # next line should work identically to b['x'][0] = np.arange(3) b[0]['x'] = np.arange(3) assert_equal(b[0]['x'], np.arange(3)) #check that broadcasting check still works c = np.zeros(1, dtype=[('x', 'O', 5)]) def testassign(): c[0]['x'] = np.arange(3) assert_raises(ValueError, testassign) class TestBool(TestCase): def test_test_interning(self): a0 = np.bool_(0) b0 = np.bool_(False) self.assertTrue(a0 is b0) a1 = np.bool_(1) b1 = np.bool_(True) self.assertTrue(a1 is b1) self.assertTrue(np.array([True])[0] is a1) self.assertTrue(np.array(True)[()] is a1) def test_sum(self): d = np.ones(101, dtype=np.bool) assert_equal(d.sum(), d.size) assert_equal(d[::2].sum(), d[::2].size) assert_equal(d[::-2].sum(), d[::-2].size) d = np.frombuffer(b'\xff\xff' * 100, dtype=bool) assert_equal(d.sum(), d.size) assert_equal(d[::2].sum(), d[::2].size) assert_equal(d[::-2].sum(), d[::-2].size) def check_count_nonzero(self, power, length): powers = [2 ** i for i in range(length)] for i in range(2**power): l = [(i & x) != 0 for x in powers] a = np.array(l, dtype=np.bool) c = builtins.sum(l) self.assertEqual(np.count_nonzero(a), c) av = a.view(np.uint8) av *= 3 self.assertEqual(np.count_nonzero(a), c) av *= 4 self.assertEqual(np.count_nonzero(a), c) av[av != 0] = 0xFF self.assertEqual(np.count_nonzero(a), c) def test_count_nonzero(self): # check all 12 bit combinations in a length 17 array # covers most cases of the 16 byte unrolled code self.check_count_nonzero(12, 17) @dec.slow def test_count_nonzero_all(self): # check all combinations in a length 17 array # covers all cases of the 16 byte unrolled code self.check_count_nonzero(17, 17) def test_count_nonzero_unaligned(self): # prevent mistakes as e.g. gh-4060 for o in range(7): a = np.zeros((18,), dtype=np.bool)[o+1:] a[:o] = True self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist())) a = np.ones((18,), dtype=np.bool)[o+1:] a[:o] = False self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist())) class TestMethods(TestCase): def test_round(self): def check_round(arr, expected, *round_args): assert_equal(arr.round(*round_args), expected) # With output array out = np.zeros_like(arr) res = arr.round(*round_args, out=out) assert_equal(out, expected) assert_equal(out, res) check_round(np.array([1.2, 1.5]), [1, 2]) check_round(np.array(1.5), 2) check_round(np.array([12.2, 15.5]), [10, 20], -1) check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1) # Complex rounding check_round(np.array([4.5 + 1.5j]), [4 + 2j]) check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1) def test_transpose(self): a = np.array([[1, 2], [3, 4]]) assert_equal(a.transpose(), [[1, 3], [2, 4]]) self.assertRaises(ValueError, lambda: a.transpose(0)) self.assertRaises(ValueError, lambda: a.transpose(0, 0)) self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2)) def test_sort(self): # test ordering for floats and complex containing nans. It is only # necessary to check the lessthan comparison, so sorts that # only follow the insertion sort path are sufficient. 
We only # test doubles and complex doubles as the logic is the same. # check doubles msg = "Test real sort order with nans" a = np.array([np.nan, 1, 0]) b = np.sort(a) assert_equal(b, a[::-1], msg) # check complex msg = "Test complex sort order with nans" a = np.zeros(9, dtype=np.complex128) a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0] a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0] b = np.sort(a) assert_equal(b, a[::-1], msg) # all c scalar sorts use the same code with different types # so it suffices to run a quick check with one type. The number # of sorted items must be greater than ~50 to check the actual # algorithm because quick and merge sort fall over to insertion # sort for small arrays. a = np.arange(101) b = a[::-1].copy() for kind in ['q', 'm', 'h']: msg = "scalar sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test complex sorts. These use the same code as the scalars # but the compare function differs. ai = a*1j + 1 bi = b*1j + 1 for kind in ['q', 'm', 'h']: msg = "complex sort, real part == 1, kind=%s" % kind c = ai.copy() c.sort(kind=kind) assert_equal(c, ai, msg) c = bi.copy() c.sort(kind=kind) assert_equal(c, ai, msg) ai = a + 1j bi = b + 1j for kind in ['q', 'm', 'h']: msg = "complex sort, imag part == 1, kind=%s" % kind c = ai.copy() c.sort(kind=kind) assert_equal(c, ai, msg) c = bi.copy() c.sort(kind=kind) assert_equal(c, ai, msg) # test sorting of complex arrays requiring byte-swapping, gh-5441 for endianess in '<>': for dt in np.typecodes['Complex']: arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt) c = arr.copy() c.sort() msg = 'byte-swapped complex sort, dtype={0}'.format(dt) assert_equal(c, arr, msg) # test string sorts. s = 'aaaaaaaa' a = np.array([s + chr(i) for i in range(101)]) b = a[::-1].copy() for kind in ['q', 'm', 'h']: msg = "string sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test unicode sorts. s = 'aaaaaaaa' a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode) b = a[::-1].copy() for kind in ['q', 'm', 'h']: msg = "unicode sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test object array sorts. a = np.empty((101,), dtype=np.object) a[:] = list(range(101)) b = a[::-1] for kind in ['q', 'h', 'm']: msg = "object sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test record array sorts. dt = np.dtype([('f', float), ('i', int)]) a = np.array([(i, i) for i in range(101)], dtype=dt) b = a[::-1] for kind in ['q', 'h', 'm']: msg = "object sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test datetime64 sorts. a = np.arange(0, 101, dtype='datetime64[D]') b = a[::-1] for kind in ['q', 'h', 'm']: msg = "datetime64 sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test timedelta64 sorts. a = np.arange(0, 101, dtype='timedelta64[D]') b = a[::-1] for kind in ['q', 'h', 'm']: msg = "timedelta64 sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # check axis handling. 
This should be the same for all type # specific sorts, so we only check it for one type and one kind a = np.array([[3, 2], [1, 0]]) b = np.array([[1, 0], [3, 2]]) c = np.array([[2, 3], [0, 1]]) d = a.copy() d.sort(axis=0) assert_equal(d, b, "test sort with axis=0") d = a.copy() d.sort(axis=1) assert_equal(d, c, "test sort with axis=1") d = a.copy() d.sort() assert_equal(d, c, "test sort with default axis") # check axis handling for multidimensional empty arrays a = np.array([]) a.shape = (3, 2, 1, 0) for axis in range(-a.ndim, a.ndim): msg = 'test empty array sort with axis={0}'.format(axis) assert_equal(np.sort(a, axis=axis), a, msg) msg = 'test empty array sort with axis=None' assert_equal(np.sort(a, axis=None), a.ravel(), msg) def test_copy(self): def assert_fortran(arr): assert_(arr.flags.fortran) assert_(arr.flags.f_contiguous) assert_(not arr.flags.c_contiguous) def assert_c(arr): assert_(not arr.flags.fortran) assert_(not arr.flags.f_contiguous) assert_(arr.flags.c_contiguous) a = np.empty((2, 2), order='F') # Test copying a Fortran array assert_c(a.copy()) assert_c(a.copy('C')) assert_fortran(a.copy('F')) assert_fortran(a.copy('A')) # Now test starting with a C array. a = np.empty((2, 2), order='C') assert_c(a.copy()) assert_c(a.copy('C')) assert_fortran(a.copy('F')) assert_c(a.copy('A')) def test_sort_order(self): # Test sorting an array with fields x1 = np.array([21, 32, 14]) x2 = np.array(['my', 'first', 'name']) x3 = np.array([3.1, 4.5, 6.2]) r = np.rec.fromarrays([x1, x2, x3], names='id,word,number') r.sort(order=['id']) assert_equal(r.id, np.array([14, 21, 32])) assert_equal(r.word, np.array(['name', 'my', 'first'])) assert_equal(r.number, np.array([6.2, 3.1, 4.5])) r.sort(order=['word']) assert_equal(r.id, np.array([32, 21, 14])) assert_equal(r.word, np.array(['first', 'my', 'name'])) assert_equal(r.number, np.array([4.5, 3.1, 6.2])) r.sort(order=['number']) assert_equal(r.id, np.array([21, 32, 14])) assert_equal(r.word, np.array(['my', 'first', 'name'])) assert_equal(r.number, np.array([3.1, 4.5, 6.2])) if sys.byteorder == 'little': strtype = '>i2' else: strtype = '<i2' mydtype = [('name', strchar + '5'), ('col2', strtype)] r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)], dtype=mydtype) r.sort(order='col2') assert_equal(r['col2'], [1, 3, 255, 258]) assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)], dtype=mydtype)) def test_argsort(self): # all c scalar argsorts use the same code with different types # so it suffices to run a quick check with one type. The number # of sorted items must be greater than ~50 to check the actual # algorithm because quick and merge sort fall over to insertion # sort for small arrays. a = np.arange(101) b = a[::-1].copy() for kind in ['q', 'm', 'h']: msg = "scalar argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), a, msg) assert_equal(b.copy().argsort(kind=kind), b, msg) # test complex argsorts. These use the same code as the scalars # but the compare fuction differs. 
ai = a*1j + 1 bi = b*1j + 1 for kind in ['q', 'm', 'h']: msg = "complex argsort, kind=%s" % kind assert_equal(ai.copy().argsort(kind=kind), a, msg) assert_equal(bi.copy().argsort(kind=kind), b, msg) ai = a + 1j bi = b + 1j for kind in ['q', 'm', 'h']: msg = "complex argsort, kind=%s" % kind assert_equal(ai.copy().argsort(kind=kind), a, msg) assert_equal(bi.copy().argsort(kind=kind), b, msg) # test argsort of complex arrays requiring byte-swapping, gh-5441 for endianess in '<>': for dt in np.typecodes['Complex']: arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt) msg = 'byte-swapped complex argsort, dtype={0}'.format(dt) assert_equal(arr.argsort(), np.arange(len(arr), dtype=np.intp), msg) # test string argsorts. s = 'aaaaaaaa' a = np.array([s + chr(i) for i in range(101)]) b = a[::-1].copy() r = np.arange(101) rr = r[::-1] for kind in ['q', 'm', 'h']: msg = "string argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test unicode argsorts. s = 'aaaaaaaa' a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode) b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'm', 'h']: msg = "unicode argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test object array argsorts. a = np.empty((101,), dtype=np.object) a[:] = list(range(101)) b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'm', 'h']: msg = "object argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test structured array argsorts. dt = np.dtype([('f', float), ('i', int)]) a = np.array([(i, i) for i in range(101)], dtype=dt) b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'm', 'h']: msg = "structured array argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test datetime64 argsorts. a = np.arange(0, 101, dtype='datetime64[D]') b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'h', 'm']: msg = "datetime64 argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test timedelta64 argsorts. a = np.arange(0, 101, dtype='timedelta64[D]') b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'h', 'm']: msg = "timedelta64 argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # check axis handling. 
This should be the same for all type # specific argsorts, so we only check it for one type and one kind a = np.array([[3, 2], [1, 0]]) b = np.array([[1, 1], [0, 0]]) c = np.array([[1, 0], [1, 0]]) assert_equal(a.copy().argsort(axis=0), b) assert_equal(a.copy().argsort(axis=1), c) assert_equal(a.copy().argsort(), c) # using None is known fail at this point #assert_equal(a.copy().argsort(axis=None, c) # check axis handling for multidimensional empty arrays a = np.array([]) a.shape = (3, 2, 1, 0) for axis in range(-a.ndim, a.ndim): msg = 'test empty array argsort with axis={0}'.format(axis) assert_equal(np.argsort(a, axis=axis), np.zeros_like(a, dtype=np.intp), msg) msg = 'test empty array argsort with axis=None' assert_equal(np.argsort(a, axis=None), np.zeros_like(a.ravel(), dtype=np.intp), msg) # check that stable argsorts are stable r = np.arange(100) # scalars a = np.zeros(100) assert_equal(a.argsort(kind='m'), r) # complex a = np.zeros(100, dtype=np.complex) assert_equal(a.argsort(kind='m'), r) # string a = np.array(['aaaaaaaaa' for i in range(100)]) assert_equal(a.argsort(kind='m'), r) # unicode a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode) assert_equal(a.argsort(kind='m'), r) def test_sort_unicode_kind(self): d = np.arange(10) k = b'\xc3\xa4'.decode("UTF8") assert_raises(ValueError, d.sort, kind=k) assert_raises(ValueError, d.argsort, kind=k) def test_searchsorted(self): # test for floats and complex containing nans. The logic is the # same for all float types so only test double types for now. # The search sorted routines use the compare functions for the # array type, so this checks if that is consistent with the sort # order. # check double a = np.array([0, 1, np.nan]) msg = "Test real searchsorted with nans, side='l'" b = a.searchsorted(a, side='l') assert_equal(b, np.arange(3), msg) msg = "Test real searchsorted with nans, side='r'" b = a.searchsorted(a, side='r') assert_equal(b, np.arange(1, 4), msg) # check double complex a = np.zeros(9, dtype=np.complex128) a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan] a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan] msg = "Test complex searchsorted with nans, side='l'" b = a.searchsorted(a, side='l') assert_equal(b, np.arange(9), msg) msg = "Test complex searchsorted with nans, side='r'" b = a.searchsorted(a, side='r') assert_equal(b, np.arange(1, 10), msg) msg = "Test searchsorted with little endian, side='l'" a = np.array([0, 128], dtype='<i4') b = a.searchsorted(np.array(128, dtype='<i4')) assert_equal(b, 1, msg) msg = "Test searchsorted with big endian, side='l'" a = np.array([0, 128], dtype='>i4') b = a.searchsorted(np.array(128, dtype='>i4')) assert_equal(b, 1, msg) # Check 0 elements a = np.ones(0) b = a.searchsorted([0, 1, 2], 'l') assert_equal(b, [0, 0, 0]) b = a.searchsorted([0, 1, 2], 'r') assert_equal(b, [0, 0, 0]) a = np.ones(1) # Check 1 element b = a.searchsorted([0, 1, 2], 'l') assert_equal(b, [0, 0, 1]) b = a.searchsorted([0, 1, 2], 'r') assert_equal(b, [0, 1, 1]) # Check all elements equal a = np.ones(2) b = a.searchsorted([0, 1, 2], 'l') assert_equal(b, [0, 0, 2]) b = a.searchsorted([0, 1, 2], 'r') assert_equal(b, [0, 2, 2]) # Test searching unaligned array a = np.arange(10) aligned = np.empty(a.itemsize * a.size + 1, 'uint8') unaligned = aligned[1:].view(a.dtype) unaligned[:] = a # Test searching unaligned array b = unaligned.searchsorted(a, 'l') assert_equal(b, a) b = unaligned.searchsorted(a, 'r') assert_equal(b, a + 1) # Test searching for unaligned keys b = a.searchsorted(unaligned, 'l') 
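        # --- Editor's illustrative sketch, not part of the upstream test
        # suite (fresh variable names so the surrounding assertions are left
        # untouched): side='left' returns the leftmost insertion point that
        # keeps the array sorted, side='right' the rightmost, so equal keys
        # land before or after the existing entries respectively.
        demo = np.array([1, 2, 2, 3])
        assert_equal(demo.searchsorted(2, side='left'), 1)
        assert_equal(demo.searchsorted(2, side='right'), 3)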
assert_equal(b, a) b = a.searchsorted(unaligned, 'r') assert_equal(b, a + 1) # Test smart resetting of binsearch indices a = np.arange(5) b = a.searchsorted([6, 5, 4], 'l') assert_equal(b, [5, 5, 4]) b = a.searchsorted([6, 5, 4], 'r') assert_equal(b, [5, 5, 5]) # Test all type specific binary search functions types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'], np.typecodes['Datetime'], '?O')) for dt in types: if dt == 'M': dt = 'M8[D]' if dt == '?': a = np.arange(2, dtype=dt) out = np.arange(2) else: a = np.arange(0, 5, dtype=dt) out = np.arange(5) b = a.searchsorted(a, 'l') assert_equal(b, out) b = a.searchsorted(a, 'r') assert_equal(b, out + 1) def test_searchsorted_unicode(self): # Test searchsorted on unicode strings. # 1.6.1 contained a string length miscalculation in # arraytypes.c.src:UNICODE_compare() which manifested as # incorrect/inconsistent results from searchsorted. a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'], dtype=np.unicode) ind = np.arange(len(a)) assert_equal([a.searchsorted(v, 'left') for v in a], ind) assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1) assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind) assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1) def test_searchsorted_with_sorter(self): a = np.array([5, 2, 1, 3, 4]) s = np.argsort(a) assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3))) assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1]) assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4]) assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6]) # bounds check assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5]) assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3]) assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3]) a = np.random.rand(300) s = a.argsort() b = np.sort(a) k = np.linspace(0, 1, 20) assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s)) a = np.array([0, 1, 2, 3, 5]*20) s = a.argsort() k = [0, 1, 2, 3, 5] expected = [0, 20, 40, 60, 80] assert_equal(a.searchsorted(k, side='l', sorter=s), expected) expected = [20, 40, 60, 80, 100] assert_equal(a.searchsorted(k, side='r', sorter=s), expected) # Test searching unaligned array keys = np.arange(10) a = keys.copy() np.random.shuffle(s) s = a.argsort() aligned = np.empty(a.itemsize * a.size + 1, 'uint8') unaligned = aligned[1:].view(a.dtype) # Test searching unaligned array unaligned[:] = a b = unaligned.searchsorted(keys, 'l', s) assert_equal(b, keys) b = unaligned.searchsorted(keys, 'r', s) assert_equal(b, keys + 1) # Test searching for unaligned keys unaligned[:] = keys b = a.searchsorted(unaligned, 'l', s) assert_equal(b, keys) b = a.searchsorted(unaligned, 'r', s) assert_equal(b, keys + 1) # Test all type specific indirect binary search functions types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'], 
np.typecodes['Datetime'], '?O')) for dt in types: if dt == 'M': dt = 'M8[D]' if dt == '?': a = np.array([1, 0], dtype=dt) # We want the sorter array to be of a type that is different # from np.intp in all platforms, to check for #4698 s = np.array([1, 0], dtype=np.int16) out = np.array([1, 0]) else: a = np.array([3, 4, 1, 2, 0], dtype=dt) # We want the sorter array to be of a type that is different # from np.intp in all platforms, to check for #4698 s = np.array([4, 2, 3, 0, 1], dtype=np.int16) out = np.array([3, 4, 1, 2, 0], dtype=np.intp) b = a.searchsorted(a, 'l', s) assert_equal(b, out) b = a.searchsorted(a, 'r', s) assert_equal(b, out + 1) # Test non-contiguous sorter array a = np.array([3, 4, 1, 2, 0]) srt = np.empty((10,), dtype=np.intp) srt[1::2] = -1 srt[::2] = [4, 2, 3, 0, 1] s = srt[::2] out = np.array([3, 4, 1, 2, 0], dtype=np.intp) b = a.searchsorted(a, 'l', s) assert_equal(b, out) b = a.searchsorted(a, 'r', s) assert_equal(b, out + 1) def test_searchsorted_return_type(self): # Functions returning indices should always return base ndarrays class A(np.ndarray): pass a = np.arange(5).view(A) b = np.arange(1, 3).view(A) s = np.arange(5).view(A) assert_(not isinstance(a.searchsorted(b, 'l'), A)) assert_(not isinstance(a.searchsorted(b, 'r'), A)) assert_(not isinstance(a.searchsorted(b, 'l', s), A)) assert_(not isinstance(a.searchsorted(b, 'r', s), A)) def test_argpartition_out_of_range(self): # Test out of range values in kth raise an error, gh-5469 d = np.arange(10) assert_raises(ValueError, d.argpartition, 10) assert_raises(ValueError, d.argpartition, -11) # Test also for generic type argpartition, which uses sorting # and used to not bound check kth d_obj = np.arange(10, dtype=object) assert_raises(ValueError, d_obj.argpartition, 10) assert_raises(ValueError, d_obj.argpartition, -11) def test_partition_out_of_range(self): # Test out of range values in kth raise an error, gh-5469 d = np.arange(10) assert_raises(ValueError, d.partition, 10) assert_raises(ValueError, d.partition, -11) # Test also for generic type partition, which uses sorting # and used to not bound check kth d_obj = np.arange(10, dtype=object) assert_raises(ValueError, d_obj.partition, 10) assert_raises(ValueError, d_obj.partition, -11) def test_partition_empty_array(self): # check axis handling for multidimensional empty arrays a = np.array([]) a.shape = (3, 2, 1, 0) for axis in range(-a.ndim, a.ndim): msg = 'test empty array partition with axis={0}'.format(axis) assert_equal(np.partition(a, 0, axis=axis), a, msg) msg = 'test empty array partition with axis=None' assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg) def test_argpartition_empty_array(self): # check axis handling for multidimensional empty arrays a = np.array([]) a.shape = (3, 2, 1, 0) for axis in range(-a.ndim, a.ndim): msg = 'test empty array argpartition with axis={0}'.format(axis) assert_equal(np.partition(a, 0, axis=axis), np.zeros_like(a, dtype=np.intp), msg) msg = 'test empty array argpartition with axis=None' assert_equal(np.partition(a, 0, axis=None), np.zeros_like(a.ravel(), dtype=np.intp), msg) def test_partition(self): d = np.arange(10) assert_raises(TypeError, np.partition, d, 2, kind=1) assert_raises(ValueError, np.partition, d, 2, kind="nonsense") assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense") assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense") assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense") for k in ("introselect",): d = np.array([]) assert_array_equal(np.partition(d, 0, 
kind=k), d) assert_array_equal(np.argpartition(d, 0, kind=k), d) d = np.ones((1)) assert_array_equal(np.partition(d, 0, kind=k)[0], d) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) # kth not modified kth = np.array([30, 15, 5]) okth = kth.copy() np.partition(np.arange(40), kth) assert_array_equal(kth, okth) for r in ([2, 1], [1, 2], [1, 1]): d = np.array(r) tgt = np.sort(d) assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0]) assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1]) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) assert_array_equal(d[np.argpartition(d, 1, kind=k)], np.partition(d, 1, kind=k)) for i in range(d.size): d[i:].partition(0, kind=k) assert_array_equal(d, tgt) for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1], [1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]): d = np.array(r) tgt = np.sort(d) assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0]) assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1]) assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2]) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) assert_array_equal(d[np.argpartition(d, 1, kind=k)], np.partition(d, 1, kind=k)) assert_array_equal(d[np.argpartition(d, 2, kind=k)], np.partition(d, 2, kind=k)) for i in range(d.size): d[i:].partition(0, kind=k) assert_array_equal(d, tgt) d = np.ones((50)) assert_array_equal(np.partition(d, 0, kind=k), d) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) # sorted d = np.arange((49)) self.assertEqual(np.partition(d, 5, kind=k)[5], 5) self.assertEqual(np.partition(d, 15, kind=k)[15], 15) assert_array_equal(d[np.argpartition(d, 5, kind=k)], np.partition(d, 5, kind=k)) assert_array_equal(d[np.argpartition(d, 15, kind=k)], np.partition(d, 15, kind=k)) # rsorted d = np.arange((47))[::-1] self.assertEqual(np.partition(d, 6, kind=k)[6], 6) self.assertEqual(np.partition(d, 16, kind=k)[16], 16) assert_array_equal(d[np.argpartition(d, 6, kind=k)], np.partition(d, 6, kind=k)) assert_array_equal(d[np.argpartition(d, 16, kind=k)], np.partition(d, 16, kind=k)) assert_array_equal(np.partition(d, -6, kind=k), np.partition(d, 41, kind=k)) assert_array_equal(np.partition(d, -16, kind=k), np.partition(d, 31, kind=k)) assert_array_equal(d[np.argpartition(d, -6, kind=k)], np.partition(d, 41, kind=k)) # median of 3 killer, O(n^2) on pure median 3 pivot quickselect # exercises the median of median of 5 code used to keep O(n) d = np.arange(1000000) x = np.roll(d, d.size // 2) mid = x.size // 2 + 1 assert_equal(np.partition(x, mid)[mid], mid) d = np.arange(1000001) x = np.roll(d, d.size // 2 + 1) mid = x.size // 2 + 1 assert_equal(np.partition(x, mid)[mid], mid) # max d = np.ones(10) d[1] = 4 assert_equal(np.partition(d, (2, -1))[-1], 4) assert_equal(np.partition(d, (2, -1))[2], 1) assert_equal(d[np.argpartition(d, (2, -1))][-1], 4) assert_equal(d[np.argpartition(d, (2, -1))][2], 1) d[1] = np.nan assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1])) assert_(np.isnan(np.partition(d, (2, -1))[-1])) # equal elements d = np.arange((47)) % 7 tgt = np.sort(np.arange((47)) % 7) np.random.shuffle(d) for i in range(d.size): self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i]) assert_array_equal(d[np.argpartition(d, 6, kind=k)], np.partition(d, 6, kind=k)) assert_array_equal(d[np.argpartition(d, 16, kind=k)], np.partition(d, 16, kind=k)) for i in range(d.size): d[i:].partition(0, kind=k) assert_array_equal(d, tgt) d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 9]) kth = [0, 3, 19, 20] assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7)) assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7)) d = np.array([2, 1]) d.partition(0, kind=k) assert_raises(ValueError, d.partition, 2) assert_raises(ValueError, d.partition, 3, axis=1) assert_raises(ValueError, np.partition, d, 2) assert_raises(ValueError, np.partition, d, 2, axis=1) assert_raises(ValueError, d.argpartition, 2) assert_raises(ValueError, d.argpartition, 3, axis=1) assert_raises(ValueError, np.argpartition, d, 2) assert_raises(ValueError, np.argpartition, d, 2, axis=1) d = np.arange(10).reshape((2, 5)) d.partition(1, axis=0, kind=k) d.partition(4, axis=1, kind=k) np.partition(d, 1, axis=0, kind=k) np.partition(d, 4, axis=1, kind=k) np.partition(d, 1, axis=None, kind=k) np.partition(d, 9, axis=None, kind=k) d.argpartition(1, axis=0, kind=k) d.argpartition(4, axis=1, kind=k) np.argpartition(d, 1, axis=0, kind=k) np.argpartition(d, 4, axis=1, kind=k) np.argpartition(d, 1, axis=None, kind=k) np.argpartition(d, 9, axis=None, kind=k) assert_raises(ValueError, d.partition, 2, axis=0) assert_raises(ValueError, d.partition, 11, axis=1) assert_raises(TypeError, d.partition, 2, axis=None) assert_raises(ValueError, np.partition, d, 9, axis=1) assert_raises(ValueError, np.partition, d, 11, axis=None) assert_raises(ValueError, d.argpartition, 2, axis=0) assert_raises(ValueError, d.argpartition, 11, axis=1) assert_raises(ValueError, np.argpartition, d, 9, axis=1) assert_raises(ValueError, np.argpartition, d, 11, axis=None) td = [(dt, s) for dt in [np.int32, np.float32, np.complex64] for s in (9, 16)] for dt, s in td: aae = assert_array_equal at = self.assertTrue d = np.arange(s, dtype=dt) np.random.shuffle(d) d1 = np.tile(np.arange(s, dtype=dt), (4, 1)) map(np.random.shuffle, d1) d0 = np.transpose(d1) for i in range(d.size): p = np.partition(d, i, kind=k) self.assertEqual(p[i], i) # all before are smaller assert_array_less(p[:i], p[i]) # all after are larger assert_array_less(p[i], p[i + 1:]) aae(p, d[np.argpartition(d, i, kind=k)]) p = np.partition(d1, i, axis=1, kind=k) aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt)) # array_less does not seem to work right at((p[:, :i].T <= p[:, i]).all(), msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T)) at((p[:, i + 1:].T > p[:, i]).all(), msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T)) aae(p, d1[np.arange(d1.shape[0])[:, None], np.argpartition(d1, i, axis=1, kind=k)]) p = np.partition(d0, i, axis=0, kind=k) aae(p[i,:], np.array([i] * d1.shape[0], dtype=dt)) # array_less does not seem to work right at((p[:i,:] <= p[i,:]).all(), msg="%d: %r <= %r" % (i, p[i,:], p[:i,:])) at((p[i + 1:,:] > p[i,:]).all(), msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:])) aae(p, d0[np.argpartition(d0, i, axis=0, kind=k), np.arange(d0.shape[1])[None,:]]) # check inplace dc = d.copy() dc.partition(i, kind=k) assert_equal(dc, np.partition(d, i, kind=k)) dc = d0.copy() dc.partition(i, axis=0, kind=k) assert_equal(dc, np.partition(d0, i, axis=0, kind=k)) dc = d1.copy() dc.partition(i, axis=1, kind=k) assert_equal(dc, np.partition(d1, i, axis=1, kind=k)) def assert_partitioned(self, d, kth): prev = 0 for k in np.sort(kth): assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k) assert_((d[k:] >= d[k]).all(), msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k])) prev = k + 1 def test_partition_iterative(self): d = np.arange(17) kth = (0, 1, 2, 429, 231) assert_raises(ValueError, d.partition, kth) assert_raises(ValueError, 
d.argpartition, kth) d = np.arange(10).reshape((2, 5)) assert_raises(ValueError, d.partition, kth, axis=0) assert_raises(ValueError, d.partition, kth, axis=1) assert_raises(ValueError, np.partition, d, kth, axis=1) assert_raises(ValueError, np.partition, d, kth, axis=None) d = np.array([3, 4, 2, 1]) p = np.partition(d, (0, 3)) self.assert_partitioned(p, (0, 3)) self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) assert_array_equal(p, np.partition(d, (-3, -1))) assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) d = np.arange(17) np.random.shuffle(d) d.partition(range(d.size)) assert_array_equal(np.arange(17), d) np.random.shuffle(d) assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) # test unsorted kth d = np.arange(17) np.random.shuffle(d) keys = np.array([1, 3, 8, -2]) np.random.shuffle(d) p = np.partition(d, keys) self.assert_partitioned(p, keys) p = d[np.argpartition(d, keys)] self.assert_partitioned(p, keys) np.random.shuffle(keys) assert_array_equal(np.partition(d, keys), p) assert_array_equal(d[np.argpartition(d, keys)], p) # equal kth d = np.arange(20)[::-1] self.assert_partitioned(np.partition(d, [5]*4), [5]) self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]), [5]*4 + [6, 13]) self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5]) self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])], [5]*4 + [6, 13]) d = np.arange(12) np.random.shuffle(d) d1 = np.tile(np.arange(12), (4, 1)) map(np.random.shuffle, d1) d0 = np.transpose(d1) kth = (1, 6, 7, -1) p = np.partition(d1, kth, axis=1) pa = d1[np.arange(d1.shape[0])[:, None], d1.argpartition(kth, axis=1)] assert_array_equal(p, pa) for i in range(d1.shape[0]): self.assert_partitioned(p[i,:], kth) p = np.partition(d0, kth, axis=0) pa = d0[np.argpartition(d0, kth, axis=0), np.arange(d0.shape[1])[None,:]] assert_array_equal(p, pa) for i in range(d0.shape[1]): self.assert_partitioned(p[:, i], kth) def test_partition_cdtype(self): d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), ('Lancelot', 1.9, 38)], dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')]) tgt = np.sort(d, order=['age', 'height']) assert_array_equal(np.partition(d, range(d.size), order=['age', 'height']), tgt) assert_array_equal(d[np.argpartition(d, range(d.size), order=['age', 'height'])], tgt) for k in range(d.size): assert_equal(np.partition(d, k, order=['age', 'height'])[k], tgt[k]) assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k], tgt[k]) d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot']) tgt = np.sort(d) assert_array_equal(np.partition(d, range(d.size)), tgt) for k in range(d.size): assert_equal(np.partition(d, k)[k], tgt[k]) assert_equal(d[np.argpartition(d, k)][k], tgt[k]) def test_partition_unicode_kind(self): d = np.arange(10) k = b'\xc3\xa4'.decode("UTF8") assert_raises(ValueError, d.partition, 2, kind=k) assert_raises(ValueError, d.argpartition, 2, kind=k) def test_partition_fuzz(self): # a few rounds of random data testing for j in range(10, 30): for i in range(1, j - 2): d = np.arange(j) np.random.shuffle(d) d = d % np.random.randint(2, 30) idx = np.random.randint(d.size) kth = [0, idx, i, i + 1] tgt = np.sort(d)[kth] assert_array_equal(np.partition(d, kth)[kth], tgt, err_msg="data: %r\n kth: %r" % (d, kth)) def test_argpartition_gh5524(self): # A test for functionality of argpartition on lists. 
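        # --- Editor's illustrative sketch, not part of the upstream test
        # suite: partition only guarantees that the kth element lands in its
        # sorted position, with smaller values before it and larger values
        # after it; both sides themselves stay unsorted, and argpartition
        # gives the same guarantee as indices.
        demo = np.array([7, 1, 5, 3, 9])
        part = np.partition(demo, 2)
        assert_equal(part[2], np.sort(demo)[2])
        assert_(np.all(part[:2] <= part[2]) and np.all(part[3:] >= part[2]))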
        d = [6,7,3,2,9,0]
        p = np.argpartition(d,1)
        self.assert_partitioned(np.array(d)[p],[1])

    def test_flatten(self):
        x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
        x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
        y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
        y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
        y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
        y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
        assert_equal(x0.flatten(), y0)
        assert_equal(x0.flatten('F'), y0f)
        assert_equal(x0.flatten('F'), x0.T.flatten())
        assert_equal(x1.flatten(), y1)
        assert_equal(x1.flatten('F'), y1f)
        assert_equal(x1.flatten('F'), x1.T.flatten())

    def test_dot(self):
        a = np.array([[1, 0], [0, 1]])
        b = np.array([[0, 1], [1, 0]])
        c = np.array([[9, 1], [1, -9]])
        assert_equal(np.dot(a, b), a.dot(b))
        assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))

        # test passing in an output array
        c = np.zeros_like(a)
        a.dot(b, c)
        assert_equal(c, np.dot(a, b))

        # test keyword args
        c = np.zeros_like(a)
        a.dot(b=b, out=c)
        assert_equal(c, np.dot(a, b))

    def test_dot_override(self):
        # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
        return

        class A(object):
            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return "A"

        class B(object):
            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return NotImplemented

        a = A()
        b = B()
        c = np.array([[1]])

        assert_equal(np.dot(a, b), "A")
        assert_equal(c.dot(a), "A")
        assert_raises(TypeError, np.dot, b, c)
        assert_raises(TypeError, c.dot, b)

    def test_diagonal(self):
        a = np.arange(12).reshape((3, 4))
        assert_equal(a.diagonal(), [0, 5, 10])
        assert_equal(a.diagonal(0), [0, 5, 10])
        assert_equal(a.diagonal(1), [1, 6, 11])
        assert_equal(a.diagonal(-1), [4, 9])

        b = np.arange(8).reshape((2, 2, 2))
        assert_equal(b.diagonal(), [[0, 6], [1, 7]])
        assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
        assert_equal(b.diagonal(1), [[2], [3]])
        assert_equal(b.diagonal(-1), [[4], [5]])
        assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
        assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
        assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
        assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
        # Order of axis argument doesn't matter:
        assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])

    def test_diagonal_view_notwriteable(self):
        # this test is only for 1.9, the diagonal view will be
        # writeable in 1.10.
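        # --- Editor's illustrative sketch, not part of the upstream test
        # suite, restating the offset convention exercised in test_diagonal
        # above: positive offsets select above the main diagonal, negative
        # offsets below it (fresh names, the view test continues unchanged).
        m = np.arange(12).reshape(3, 4)
        assert_equal(m.diagonal(), [0, 5, 10])
        assert_equal(m.diagonal(1), [1, 6, 11])
        assert_equal(m.diagonal(-1), [4, 9])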
a = np.eye(3).diagonal() assert_(not a.flags.writeable) assert_(not a.flags.owndata) a = np.diagonal(np.eye(3)) assert_(not a.flags.writeable) assert_(not a.flags.owndata) a = np.diag(np.eye(3)) assert_(not a.flags.writeable) assert_(not a.flags.owndata) def test_diagonal_memleak(self): # Regression test for a bug that crept in at one point a = np.zeros((100, 100)) assert_(sys.getrefcount(a) < 50) for i in range(100): a.diagonal() assert_(sys.getrefcount(a) < 50) def test_put(self): icodes = np.typecodes['AllInteger'] fcodes = np.typecodes['AllFloat'] for dt in icodes + fcodes + 'O': tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt) # test 1-d a = np.zeros(6, dtype=dt) a.put([1, 3, 5], [1, 3, 5]) assert_equal(a, tgt) # test 2-d a = np.zeros((2, 3), dtype=dt) a.put([1, 3, 5], [1, 3, 5]) assert_equal(a, tgt.reshape(2, 3)) for dt in '?': tgt = np.array([False, True, False, True, False, True], dtype=dt) # test 1-d a = np.zeros(6, dtype=dt) a.put([1, 3, 5], [True]*3) assert_equal(a, tgt) # test 2-d a = np.zeros((2, 3), dtype=dt) a.put([1, 3, 5], [True]*3) assert_equal(a, tgt.reshape(2, 3)) # check must be writeable a = np.zeros(6) a.flags.writeable = False assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5]) def test_ravel(self): a = np.array([[0, 1], [2, 3]]) assert_equal(a.ravel(), [0, 1, 2, 3]) assert_(not a.ravel().flags.owndata) assert_equal(a.ravel('F'), [0, 2, 1, 3]) assert_equal(a.ravel(order='C'), [0, 1, 2, 3]) assert_equal(a.ravel(order='F'), [0, 2, 1, 3]) assert_equal(a.ravel(order='A'), [0, 1, 2, 3]) assert_(not a.ravel(order='A').flags.owndata) assert_equal(a.ravel(order='K'), [0, 1, 2, 3]) assert_(not a.ravel(order='K').flags.owndata) assert_equal(a.ravel(), a.reshape(-1)) a = np.array([[0, 1], [2, 3]], order='F') assert_equal(a.ravel(), [0, 1, 2, 3]) assert_equal(a.ravel(order='A'), [0, 2, 1, 3]) assert_equal(a.ravel(order='K'), [0, 2, 1, 3]) assert_(not a.ravel(order='A').flags.owndata) assert_(not a.ravel(order='K').flags.owndata) assert_equal(a.ravel(), a.reshape(-1)) assert_equal(a.ravel(order='A'), a.reshape(-1, order='A')) a = np.array([[0, 1], [2, 3]])[::-1, :] assert_equal(a.ravel(), [2, 3, 0, 1]) assert_equal(a.ravel(order='C'), [2, 3, 0, 1]) assert_equal(a.ravel(order='F'), [2, 0, 3, 1]) assert_equal(a.ravel(order='A'), [2, 3, 0, 1]) # 'K' doesn't reverse the axes of negative strides assert_equal(a.ravel(order='K'), [2, 3, 0, 1]) assert_(a.ravel(order='K').flags.owndata) # Test simple 1-d copy behaviour: a = np.arange(10)[::2] assert_(a.ravel('K').flags.owndata) assert_(a.ravel('C').flags.owndata) assert_(a.ravel('F').flags.owndata) # Not contiguous and 1-sized axis with non matching stride a = np.arange(2**3 * 2)[::2] a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 a.strides = strides assert_(a.ravel(order='K').flags.owndata) assert_equal(a.ravel('K'), np.arange(0, 15, 2)) # contiguous and 1-sized axis with non matching stride works: a = np.arange(2**3) a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 a.strides = strides assert_(np.may_share_memory(a.ravel(order='K'), a)) assert_equal(a.ravel(order='K'), np.arange(2**3)) # Test negative strides (not very interesting since non-contiguous): a = np.arange(4)[::-1].reshape(2, 2) assert_(a.ravel(order='C').flags.owndata) assert_(a.ravel(order='K').flags.owndata) assert_equal(a.ravel('C'), [3, 2, 1, 0]) assert_equal(a.ravel('K'), [3, 2, 1, 0]) # 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING): a = np.array([[1]]) a.strides = (123, 432) # If 
the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing # them up on purpose: if np.ones(1).strides == (8,): assert_(np.may_share_memory(a.ravel('K'), a)) assert_equal(a.ravel('K').strides, (a.dtype.itemsize,)) for order in ('C', 'F', 'A', 'K'): # 0-d corner case: a = np.array(0) assert_equal(a.ravel(order), [0]) assert_(np.may_share_memory(a.ravel(order), a)) # Test that certain non-inplace ravels work right (mostly) for 'K': b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2) a = b[..., ::2] assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28]) assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28]) assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28]) assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28]) a = b[::2, ...] assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14]) assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14]) assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14]) assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14]) def test_ravel_subclass(self): class ArraySubclass(np.ndarray): pass a = np.arange(10).view(ArraySubclass) assert_(isinstance(a.ravel('C'), ArraySubclass)) assert_(isinstance(a.ravel('F'), ArraySubclass)) assert_(isinstance(a.ravel('A'), ArraySubclass)) assert_(isinstance(a.ravel('K'), ArraySubclass)) a = np.arange(10)[::2].view(ArraySubclass) assert_(isinstance(a.ravel('C'), ArraySubclass)) assert_(isinstance(a.ravel('F'), ArraySubclass)) assert_(isinstance(a.ravel('A'), ArraySubclass)) assert_(isinstance(a.ravel('K'), ArraySubclass)) def test_swapaxes(self): a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy() idx = np.indices(a.shape) assert_(a.flags['OWNDATA']) b = a.copy() # check exceptions assert_raises(ValueError, a.swapaxes, -5, 0) assert_raises(ValueError, a.swapaxes, 4, 0) assert_raises(ValueError, a.swapaxes, 0, -5) assert_raises(ValueError, a.swapaxes, 0, 4) for i in range(-4, 4): for j in range(-4, 4): for k, src in enumerate((a, b)): c = src.swapaxes(i, j) # check shape shape = list(src.shape) shape[i] = src.shape[j] shape[j] = src.shape[i] assert_equal(c.shape, shape, str((i, j, k))) # check array contents i0, i1, i2, i3 = [dim-1 for dim in c.shape] j0, j1, j2, j3 = [dim-1 for dim in src.shape] assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]], c[idx[i0], idx[i1], idx[i2], idx[i3]], str((i, j, k))) # check a view is always returned, gh-5260 assert_(not c.flags['OWNDATA'], str((i, j, k))) # check on non-contiguous input array if k == 1: b = c def test_conjugate(self): a = np.array([1-1j, 1+1j, 23+23.0j]) ac = a.conj() assert_equal(a.real, ac.real) assert_equal(a.imag, -ac.imag) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) a = np.array([1-1j, 1+1j, 23+23.0j], 'F') ac = a.conj() assert_equal(a.real, ac.real) assert_equal(a.imag, -ac.imag) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) a = np.array([1, 2, 3]) ac = a.conj() assert_equal(a, ac) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) a = np.array([1.0, 2.0, 3.0]) ac = a.conj() assert_equal(a, ac) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) a = np.array([1-1j, 1+1j, 1, 2.0], object) ac = a.conj() assert_equal(ac, [k.conjugate() for k in a]) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) a = np.array([1-1j, 1, 2.0, 'f'], object) assert_raises(AttributeError, lambda: a.conj()) assert_raises(AttributeError, lambda: a.conjugate()) class TestBinop(object): def test_inplace(self): # test refcount 1 inplace conversion assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 
2.0]), [0.5, 1.0]) d = np.array([0.5, 0.5])[::2] assert_array_almost_equal(d * (d * np.array([1.0, 2.0])), [0.25, 0.5]) a = np.array([0.5]) b = np.array([0.5]) c = a + b c = a - b c = a * b c = a / b assert_equal(a, b) assert_almost_equal(c, 1.) c = a + b * 2. / b * a - a / b assert_equal(a, b) assert_equal(c, 0.5) # true divide a = np.array([5]) b = np.array([3]) c = (a * a) / b assert_almost_equal(c, 25 / 3) assert_equal(a, 5) assert_equal(b, 3) def test_extension_incref_elide(self): # test extension (e.g. cython) calling PyNumber_* slots without # increasing the reference counts # # def incref_elide(a): # d = input.copy() # refcount 1 # return d, d + d # PyNumber_Add without increasing refcount from numpy.core.multiarray_tests import incref_elide d = np.ones(5) orig, res = incref_elide(d) # the return original should not be changed to an inplace operation assert_array_equal(orig, d) assert_array_equal(res, d + d) def test_extension_incref_elide_stack(self): # scanning if the refcount == 1 object is on the python stack to check # that we are called directly from python is flawed as object may still # be above the stack pointer and we have no access to the top of it # # def incref_elide_l(d): # return l[4] + l[4] # PyNumber_Add without increasing refcount from numpy.core.multiarray_tests import incref_elide_l # padding with 1 makes sure the object on the stack is not overwriten l = [1, 1, 1, 1, np.ones(5)] res = incref_elide_l(l) # the return original should not be changed to an inplace operation assert_array_equal(l[4], np.ones(5)) assert_array_equal(res, l[4] + l[4]) def test_ufunc_override_rop_precedence(self): # Check that __rmul__ and other right-hand operations have # precedence over __numpy_ufunc__ # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844 return ops = { '__add__': ('__radd__', np.add, True), '__sub__': ('__rsub__', np.subtract, True), '__mul__': ('__rmul__', np.multiply, True), '__truediv__': ('__rtruediv__', np.true_divide, True), '__floordiv__': ('__rfloordiv__', np.floor_divide, True), '__mod__': ('__rmod__', np.remainder, True), '__divmod__': ('__rdivmod__', None, False), '__pow__': ('__rpow__', np.power, True), '__lshift__': ('__rlshift__', np.left_shift, True), '__rshift__': ('__rrshift__', np.right_shift, True), '__and__': ('__rand__', np.bitwise_and, True), '__xor__': ('__rxor__', np.bitwise_xor, True), '__or__': ('__ror__', np.bitwise_or, True), '__ge__': ('__le__', np.less_equal, False), '__gt__': ('__lt__', np.less, False), '__le__': ('__ge__', np.greater_equal, False), '__lt__': ('__gt__', np.greater, False), '__eq__': ('__eq__', np.equal, False), '__ne__': ('__ne__', np.not_equal, False), } class OtherNdarraySubclass(np.ndarray): pass class OtherNdarraySubclassWithOverride(np.ndarray): def __numpy_ufunc__(self, *a, **kw): raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have " "been called!") % (a, kw)) def check(op_name, ndsubclass): rop_name, np_op, has_iop = ops[op_name] if has_iop: iop_name = '__i' + op_name[2:] iop = getattr(operator, iop_name) if op_name == "__divmod__": op = divmod else: op = getattr(operator, op_name) # Dummy class def __init__(self, *a, **kw): pass def __numpy_ufunc__(self, *a, **kw): raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have " "been called!") % (a, kw)) def __op__(self, *other): return "op" def __rop__(self, *other): return "rop" if ndsubclass: bases = (np.ndarray,) else: bases = (object,) dct = {'__init__': __init__, '__numpy_ufunc__': __numpy_ufunc__, op_name: __op__} if op_name != rop_name: 
dct[rop_name] = __rop__ cls = type("Rop" + rop_name, bases, dct) # Check behavior against both bare ndarray objects and a # ndarray subclasses with and without their own override obj = cls((1,), buffer=np.ones(1,)) arr_objs = [np.array([1]), np.array([2]).view(OtherNdarraySubclass), np.array([3]).view(OtherNdarraySubclassWithOverride), ] for arr in arr_objs: err_msg = "%r %r" % (op_name, arr,) # Check that ndarray op gives up if it sees a non-subclass if not isinstance(obj, arr.__class__): assert_equal(getattr(arr, op_name)(obj), NotImplemented, err_msg=err_msg) # Check that the Python binops have priority assert_equal(op(obj, arr), "op", err_msg=err_msg) if op_name == rop_name: assert_equal(op(arr, obj), "op", err_msg=err_msg) else: assert_equal(op(arr, obj), "rop", err_msg=err_msg) # Check that Python binops have priority also for in-place ops if has_iop: assert_equal(getattr(arr, iop_name)(obj), NotImplemented, err_msg=err_msg) if op_name != "__pow__": # inplace pow requires the other object to be # integer-like? assert_equal(iop(arr, obj), "rop", err_msg=err_msg) # Check that ufunc call __numpy_ufunc__ normally if np_op is not None: assert_raises(AssertionError, np_op, arr, obj, err_msg=err_msg) assert_raises(AssertionError, np_op, obj, arr, err_msg=err_msg) # Check all binary operations for op_name in sorted(ops.keys()): yield check, op_name, True yield check, op_name, False def test_ufunc_override_rop_simple(self): # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5864 return # Check parts of the binary op overriding behavior in an # explicit test case that is easier to understand. class SomeClass(object): def __numpy_ufunc__(self, *a, **kw): return "ufunc" def __mul__(self, other): return 123 def __rmul__(self, other): return 321 def __rsub__(self, other): return "no subs for me" def __gt__(self, other): return "yep" def __lt__(self, other): return "nope" class SomeClass2(SomeClass, np.ndarray): def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw): if ufunc is np.multiply or ufunc is np.bitwise_and: return "ufunc" else: inputs = list(inputs) inputs[i] = np.asarray(self) func = getattr(ufunc, method) r = func(*inputs, **kw) if 'out' in kw: return r else: x = self.__class__(r.shape, dtype=r.dtype) x[...] = r return x class SomeClass3(SomeClass2): def __rsub__(self, other): return "sub for me" arr = np.array([0]) obj = SomeClass() obj2 = SomeClass2((1,), dtype=np.int_) obj2[0] = 9 obj3 = SomeClass3((1,), dtype=np.int_) obj3[0] = 4 # obj is first, so should get to define outcome. assert_equal(obj * arr, 123) # obj is second, but has __numpy_ufunc__ and defines __rmul__. assert_equal(arr * obj, 321) # obj is second, but has __numpy_ufunc__ and defines __rsub__. assert_equal(arr - obj, "no subs for me") # obj is second, but has __numpy_ufunc__ and defines __lt__. assert_equal(arr > obj, "nope") # obj is second, but has __numpy_ufunc__ and defines __gt__. assert_equal(arr < obj, "yep") # Called as a ufunc, obj.__numpy_ufunc__ is used. assert_equal(np.multiply(arr, obj), "ufunc") # obj is second, but has __numpy_ufunc__ and defines __rmul__. arr *= obj assert_equal(arr, 321) # obj2 is an ndarray subclass, so CPython takes care of the same rules. assert_equal(obj2 * arr, 123) assert_equal(arr * obj2, 321) assert_equal(arr - obj2, "no subs for me") assert_equal(arr > obj2, "nope") assert_equal(arr < obj2, "yep") # Called as a ufunc, obj2.__numpy_ufunc__ is called. assert_equal(np.multiply(arr, obj2), "ufunc") # Also when the method is not overridden. 
assert_equal(arr & obj2, "ufunc") arr *= obj2 assert_equal(arr, 321) obj2 += 33 assert_equal(obj2[0], 42) assert_equal(obj2.sum(), 42) assert_(isinstance(obj2, SomeClass2)) # Obj3 is subclass that defines __rsub__. CPython calls it. assert_equal(arr - obj3, "sub for me") assert_equal(obj2 - obj3, "sub for me") # obj3 is a subclass that defines __rmul__. CPython calls it. assert_equal(arr * obj3, 321) # But not here, since obj3.__rmul__ is obj2.__rmul__. assert_equal(obj2 * obj3, 123) # And of course, here obj3.__mul__ should be called. assert_equal(obj3 * obj2, 123) # obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__. # (and both are just ndarray.__radd__); see #4815. res = obj2 + obj3 assert_equal(res, 46) assert_(isinstance(res, SomeClass2)) # Since obj3 is a subclass, it should have precedence, like CPython # would give, even though obj2 has __numpy_ufunc__ and __radd__. # See gh-4815 and gh-5747. res = obj3 + obj2 assert_equal(res, 46) assert_(isinstance(res, SomeClass3)) def test_ufunc_override_normalize_signature(self): # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844 return # gh-5674 class SomeClass(object): def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw): return kw a = SomeClass() kw = np.add(a, [1]) assert_('sig' not in kw and 'signature' not in kw) kw = np.add(a, [1], sig='ii->i') assert_('sig' not in kw and 'signature' in kw) assert_equal(kw['signature'], 'ii->i') kw = np.add(a, [1], signature='ii->i') assert_('sig' not in kw and 'signature' in kw) assert_equal(kw['signature'], 'ii->i') class TestCAPI(TestCase): def test_IsPythonScalar(self): from numpy.core.multiarray_tests import IsPythonScalar assert_(IsPythonScalar(b'foobar')) assert_(IsPythonScalar(1)) assert_(IsPythonScalar(2**80)) assert_(IsPythonScalar(2.)) assert_(IsPythonScalar("a")) class TestSubscripting(TestCase): def test_test_zero_rank(self): x = np.array([1, 2, 3]) self.assertTrue(isinstance(x[0], np.int_)) if sys.version_info[0] < 3: self.assertTrue(isinstance(x[0], int)) self.assertTrue(type(x[0, ...]) is np.ndarray) class TestPickling(TestCase): def test_roundtrip(self): import pickle carray = np.array([[2, 9], [7, 0], [3, 8]]) DATA = [ carray, np.transpose(carray), np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int), ('c', float)]) ] for a in DATA: assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a) def _loads(self, obj): if sys.version_info[0] >= 3: return np.loads(obj, encoding='latin1') else: return np.loads(obj) # version 0 pickles, using protocol=2 to pickle # version 0 doesn't have a version field def test_version0_int8(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' a = np.array([1, 2, 3, 4], dtype=np.int8) p = self._loads(asbytes(s)) assert_equal(a, p) def test_version0_float32(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.' 
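        # --- Editor's illustrative sketch, not part of the upstream test
        # suite: test_roundtrip above depends on a plain pickle round trip
        # restoring an identical array; pickle.dumps(demo) mirrors the
        # arr.dumps()/pickle.loads() pair used there (fresh names only).
        import pickle
        demo = np.array([[2, 9], [7, 0], [3, 8]])
        assert_equal(demo, pickle.loads(pickle.dumps(demo)))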
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32) p = self._loads(asbytes(s)) assert_equal(a, p) def test_version0_object(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.' a = np.array([{'a':1}, {'b':2}]) p = self._loads(asbytes(s)) assert_equal(a, p) # version 1 pickles, using protocol=2 to pickle def test_version1_int8(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' a = np.array([1, 2, 3, 4], dtype=np.int8) p = self._loads(asbytes(s)) assert_equal(a, p) def test_version1_float32(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.' a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32) p = self._loads(asbytes(s)) assert_equal(a, p) def test_version1_object(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.' a = np.array([{'a':1}, {'b':2}]) p = self._loads(asbytes(s)) assert_equal(a, p) def test_subarray_int_shape(self): s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb." 
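        # --- Editor's illustrative sketch, not part of the upstream test
        # suite: in a dtype such as [('a', 'i1', (2, 2)), ('b', 'i1', 2)] each
        # field carries a fixed subarray shape, and a scalar assigned to such
        # a field is broadcast across it, as in the reconstruction checked in
        # test_subarray_int_shape below (fresh names only).
        demo = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
        assert_equal(demo['a'], np.ones((1, 2, 2), dtype='i1'))
        assert_equal(demo['b'], [[1, 2]])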
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)]) p = self._loads(asbytes(s)) assert_equal(a, p) class TestFancyIndexing(TestCase): def test_list(self): x = np.ones((1, 1)) x[:, [0]] = 2.0 assert_array_equal(x, np.array([[2.0]])) x = np.ones((1, 1, 1)) x[:,:, [0]] = 2.0 assert_array_equal(x, np.array([[[2.0]]])) def test_tuple(self): x = np.ones((1, 1)) x[:, (0,)] = 2.0 assert_array_equal(x, np.array([[2.0]])) x = np.ones((1, 1, 1)) x[:,:, (0,)] = 2.0 assert_array_equal(x, np.array([[[2.0]]])) def test_mask(self): x = np.array([1, 2, 3, 4]) m = np.array([0, 1, 0, 0], bool) assert_array_equal(x[m], np.array([2])) def test_mask2(self): x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) m = np.array([0, 1], bool) m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool) m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool) assert_array_equal(x[m], np.array([[5, 6, 7, 8]])) assert_array_equal(x[m2], np.array([2, 5])) assert_array_equal(x[m3], np.array([2])) def test_assign_mask(self): x = np.array([1, 2, 3, 4]) m = np.array([0, 1, 0, 0], bool) x[m] = 5 assert_array_equal(x, np.array([1, 5, 3, 4])) def test_assign_mask2(self): xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) m = np.array([0, 1], bool) m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool) m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool) x = xorig.copy() x[m] = 10 assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]])) x = xorig.copy() x[m2] = 10 assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]])) x = xorig.copy() x[m3] = 10 assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]])) class TestStringCompare(TestCase): def test_string(self): g1 = np.array(["This", "is", "example"]) g2 = np.array(["This", "was", "example"]) assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) def test_mixed(self): g1 = np.array(["spam", "spa", "spammer", "and eggs"]) g2 = "spam" assert_array_equal(g1 == g2, [x == g2 for x in g1]) assert_array_equal(g1 != g2, [x != g2 for x in g1]) assert_array_equal(g1 < g2, [x < g2 for x in g1]) assert_array_equal(g1 > g2, [x > g2 for x in g1]) assert_array_equal(g1 <= g2, [x <= g2 for x in g1]) assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) def test_unicode(self): g1 = np.array([sixu("This"), sixu("is"), sixu("example")]) g2 = np.array([sixu("This"), sixu("was"), sixu("example")]) assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) class TestArgmax(TestCase): nan_arr = [ ([0, 1, 2, 3, np.nan], 4), ([0, 1, 2, np.nan, 3], 3), ([np.nan, 0, 1, 2, 3], 0), ([np.nan, 0, np.nan, 2, 3], 0), ([0, 1, 2, 3, complex(0, np.nan)], 4), ([0, 1, 2, 3, complex(np.nan, 0)], 4), ([0, 1, 2, complex(np.nan, 0), 3], 3), ([0, 1, 2, complex(0, np.nan), 3], 3), ([complex(0, np.nan), 0, 1, 2, 3], 0), ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), ([complex(np.nan, np.nan), 
complex(np.nan, 2), complex(np.nan, 1)], 0), ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), ([complex(0, 0), complex(0, 2), complex(0, 1)], 1), ([complex(1, 0), complex(0, 2), complex(0, 1)], 0), ([complex(1, 0), complex(0, 2), complex(1, 1)], 2), ([np.datetime64('1923-04-14T12:43:12'), np.datetime64('1994-06-21T14:43:15'), np.datetime64('2001-10-15T04:10:32'), np.datetime64('1995-11-25T16:02:16'), np.datetime64('2005-01-04T03:14:12'), np.datetime64('2041-12-03T14:05:03')], 5), ([np.datetime64('1935-09-14T04:40:11'), np.datetime64('1949-10-12T12:32:11'), np.datetime64('2010-01-03T05:14:12'), np.datetime64('2015-11-20T12:20:59'), np.datetime64('1932-09-23T10:10:13'), np.datetime64('2014-10-10T03:50:30')], 3), # Assorted tests with NaTs ([np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('2010-01-03T05:14:12'), np.datetime64('NaT'), np.datetime64('2015-09-23T10:10:13'), np.datetime64('1932-10-10T03:50:30')], 4), ([np.datetime64('2059-03-14T12:43:12'), np.datetime64('1996-09-21T14:43:15'), np.datetime64('NaT'), np.datetime64('2022-12-25T16:02:16'), np.datetime64('1963-10-04T03:14:12'), np.datetime64('2013-05-08T18:15:23')], 0), ([np.timedelta64(2, 's'), np.timedelta64(1, 's'), np.timedelta64('NaT', 's'), np.timedelta64(3, 's')], 3), ([np.timedelta64('NaT', 's')] * 3, 0), ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), timedelta(days=-1, seconds=23)], 0), ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), timedelta(days=5, seconds=14)], 1), ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5), timedelta(days=10, seconds=43)], 2), ([False, False, False, False, True], 4), ([False, False, False, True, False], 3), ([True, False, False, False, False], 0), ([True, False, True, False, False], 0), # Can't reduce a "flexible type" #(['a', 'z', 'aa', 'zz'], 3), #(['zz', 'a', 'aa', 'a'], 0), #(['aa', 'z', 'zz', 'a'], 2), ] def test_all(self): a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) for i in range(a.ndim): amax = a.max(i) aargmax = a.argmax(i) axes = list(range(a.ndim)) axes.remove(i) assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes)))) def test_combinations(self): for arr, pos in self.nan_arr: assert_equal(np.argmax(arr), pos, err_msg="%r" % arr) assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr) def test_output_shape(self): # see also gh-616 a = np.ones((10, 5)) # Check some simple shape mismatches out = np.ones(11, dtype=np.int_) assert_raises(ValueError, a.argmax, -1, out) out = np.ones((2, 5), dtype=np.int_) assert_raises(ValueError, a.argmax, -1, out) # these could be relaxed possibly (used to allow even the previous) out = np.ones((1, 10), dtype=np.int_) assert_raises(ValueError, a.argmax, -1, np.ones((1, 10))) out = np.ones(10, dtype=np.int_) a.argmax(-1, out=out) assert_equal(out, a.argmax(-1)) def test_argmax_unicode(self): d = np.zeros(6031, dtype='<U9') d[5942] = "as" assert_equal(d.argmax(), 5942) def test_np_vs_ndarray(self): # make sure both ndarray.argmax and numpy.argmax support out/axis args a = np.random.normal(size=(2,3)) #check positional args out1 = np.zeros(2, dtype=int) out2 = np.zeros(2, dtype=int) assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2)) assert_equal(out1, out2) #check keyword args out1 = np.zeros(3, dtype=int) out2 = np.zeros(3, dtype=int) assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0)) assert_equal(out1, out2) class TestArgmin(TestCase): nan_arr = [ ([0, 1, 2, 3, np.nan], 4), ([0, 1, 2, np.nan, 3], 3), ([np.nan, 0, 1, 2, 3], 0), ([np.nan, 
0, np.nan, 2, 3], 0), ([0, 1, 2, 3, complex(0, np.nan)], 4), ([0, 1, 2, 3, complex(np.nan, 0)], 4), ([0, 1, 2, complex(np.nan, 0), 3], 3), ([0, 1, 2, complex(0, np.nan), 3], 3), ([complex(0, np.nan), 0, 1, 2, 3], 0), ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), ([complex(0, 0), complex(0, 2), complex(0, 1)], 0), ([complex(1, 0), complex(0, 2), complex(0, 1)], 2), ([complex(1, 0), complex(0, 2), complex(1, 1)], 1), ([np.datetime64('1923-04-14T12:43:12'), np.datetime64('1994-06-21T14:43:15'), np.datetime64('2001-10-15T04:10:32'), np.datetime64('1995-11-25T16:02:16'), np.datetime64('2005-01-04T03:14:12'), np.datetime64('2041-12-03T14:05:03')], 0), ([np.datetime64('1935-09-14T04:40:11'), np.datetime64('1949-10-12T12:32:11'), np.datetime64('2010-01-03T05:14:12'), np.datetime64('2014-11-20T12:20:59'), np.datetime64('2015-09-23T10:10:13'), np.datetime64('1932-10-10T03:50:30')], 5), # Assorted tests with NaTs ([np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('2010-01-03T05:14:12'), np.datetime64('NaT'), np.datetime64('2015-09-23T10:10:13'), np.datetime64('1932-10-10T03:50:30')], 5), ([np.datetime64('2059-03-14T12:43:12'), np.datetime64('1996-09-21T14:43:15'), np.datetime64('NaT'), np.datetime64('2022-12-25T16:02:16'), np.datetime64('1963-10-04T03:14:12'), np.datetime64('2013-05-08T18:15:23')], 4), ([np.timedelta64(2, 's'), np.timedelta64(1, 's'), np.timedelta64('NaT', 's'), np.timedelta64(3, 's')], 1), ([np.timedelta64('NaT', 's')] * 3, 0), ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), timedelta(days=-1, seconds=23)], 2), ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), timedelta(days=5, seconds=14)], 0), ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5), timedelta(days=10, seconds=43)], 1), ([True, True, True, True, False], 4), ([True, True, True, False, True], 3), ([False, True, True, True, True], 0), ([False, True, False, True, True], 0), # Can't reduce a "flexible type" #(['a', 'z', 'aa', 'zz'], 0), #(['zz', 'a', 'aa', 'a'], 1), #(['aa', 'z', 'zz', 'a'], 3), ] def test_all(self): a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) for i in range(a.ndim): amin = a.min(i) aargmin = a.argmin(i) axes = list(range(a.ndim)) axes.remove(i) assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes)))) def test_combinations(self): for arr, pos in self.nan_arr: assert_equal(np.argmin(arr), pos, err_msg="%r" % arr) assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr) def test_minimum_signed_integers(self): a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8) assert_equal(np.argmin(a), 1) a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16) assert_equal(np.argmin(a), 1) a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32) assert_equal(np.argmin(a), 1) a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64) assert_equal(np.argmin(a), 1) def test_output_shape(self): # see also gh-616 a = np.ones((10, 5)) # Check some simple shape mismatches out = np.ones(11, dtype=np.int_) assert_raises(ValueError, a.argmin, -1, out) out = np.ones((2, 5), dtype=np.int_) assert_raises(ValueError, a.argmin, -1, out) # these could be relaxed possibly (used to allow even the previous) out = np.ones((1, 10), dtype=np.int_) assert_raises(ValueError, a.argmin, -1, np.ones((1, 10))) out = np.ones(10, dtype=np.int_) a.argmin(-1, out=out) assert_equal(out, 
a.argmin(-1)) def test_argmin_unicode(self): d = np.ones(6031, dtype='<U9') d[6001] = "0" assert_equal(d.argmin(), 6001) def test_np_vs_ndarray(self): # make sure both ndarray.argmin and numpy.argmin support out/axis args a = np.random.normal(size=(2,3)) #check positional args out1 = np.zeros(2, dtype=int) out2 = np.ones(2, dtype=int) assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2)) assert_equal(out1, out2) #check keyword args out1 = np.zeros(3, dtype=int) out2 = np.ones(3, dtype=int) assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0)) assert_equal(out1, out2) class TestMinMax(TestCase): def test_scalar(self): assert_raises(ValueError, np.amax, 1, 1) assert_raises(ValueError, np.amin, 1, 1) assert_equal(np.amax(1, axis=0), 1) assert_equal(np.amin(1, axis=0), 1) assert_equal(np.amax(1, axis=None), 1) assert_equal(np.amin(1, axis=None), 1) def test_axis(self): assert_raises(ValueError, np.amax, [1, 2, 3], 1000) assert_equal(np.amax([[1, 2, 3]], axis=1), 3) def test_datetime(self): # NaTs are ignored for dtype in ('m8[s]', 'm8[Y]'): a = np.arange(10).astype(dtype) a[3] = 'NaT' assert_equal(np.amin(a), a[0]) assert_equal(np.amax(a), a[9]) a[0] = 'NaT' assert_equal(np.amin(a), a[1]) assert_equal(np.amax(a), a[9]) a.fill('NaT') assert_equal(np.amin(a), a[0]) assert_equal(np.amax(a), a[0]) class TestNewaxis(TestCase): def test_basic(self): sk = np.array([0, -0.1, 0.1]) res = 250*sk[:, np.newaxis] assert_almost_equal(res.ravel(), 250*sk) class TestClip(TestCase): def _check_range(self, x, cmin, cmax): assert_(np.all(x >= cmin)) assert_(np.all(x <= cmax)) def _clip_type(self, type_group, array_max, clip_min, clip_max, inplace=False, expected_min=None, expected_max=None): if expected_min is None: expected_min = clip_min if expected_max is None: expected_max = clip_max for T in np.sctypes[type_group]: if sys.byteorder == 'little': byte_orders = ['=', '>'] else: byte_orders = ['<', '='] for byteorder in byte_orders: dtype = np.dtype(T).newbyteorder(byteorder) x = (np.random.random(1000) * array_max).astype(dtype) if inplace: x.clip(clip_min, clip_max, x) else: x = x.clip(clip_min, clip_max) byteorder = '=' if x.dtype.byteorder == '|': byteorder = '|' assert_equal(x.dtype.byteorder, byteorder) self._check_range(x, expected_min, expected_max) return x def test_basic(self): for inplace in [False, True]: self._clip_type( 'float', 1024, -12.8, 100.2, inplace=inplace) self._clip_type( 'float', 1024, 0, 0, inplace=inplace) self._clip_type( 'int', 1024, -120, 100.5, inplace=inplace) self._clip_type( 'int', 1024, 0, 0, inplace=inplace) self._clip_type( 'uint', 1024, 0, 0, inplace=inplace) self._clip_type( 'uint', 1024, -120, 100, inplace=inplace, expected_min=0) def test_record_array(self): rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')]) y = rec['x'].clip(-0.3, 0.5) self._check_range(y, -0.3, 0.5) def test_max_or_min(self): val = np.array([0, 1, 2, 3, 4, 5, 6, 7]) x = val.clip(3) assert_(np.all(x >= 3)) x = val.clip(min=3) assert_(np.all(x >= 3)) x = val.clip(max=4) assert_(np.all(x <= 4)) class TestPutmask(object): def tst_basic(self, x, T, mask, val): np.putmask(x, mask, val) assert_(np.all(x[mask] == T(val))) assert_(x.dtype == T) def test_ip_types(self): unchecked_types = [str, unicode, np.void, object] x = np.random.random(1000)*100 mask = x < 40 for val in [-100, 0, 15]: for types in np.sctypes.values(): for T in types: if T not in unchecked_types: yield self.tst_basic, x.copy().astype(T), T, mask, val def 
test_mask_size(self):
        assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)

    def tst_byteorder(self, dtype):
        x = np.array([1, 2, 3], dtype)
        np.putmask(x, [True, False, True], -1)
        assert_array_equal(x, [-1, 2, -1])

    def test_ip_byteorder(self):
        for dtype in ('>i4', '<i4'):
            yield self.tst_byteorder, dtype

    def test_record_array(self):
        # Note mixed byteorder.
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
        np.putmask(rec['x'], [True, False], 10)
        assert_array_equal(rec['x'], [10, 5])
        assert_array_equal(rec['y'], [2, 4])
        assert_array_equal(rec['z'], [3, 3])
        np.putmask(rec['y'], [True, False], 11)
        assert_array_equal(rec['x'], [10, 5])
        assert_array_equal(rec['y'], [11, 4])
        assert_array_equal(rec['z'], [3, 3])

    def test_masked_array(self):
        ## x = np.array([1,2,3])
        ## z = np.ma.array(x,mask=[True,False,False])
        ## np.putmask(z,[True,True,True],3)
        pass


class TestTake(object):
    def tst_basic(self, x):
        ind = list(range(x.shape[0]))
        assert_array_equal(x.take(ind, axis=0), x)

    def test_ip_types(self):
        unchecked_types = [str, unicode, np.void, object]

        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        for types in np.sctypes.values():
            for T in types:
                if T not in unchecked_types:
                    yield self.tst_basic, x.copy().astype(T)

    def test_raise(self):
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
        assert_raises(IndexError, x.take, [-3], axis=0)
        assert_array_equal(x.take([-1], axis=0)[0], x[1])

    def test_clip(self):
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
        assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])

    def test_wrap(self):
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
        assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
        assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])

    def tst_byteorder(self, dtype):
        x = np.array([1, 2, 3], dtype)
        assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])

    def test_ip_byteorder(self):
        for dtype in ('>i4', '<i4'):
            yield self.tst_byteorder, dtype

    def test_record_array(self):
        # Note mixed byteorder.
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
        rec1 = rec.take([1])
        assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)


class TestLexsort(TestCase):
    def test_basic(self):
        a = [1, 2, 1, 3, 1, 5]
        b = [0, 4, 5, 6, 2, 3]
        idx = np.lexsort((b, a))
        expected_idx = np.array([0, 4, 2, 1, 3, 5])
        assert_array_equal(idx, expected_idx)

        x = np.vstack((b, a))
        idx = np.lexsort(x)
        assert_array_equal(idx, expected_idx)

        assert_array_equal(x[1][idx], np.sort(x[1]))

    def test_datetime(self):
        a = np.array([0,0,0], dtype='datetime64[D]')
        b = np.array([2,1,0], dtype='datetime64[D]')
        idx = np.lexsort((b, a))
        expected_idx = np.array([2, 1, 0])
        assert_array_equal(idx, expected_idx)

        a = np.array([0,0,0], dtype='timedelta64[D]')
        b = np.array([2,1,0], dtype='timedelta64[D]')
        idx = np.lexsort((b, a))
        expected_idx = np.array([2, 1, 0])
        assert_array_equal(idx, expected_idx)


class TestIO(object):
    """Test tofile, fromfile, tobytes, and fromstring"""

    def setUp(self):
        shape = (2, 4, 3)
        rand = np.random.random
        self.x = rand(shape) + rand(shape).astype(np.complex)*1j
        self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
        self.dtype = self.x.dtype
        self.tempdir = tempfile.mkdtemp()
        self.filename = tempfile.mktemp(dir=self.tempdir)

    def tearDown(self):
        shutil.rmtree(self.tempdir)

    def test_bool_fromstring(self):
        v = np.array([True, False, True, False], dtype=np.bool_)
        y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
        assert_array_equal(v, y)

    def test_uint64_fromstring(self):
        d = np.fromstring("9923372036854775807 104783749223640", dtype=np.uint64, sep=' ')
        e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
        assert_array_equal(d, e)

    def test_int64_fromstring(self):
        d = np.fromstring("-25041670086757 104783749223640", dtype=np.int64, sep=' ')
        e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
        assert_array_equal(d, e)

    def test_empty_files_binary(self):
        f = open(self.filename, 'w')
        f.close()
        y = np.fromfile(self.filename)
        assert_(y.size == 0, "Array not empty")

    def test_empty_files_text(self):
        f = open(self.filename, 'w')
        f.close()
        y = np.fromfile(self.filename, sep=" ")
        assert_(y.size == 0, "Array not empty")

    def test_roundtrip_file(self):
        f = open(self.filename, 'wb')
        self.x.tofile(f)
        f.close()
        # NB. doesn't work with flush+seek, due to use of C stdio
        f = open(self.filename, 'rb')
        y = np.fromfile(f, dtype=self.dtype)
        f.close()
        assert_array_equal(y, self.x.flat)

    def test_roundtrip_filename(self):
        self.x.tofile(self.filename)
        y = np.fromfile(self.filename, dtype=self.dtype)
        assert_array_equal(y, self.x.flat)

    def test_roundtrip_binary_str(self):
        s = self.x.tobytes()
        y = np.fromstring(s, dtype=self.dtype)
        assert_array_equal(y, self.x.flat)

        s = self.x.tobytes('F')
        y = np.fromstring(s, dtype=self.dtype)
        assert_array_equal(y, self.x.flatten('F'))

    def test_roundtrip_str(self):
        x = self.x.real.ravel()
        s = "@".join(map(str, x))
        y = np.fromstring(s, sep="@")
        # NB.
str imbues less precision nan_mask = ~np.isfinite(x) assert_array_equal(x[nan_mask], y[nan_mask]) assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5) def test_roundtrip_repr(self): x = self.x.real.ravel() s = "@".join(map(repr, x)) y = np.fromstring(s, sep="@") assert_array_equal(x, y) def test_file_position_after_fromfile(self): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE//8, io.DEFAULT_BUFFER_SIZE, io.DEFAULT_BUFFER_SIZE*8] for size in sizes: f = open(self.filename, 'wb') f.seek(size-1) f.write(b'\0') f.close() for mode in ['rb', 'r+b']: err_msg = "%d %s" % (size, mode) f = open(self.filename, mode) f.read(2) np.fromfile(f, dtype=np.float64, count=1) pos = f.tell() f.close() assert_equal(pos, 10, err_msg=err_msg) def test_file_position_after_tofile(self): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE//8, io.DEFAULT_BUFFER_SIZE, io.DEFAULT_BUFFER_SIZE*8] for size in sizes: err_msg = "%d" % (size,) f = open(self.filename, 'wb') f.seek(size-1) f.write(b'\0') f.seek(10) f.write(b'12') np.array([0], dtype=np.float64).tofile(f) pos = f.tell() f.close() assert_equal(pos, 10 + 2 + 8, err_msg=err_msg) f = open(self.filename, 'r+b') f.read(2) f.seek(0, 1) # seek between read&write required by ANSI C np.array([0], dtype=np.float64).tofile(f) pos = f.tell() f.close() assert_equal(pos, 10, err_msg=err_msg) def _check_from(self, s, value, **kw): y = np.fromstring(asbytes(s), **kw) assert_array_equal(y, value) f = open(self.filename, 'wb') f.write(asbytes(s)) f.close() y = np.fromfile(self.filename, **kw) assert_array_equal(y, value) def test_nan(self): self._check_from( "nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], sep=' ') def test_inf(self): self._check_from( "inf +inf -inf infinity -Infinity iNfInItY -inF", [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf], sep=' ') def test_numbers(self): self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133", [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ') def test_binary(self): self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', np.array([1, 2, 3, 4]), dtype='<f4') @dec.slow # takes > 1 minute on mechanical hard drive def test_big_binary(self): """Test workarounds for 32-bit limited fwrite, fseek, and ftell calls in windows. These normally would hang doing something like this. 
See http://projects.scipy.org/numpy/ticket/1660""" if sys.platform != 'win32': return try: # before workarounds, only up to 2**32-1 worked fourgbplus = 2**32 + 2**16 testbytes = np.arange(8, dtype=np.int8) n = len(testbytes) flike = tempfile.NamedTemporaryFile() f = flike.file np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f) flike.seek(0) a = np.fromfile(f, dtype=np.int8) flike.close() assert_(len(a) == fourgbplus) # check only start and end for speed: assert_((a[:n] == testbytes).all()) assert_((a[-n:] == testbytes).all()) except (MemoryError, ValueError): pass def test_string(self): self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',') def test_counted_string(self): self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',') self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',') self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',') def test_string_with_ws(self): self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ') def test_counted_string_with_ws(self): self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int, sep=' ') def test_ascii(self): self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',') self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',') def test_malformed(self): self._check_from('1.234 1,234', [1.234, 1.], sep=' ') def test_long_sep(self): self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_') def test_dtype(self): v = np.array([1, 2, 3, 4], dtype=np.int_) self._check_from('1,2,3,4', v, sep=',', dtype=np.int_) def test_dtype_bool(self): # can't use _check_from because fromstring can't handle True/False v = np.array([True, False, True, False], dtype=np.bool_) s = '1,0,-2.3,0' f = open(self.filename, 'wb') f.write(asbytes(s)) f.close() y = np.fromfile(self.filename, sep=',', dtype=np.bool_) assert_(y.dtype == '?') assert_array_equal(y, v) def test_tofile_sep(self): x = np.array([1.51, 2, 3.51, 4], dtype=float) f = open(self.filename, 'w') x.tofile(f, sep=',') f.close() f = open(self.filename, 'r') s = f.read() f.close() assert_equal(s, '1.51,2.0,3.51,4.0') def test_tofile_format(self): x = np.array([1.51, 2, 3.51, 4], dtype=float) f = open(self.filename, 'w') x.tofile(f, sep=',', format='%.2f') f.close() f = open(self.filename, 'r') s = f.read() f.close() assert_equal(s, '1.51,2.00,3.51,4.00') def test_locale(self): in_foreign_locale(self.test_numbers)() in_foreign_locale(self.test_nan)() in_foreign_locale(self.test_inf)() in_foreign_locale(self.test_counted_string)() in_foreign_locale(self.test_ascii)() in_foreign_locale(self.test_malformed)() in_foreign_locale(self.test_tofile_sep)() in_foreign_locale(self.test_tofile_format)() class TestFromBuffer(object): def tst_basic(self, buffer, expected, kwargs): assert_array_equal(np.frombuffer(buffer,**kwargs), expected) def test_ip_basic(self): for byteorder in ['<', '>']: for dtype in [float, int, np.complex]: dt = np.dtype(dtype).newbyteorder(byteorder) x = (np.random.random((4, 7))*5).astype(dt) buf = x.tobytes() yield self.tst_basic, buf, x.flat, {'dtype':dt} def test_empty(self): yield self.tst_basic, asbytes(''), np.array([]), {} class TestFlat(TestCase): def setUp(self): a0 = np.arange(20.0) a = a0.reshape(4, 5) a0.shape = (4, 5) a.flags.writeable = False self.a = a self.b = a[::2, ::2] self.a0 = a0 self.b0 = a0[::2, ::2] def test_contiguous(self): testpassed = False try: self.a.flat[12] = 100.0 except ValueError: testpassed = True assert testpassed assert self.a.flat[12] == 12.0 def test_discontiguous(self): testpassed = False try: self.b.flat[4] = 100.0 
except ValueError: testpassed = True assert testpassed assert self.b.flat[4] == 12.0 def test___array__(self): c = self.a.flat.__array__() d = self.b.flat.__array__() e = self.a0.flat.__array__() f = self.b0.flat.__array__() assert c.flags.writeable is False assert d.flags.writeable is False assert e.flags.writeable is True assert f.flags.writeable is True assert c.flags.updateifcopy is False assert d.flags.updateifcopy is False assert e.flags.updateifcopy is False assert f.flags.updateifcopy is True assert f.base is self.b0 class TestResize(TestCase): def test_basic(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) x.resize((5, 5)) assert_array_equal(x.flat[:9], np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) assert_array_equal(x[9:].flat, 0) def test_check_reference(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) y = x self.assertRaises(ValueError, x.resize, (5, 1)) del y # avoid pyflakes unused variable warning. def test_int_shape(self): x = np.eye(3) x.resize(3) assert_array_equal(x, np.eye(3)[0,:]) def test_none_shape(self): x = np.eye(3) x.resize(None) assert_array_equal(x, np.eye(3)) x.resize() assert_array_equal(x, np.eye(3)) def test_invalid_arguements(self): self.assertRaises(TypeError, np.eye(3).resize, 'hi') self.assertRaises(ValueError, np.eye(3).resize, -1) self.assertRaises(TypeError, np.eye(3).resize, order=1) self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi') def test_freeform_shape(self): x = np.eye(3) x.resize(3, 2, 1) assert_(x.shape == (3, 2, 1)) def test_zeros_appended(self): x = np.eye(3) x.resize(2, 3, 3) assert_array_equal(x[0], np.eye(3)) assert_array_equal(x[1], np.zeros((3, 3))) def test_obj_obj(self): # check memory is initialized on resize, gh-4857 a = np.ones(10, dtype=[('k', object, 2)]) a.resize(15,) assert_equal(a.shape, (15,)) assert_array_equal(a['k'][-5:], 0) assert_array_equal(a['k'][:-5], 1) class TestRecord(TestCase): def test_field_rename(self): dt = np.dtype([('f', float), ('i', int)]) dt.names = ['p', 'q'] assert_equal(dt.names, ['p', 'q']) if sys.version_info[0] >= 3: def test_bytes_fields(self): # Bytes are not allowed in field names and not recognized in titles # on Py3 assert_raises(TypeError, np.dtype, [(asbytes('a'), int)]) assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)]) dt = np.dtype([((asbytes('a'), 'b'), int)]) assert_raises(ValueError, dt.__getitem__, asbytes('a')) x = np.array([(1,), (2,), (3,)], dtype=dt) assert_raises(IndexError, x.__getitem__, asbytes('a')) y = x[0] assert_raises(IndexError, y.__getitem__, asbytes('a')) else: def test_unicode_field_titles(self): # Unicode field titles are added to field dict on Py2 title = unicode('b') dt = np.dtype([((title, 'a'), int)]) dt[title] dt['a'] x = np.array([(1,), (2,), (3,)], dtype=dt) x[title] x['a'] y = x[0] y[title] y['a'] def test_unicode_field_names(self): # Unicode field names are not allowed on Py2 title = unicode('b') assert_raises(TypeError, np.dtype, [(title, int)]) assert_raises(TypeError, np.dtype, [(('a', title), int)]) def test_field_names(self): # Test unicode and 8-bit / byte strings can be used a = np.zeros((1,), dtype=[('f1', 'i4'), ('f2', 'i4'), ('f3', [('sf1', 'i4')])]) is_py3 = sys.version_info[0] >= 3 if is_py3: funcs = (str,) # byte string indexing fails gracefully assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1) assert_raises(IndexError, a.__getitem__, asbytes('f1')) assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1) assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1')) else: funcs = (str, 
unicode) for func in funcs: b = a.copy() fn1 = func('f1') b[fn1] = 1 assert_equal(b[fn1], 1) fnn = func('not at all') assert_raises(ValueError, b.__setitem__, fnn, 1) assert_raises(ValueError, b.__getitem__, fnn) b[0][fn1] = 2 assert_equal(b[fn1], 2) # Subfield assert_raises(ValueError, b[0].__setitem__, fnn, 1) assert_raises(ValueError, b[0].__getitem__, fnn) # Subfield fn3 = func('f3') sfn1 = func('sf1') b[fn3][sfn1] = 1 assert_equal(b[fn3][sfn1], 1) assert_raises(ValueError, b[fn3].__setitem__, fnn, 1) assert_raises(ValueError, b[fn3].__getitem__, fnn) # multiple Subfields fn2 = func('f2') b[fn2] = 3 assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2)) assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,))) # view of subfield view/copy assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3)) assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2)) view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])] assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,))) # non-ascii unicode field indexing is well behaved if not is_py3: raise SkipTest('non ascii unicode field indexing skipped; ' 'raises segfault on python 2.x') else: assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1) assert_raises(ValueError, a.__getitem__, sixu('\u03e0')) def test_field_names_deprecation(self): def collect_warnings(f, *args, **kwargs): with warnings.catch_warnings(record=True) as log: warnings.simplefilter("always") f(*args, **kwargs) return [w.category for w in log] a = np.zeros((1,), dtype=[('f1', 'i4'), ('f2', 'i4'), ('f3', [('sf1', 'i4')])]) a['f1'][0] = 1 a['f2'][0] = 2 a['f3'][0] = (3,) b = np.zeros((1,), dtype=[('f1', 'i4'), ('f2', 'i4'), ('f3', [('sf1', 'i4')])]) b['f1'][0] = 1 b['f2'][0] = 2 b['f3'][0] = (3,) # All the different functions raise a warning, but not an error, and # 'a' is not modified: assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)), [FutureWarning]) assert_equal(a, b) # Views also warn subset = a[['f1', 'f2']] subset_view = subset.view() assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10), [FutureWarning]) # But the write goes through: assert_equal(subset['f1'][0], 10) # Only one warning per multiple field indexing, though (even if there # are multiple views involved): assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), []) def test_record_hash(self): a = np.array([(1, 2), (1, 2)], dtype='i1,i2') a.flags.writeable = False b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')]) b.flags.writeable = False c = np.array([(1, 2), (3, 4)], dtype='i1,i2') c.flags.writeable = False self.assertTrue(hash(a[0]) == hash(a[1])) self.assertTrue(hash(a[0]) == hash(b[0])) self.assertTrue(hash(a[0]) != hash(b[1])) self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0]) def test_record_no_hash(self): a = np.array([(1, 2), (1, 2)], dtype='i1,i2') self.assertRaises(TypeError, hash, a[0]) def test_empty_structure_creation(self): # make sure these do not raise errors (gh-5631) np.array([()], dtype={'names': [], 'formats': [], 'offsets': [], 'itemsize': 12}) np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [], 'offsets': [], 'itemsize': 12}) class TestView(TestCase): def test_basic(self): x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype=[('r', np.int8), ('g', np.int8), ('b', np.int8), ('a', np.int8)]) # We must be specific about the endianness here: y = x.view(dtype='<i4') # ... and again without the keyword. 
z = x.view('<i4') assert_array_equal(y, z) assert_array_equal(y, [67305985, 134678021]) def _mean(a, **args): return a.mean(**args) def _var(a, **args): return a.var(**args) def _std(a, **args): return a.std(**args) class TestStats(TestCase): funcs = [_mean, _var, _std] def setUp(self): np.random.seed(range(3)) self.rmat = np.random.random((4, 5)) self.cmat = self.rmat + 1j * self.rmat self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat]) self.omat = self.omat.reshape(4, 5) def test_keepdims(self): mat = np.eye(3) for f in self.funcs: for axis in [0, 1]: res = f(mat, axis=axis, keepdims=True) assert_(res.ndim == mat.ndim) assert_(res.shape[axis] == 1) for axis in [None]: res = f(mat, axis=axis, keepdims=True) assert_(res.shape == (1, 1)) def test_out(self): mat = np.eye(3) for f in self.funcs: out = np.zeros(3) tgt = f(mat, axis=1) res = f(mat, axis=1, out=out) assert_almost_equal(res, out) assert_almost_equal(res, tgt) out = np.empty(2) assert_raises(ValueError, f, mat, axis=1, out=out) out = np.empty((2, 2)) assert_raises(ValueError, f, mat, axis=1, out=out) def test_dtype_from_input(self): icodes = np.typecodes['AllInteger'] fcodes = np.typecodes['AllFloat'] # object type for f in self.funcs: mat = np.array([[Decimal(1)]*3]*3) tgt = mat.dtype.type res = f(mat, axis=1).dtype.type assert_(res is tgt) # scalar case res = type(f(mat, axis=None)) assert_(res is Decimal) # integer types for f in self.funcs: for c in icodes: mat = np.eye(3, dtype=c) tgt = np.float64 res = f(mat, axis=1).dtype.type assert_(res is tgt) # scalar case res = f(mat, axis=None).dtype.type assert_(res is tgt) # mean for float types for f in [_mean]: for c in fcodes: mat = np.eye(3, dtype=c) tgt = mat.dtype.type res = f(mat, axis=1).dtype.type assert_(res is tgt) # scalar case res = f(mat, axis=None).dtype.type assert_(res is tgt) # var, std for float types for f in [_var, _std]: for c in fcodes: mat = np.eye(3, dtype=c) # deal with complex types tgt = mat.real.dtype.type res = f(mat, axis=1).dtype.type assert_(res is tgt) # scalar case res = f(mat, axis=None).dtype.type assert_(res is tgt) def test_dtype_from_dtype(self): mat = np.eye(3) # stats for integer types # FIXME: # this needs definition as there are lots places along the line # where type casting may take place. 
#for f in self.funcs: # for c in np.typecodes['AllInteger']: # tgt = np.dtype(c).type # res = f(mat, axis=1, dtype=c).dtype.type # assert_(res is tgt) # # scalar case # res = f(mat, axis=None, dtype=c).dtype.type # assert_(res is tgt) # stats for float types for f in self.funcs: for c in np.typecodes['AllFloat']: tgt = np.dtype(c).type res = f(mat, axis=1, dtype=c).dtype.type assert_(res is tgt) # scalar case res = f(mat, axis=None, dtype=c).dtype.type assert_(res is tgt) def test_ddof(self): for f in [_var]: for ddof in range(3): dim = self.rmat.shape[1] tgt = f(self.rmat, axis=1) * dim res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof) for f in [_std]: for ddof in range(3): dim = self.rmat.shape[1] tgt = f(self.rmat, axis=1) * np.sqrt(dim) res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof) assert_almost_equal(res, tgt) assert_almost_equal(res, tgt) def test_ddof_too_big(self): dim = self.rmat.shape[1] for f in [_var, _std]: for ddof in range(dim, dim + 2): with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') res = f(self.rmat, axis=1, ddof=ddof) assert_(not (res < 0).any()) assert_(len(w) > 0) assert_(issubclass(w[0].category, RuntimeWarning)) def test_empty(self): A = np.zeros((0, 3)) for f in self.funcs: for axis in [0, None]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_(np.isnan(f(A, axis=axis)).all()) assert_(len(w) > 0) assert_(issubclass(w[0].category, RuntimeWarning)) for axis in [1]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_equal(f(A, axis=axis), np.zeros([])) def test_mean_values(self): for mat in [self.rmat, self.cmat, self.omat]: for axis in [0, 1]: tgt = mat.sum(axis=axis) res = _mean(mat, axis=axis) * mat.shape[axis] assert_almost_equal(res, tgt) for axis in [None]: tgt = mat.sum(axis=axis) res = _mean(mat, axis=axis) * np.prod(mat.shape) assert_almost_equal(res, tgt) def test_var_values(self): for mat in [self.rmat, self.cmat, self.omat]: for axis in [0, 1, None]: msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) tgt = msqr - mean * mean.conjugate() res = _var(mat, axis=axis) assert_almost_equal(res, tgt) def test_std_values(self): for mat in [self.rmat, self.cmat, self.omat]: for axis in [0, 1, None]: tgt = np.sqrt(_var(mat, axis=axis)) res = _std(mat, axis=axis) assert_almost_equal(res, tgt) def test_subclass(self): class TestArray(np.ndarray): def __new__(cls, data, info): result = np.array(data) result = result.view(cls) result.info = info return result def __array_finalize__(self, obj): self.info = getattr(obj, "info", '') dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') res = dat.mean(1) assert_(res.info == dat.info) res = dat.std(1) assert_(res.info == dat.info) res = dat.var(1) assert_(res.info == dat.info) class TestVdot(TestCase): def test_basic(self): dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger'] dt_complex = np.typecodes['Complex'] # test real a = np.eye(3) for dt in dt_numeric + 'O': b = a.astype(dt) res = np.vdot(b, b) assert_(np.isscalar(res)) assert_equal(np.vdot(b, b), 3) # test complex a = np.eye(3) * 1j for dt in dt_complex + 'O': b = a.astype(dt) res = np.vdot(b, b) assert_(np.isscalar(res)) assert_equal(np.vdot(b, b), 3) # test boolean b = np.eye(3, dtype=np.bool) res = np.vdot(b, b) assert_(np.isscalar(res)) assert_equal(np.vdot(b, b), True) def test_vdot_array_order(self): a = np.array([[1, 2], [3, 4]], order='C') b = np.array([[1, 2], [3, 4]], order='F') res = np.vdot(a, a) # 
integer arrays are exact assert_equal(np.vdot(a, b), res) assert_equal(np.vdot(b, a), res) assert_equal(np.vdot(b, b), res) def test_vdot_uncontiguous(self): for size in [2, 1000]: # Different sizes match different branches in vdot. a = np.zeros((size, 2, 2)) b = np.zeros((size, 2, 2)) a[:, 0, 0] = np.arange(size) b[:, 0, 0] = np.arange(size) + 1 # Make a and b uncontiguous: a = a[..., 0] b = b[..., 0] assert_equal(np.vdot(a, b), np.vdot(a.flatten(), b.flatten())) assert_equal(np.vdot(a, b.copy()), np.vdot(a.flatten(), b.flatten())) assert_equal(np.vdot(a.copy(), b), np.vdot(a.flatten(), b.flatten())) assert_equal(np.vdot(a.copy('F'), b), np.vdot(a.flatten(), b.flatten())) assert_equal(np.vdot(a, b.copy('F')), np.vdot(a.flatten(), b.flatten())) class TestDot(TestCase): def setUp(self): np.random.seed(128) self.A = np.random.rand(4, 2) self.b1 = np.random.rand(2, 1) self.b2 = np.random.rand(2) self.b3 = np.random.rand(1, 2) self.b4 = np.random.rand(4) self.N = 7 def test_dotmatmat(self): A = self.A res = np.dot(A.transpose(), A) tgt = np.array([[1.45046013, 0.86323640], [0.86323640, 0.84934569]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotmatvec(self): A, b1 = self.A, self.b1 res = np.dot(A, b1) tgt = np.array([[0.32114320], [0.04889721], [0.15696029], [0.33612621]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotmatvec2(self): A, b2 = self.A, self.b2 res = np.dot(A, b2) tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat(self): A, b4 = self.A, self.b4 res = np.dot(b4, A) tgt = np.array([1.23495091, 1.12222648]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat2(self): b3, A = self.b3, self.A res = np.dot(b3, A.transpose()) tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat3(self): A, b4 = self.A, self.b4 res = np.dot(A.transpose(), b4) tgt = np.array([1.23495091, 1.12222648]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecvecouter(self): b1, b3 = self.b1, self.b3 res = np.dot(b1, b3) tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecvecinner(self): b1, b3 = self.b1, self.b3 res = np.dot(b3, b1) tgt = np.array([[ 0.23129668]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotcolumnvect1(self): b1 = np.ones((3, 1)) b2 = [5.3] res = np.dot(b1, b2) tgt = np.array([5.3, 5.3, 5.3]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotcolumnvect2(self): b1 = np.ones((3, 1)).transpose() b2 = [6.2] res = np.dot(b2, b1) tgt = np.array([6.2, 6.2, 6.2]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecscalar(self): np.random.seed(100) b1 = np.random.rand(1, 1) b2 = np.random.rand(1, 4) res = np.dot(b1, b2) tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecscalar2(self): np.random.seed(100) b1 = np.random.rand(4, 1) b2 = np.random.rand(1, 1) res = np.dot(b1, b2) tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]]) assert_almost_equal(res, tgt, decimal=self.N) def test_all(self): dims = [(), (1,), (1, 1)] dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)] for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)): b1 = np.zeros(dim1) b2 = np.zeros(dim2) res = np.dot(b1, b2) tgt = np.zeros(dim) assert_(res.shape == tgt.shape) assert_almost_equal(res, tgt, decimal=self.N) def 
test_vecobject(self): class Vec(object): def __init__(self, sequence=None): if sequence is None: sequence = [] self.array = np.array(sequence) def __add__(self, other): out = Vec() out.array = self.array + other.array return out def __sub__(self, other): out = Vec() out.array = self.array - other.array return out def __mul__(self, other): # with scalar out = Vec(self.array.copy()) out.array *= other return out def __rmul__(self, other): return self*other U_non_cont = np.transpose([[1., 1.], [1., 2.]]) U_cont = np.ascontiguousarray(U_non_cont) x = np.array([Vec([1., 0.]), Vec([0., 1.])]) zeros = np.array([Vec([0., 0.]), Vec([0., 0.])]) zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x) assert_equal(zeros[0].array, zeros_test[0].array) assert_equal(zeros[1].array, zeros_test[1].array) def test_dot_2args(self): from numpy.core.multiarray import dot a = np.array([[1, 2], [3, 4]], dtype=float) b = np.array([[1, 0], [1, 1]], dtype=float) c = np.array([[3, 2], [7, 4]], dtype=float) d = dot(a, b) assert_allclose(c, d) def test_dot_3args(self): from numpy.core.multiarray import dot np.random.seed(22) f = np.random.random_sample((1024, 16)) v = np.random.random_sample((16, 32)) r = np.empty((1024, 32)) for i in range(12): dot(f, v, r) assert_equal(sys.getrefcount(r), 2) r2 = dot(f, v, out=None) assert_array_equal(r2, r) assert_(r is dot(f, v, out=r)) v = v[:, 0].copy() # v.shape == (16,) r = r[:, 0].copy() # r.shape == (1024,) r2 = dot(f, v) assert_(r is dot(f, v, r)) assert_array_equal(r2, r) def test_dot_3args_errors(self): from numpy.core.multiarray import dot np.random.seed(22) f = np.random.random_sample((1024, 16)) v = np.random.random_sample((16, 32)) r = np.empty((1024, 31)) assert_raises(ValueError, dot, f, v, r) r = np.empty((1024,)) assert_raises(ValueError, dot, f, v, r) r = np.empty((32,)) assert_raises(ValueError, dot, f, v, r) r = np.empty((32, 1024)) assert_raises(ValueError, dot, f, v, r) assert_raises(ValueError, dot, f, v, r.T) r = np.empty((1024, 64)) assert_raises(ValueError, dot, f, v, r[:, ::2]) assert_raises(ValueError, dot, f, v, r[:, :32]) r = np.empty((1024, 32), dtype=np.float32) assert_raises(ValueError, dot, f, v, r) r = np.empty((1024, 32), dtype=int) assert_raises(ValueError, dot, f, v, r) def test_dot_array_order(self): a = np.array([[1, 2], [3, 4]], order='C') b = np.array([[1, 2], [3, 4]], order='F') res = np.dot(a, a) # integer arrays are exact assert_equal(np.dot(a, b), res) assert_equal(np.dot(b, a), res) assert_equal(np.dot(b, b), res) def test_dot_scalar_and_matrix_of_objects(self): # Ticket #2469 arr = np.matrix([1, 2], dtype=object) desired = np.matrix([[3, 6]], dtype=object) assert_equal(np.dot(arr, 3), desired) assert_equal(np.dot(3, arr), desired) def test_dot_override(self): # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844 return class A(object): def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): return "A" class B(object): def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): return NotImplemented a = A() b = B() c = np.array([[1]]) assert_equal(np.dot(a, b), "A") assert_equal(c.dot(a), "A") assert_raises(TypeError, np.dot, b, c) assert_raises(TypeError, c.dot, b) def test_accelerate_framework_sgemv_fix(self): def aligned_array(shape, align, dtype, order='C'): d = dtype(0) N = np.prod(shape) tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) address = tmp.__array_interface__["data"][0] for offset in range(align): if (address + offset) % align == 0: break tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype) 
return tmp.reshape(shape, order=order) def as_aligned(arr, align, dtype, order='C'): aligned = aligned_array(arr.shape, align, dtype, order) aligned[:] = arr[:] return aligned def assert_dot_close(A, X, desired): assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7) m = aligned_array(100, 15, np.float32) s = aligned_array((100, 100), 15, np.float32) np.dot(s, m) # this will always segfault if the bug is present testdata = itertools.product((15,32), (10000,), (200,89), ('C','F')) for align, m, n, a_order in testdata: # Calculation in double precision A_d = np.random.rand(m, n) X_d = np.random.rand(n) desired = np.dot(A_d, X_d) # Calculation with aligned single precision A_f = as_aligned(A_d, align, np.float32, order=a_order) X_f = as_aligned(X_d, align, np.float32) assert_dot_close(A_f, X_f, desired) # Strided A rows A_d_2 = A_d[::2] desired = np.dot(A_d_2, X_d) A_f_2 = A_f[::2] assert_dot_close(A_f_2, X_f, desired) # Strided A columns, strided X vector A_d_22 = A_d_2[:, ::2] X_d_2 = X_d[::2] desired = np.dot(A_d_22, X_d_2) A_f_22 = A_f_2[:, ::2] X_f_2 = X_f[::2] assert_dot_close(A_f_22, X_f_2, desired) # Check the strides are as expected if a_order == 'F': assert_equal(A_f_22.strides, (8, 8 * m)) else: assert_equal(A_f_22.strides, (8 * n, 8)) assert_equal(X_f_2.strides, (8,)) # Strides in A rows + cols only X_f_2c = as_aligned(X_f_2, align, np.float32) assert_dot_close(A_f_22, X_f_2c, desired) # Strides just in A cols A_d_12 = A_d[:, ::2] desired = np.dot(A_d_12, X_d_2) A_f_12 = A_f[:, ::2] assert_dot_close(A_f_12, X_f_2c, desired) # Strides in A cols and X assert_dot_close(A_f_12, X_f_2, desired) class MatmulCommon(): """Common tests for '@' operator and numpy.matmul. Do not derive from TestCase to avoid nose running it. """ # Should work with these types. Will want to add # "O" at some point types = "?bhilqBHILQefdgFDG" def test_exceptions(self): dims = [ ((1,), (2,)), # mismatched vector vector ((2, 1,), (2,)), # mismatched matrix vector ((2,), (1, 2)), # mismatched vector matrix ((1, 2), (3, 1)), # mismatched matrix matrix ((1,), ()), # vector scalar ((), (1)), # scalar vector ((1, 1), ()), # matrix scalar ((), (1, 1)), # scalar matrix ((2, 2, 1), (3, 1, 2)), # cannot broadcast ] for dt, (dm1, dm2) in itertools.product(self.types, dims): a = np.ones(dm1, dtype=dt) b = np.ones(dm2, dtype=dt) assert_raises(ValueError, self.matmul, a, b) def test_shapes(self): dims = [ ((1, 1), (2, 1, 1)), # broadcast first argument ((2, 1, 1), (1, 1)), # broadcast second argument ((2, 1, 1), (2, 1, 1)), # matrix stack sizes match ] for dt, (dm1, dm2) in itertools.product(self.types, dims): a = np.ones(dm1, dtype=dt) b = np.ones(dm2, dtype=dt) res = self.matmul(a, b) assert_(res.shape == (2, 1, 1)) # vector vector returns scalars. 
for dt in self.types: a = np.ones((2,), dtype=dt) b = np.ones((2,), dtype=dt) c = self.matmul(a, b) assert_(np.array(c).shape == ()) def test_result_types(self): mat = np.ones((1,1)) vec = np.ones((1,)) for dt in self.types: m = mat.astype(dt) v = vec.astype(dt) for arg in [(m, v), (v, m), (m, m)]: res = self.matmul(*arg) assert_(res.dtype == dt) # vector vector returns scalars res = self.matmul(v, v) assert_(type(res) is np.dtype(dt).type) def test_vector_vector_values(self): vec = np.array([1, 2]) tgt = 5 for dt in self.types[1:]: v1 = vec.astype(dt) res = self.matmul(v1, v1) assert_equal(res, tgt) # boolean type vec = np.array([True, True], dtype='?') res = self.matmul(vec, vec) assert_equal(res, True) def test_vector_matrix_values(self): vec = np.array([1, 2]) mat1 = np.array([[1, 2], [3, 4]]) mat2 = np.stack([mat1]*2, axis=0) tgt1 = np.array([7, 10]) tgt2 = np.stack([tgt1]*2, axis=0) for dt in self.types[1:]: v = vec.astype(dt) m1 = mat1.astype(dt) m2 = mat2.astype(dt) res = self.matmul(v, m1) assert_equal(res, tgt1) res = self.matmul(v, m2) assert_equal(res, tgt2) # boolean type vec = np.array([True, False]) mat1 = np.array([[True, False], [False, True]]) mat2 = np.stack([mat1]*2, axis=0) tgt1 = np.array([True, False]) tgt2 = np.stack([tgt1]*2, axis=0) res = self.matmul(vec, mat1) assert_equal(res, tgt1) res = self.matmul(vec, mat2) assert_equal(res, tgt2) def test_matrix_vector_values(self): vec = np.array([1, 2]) mat1 = np.array([[1, 2], [3, 4]]) mat2 = np.stack([mat1]*2, axis=0) tgt1 = np.array([5, 11]) tgt2 = np.stack([tgt1]*2, axis=0) for dt in self.types[1:]: v = vec.astype(dt) m1 = mat1.astype(dt) m2 = mat2.astype(dt) res = self.matmul(m1, v) assert_equal(res, tgt1) res = self.matmul(m2, v) assert_equal(res, tgt2) # boolean type vec = np.array([True, False]) mat1 = np.array([[True, False], [False, True]]) mat2 = np.stack([mat1]*2, axis=0) tgt1 = np.array([True, False]) tgt2 = np.stack([tgt1]*2, axis=0) res = self.matmul(vec, mat1) assert_equal(res, tgt1) res = self.matmul(vec, mat2) assert_equal(res, tgt2) def test_matrix_matrix_values(self): mat1 = np.array([[1, 2], [3, 4]]) mat2 = np.array([[1, 0], [1, 1]]) mat12 = np.stack([mat1, mat2], axis=0) mat21 = np.stack([mat2, mat1], axis=0) tgt11 = np.array([[7, 10], [15, 22]]) tgt12 = np.array([[3, 2], [7, 4]]) tgt21 = np.array([[1, 2], [4, 6]]) tgt12_21 = np.stack([tgt12, tgt21], axis=0) tgt11_12 = np.stack((tgt11, tgt12), axis=0) tgt11_21 = np.stack((tgt11, tgt21), axis=0) for dt in self.types[1:]: m1 = mat1.astype(dt) m2 = mat2.astype(dt) m12 = mat12.astype(dt) m21 = mat21.astype(dt) # matrix @ matrix res = self.matmul(m1, m2) assert_equal(res, tgt12) res = self.matmul(m2, m1) assert_equal(res, tgt21) # stacked @ matrix res = self.matmul(m12, m1) assert_equal(res, tgt11_21) # matrix @ stacked res = self.matmul(m1, m12) assert_equal(res, tgt11_12) # stacked @ stacked res = self.matmul(m12, m21) assert_equal(res, tgt12_21) # boolean type m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_) m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_) m12 = np.stack([m1, m2], axis=0) m21 = np.stack([m2, m1], axis=0) tgt11 = m1 tgt12 = m1 tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_) tgt12_21 = np.stack([tgt12, tgt21], axis=0) tgt11_12 = np.stack((tgt11, tgt12), axis=0) tgt11_21 = np.stack((tgt11, tgt21), axis=0) # matrix @ matrix res = self.matmul(m1, m2) assert_equal(res, tgt12) res = self.matmul(m2, m1) assert_equal(res, tgt21) # stacked @ matrix res = self.matmul(m12, m1) assert_equal(res, tgt11_21) # matrix @ stacked res = self.matmul(m1, 
m12) assert_equal(res, tgt11_12) # stacked @ stacked res = self.matmul(m12, m21) assert_equal(res, tgt12_21) def test_numpy_ufunc_override(self): # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844 return class A(np.ndarray): def __new__(cls, *args, **kwargs): return np.array(*args, **kwargs).view(cls) def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): return "A" class B(np.ndarray): def __new__(cls, *args, **kwargs): return np.array(*args, **kwargs).view(cls) def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): return NotImplemented a = A([1, 2]) b = B([1, 2]) c = np.ones(2) assert_equal(self.matmul(a, b), "A") assert_equal(self.matmul(b, a), "A") assert_raises(TypeError, self.matmul, b, c) class TestMatmul(MatmulCommon, TestCase): matmul = np.matmul def test_out_arg(self): a = np.ones((2, 2), dtype=np.float) b = np.ones((2, 2), dtype=np.float) tgt = np.full((2,2), 2, dtype=np.float) # test as positional argument msg = "out positional argument" out = np.zeros((2, 2), dtype=np.float) self.matmul(a, b, out) assert_array_equal(out, tgt, err_msg=msg) # test as keyword argument msg = "out keyword argument" out = np.zeros((2, 2), dtype=np.float) self.matmul(a, b, out=out) assert_array_equal(out, tgt, err_msg=msg) # test out with not allowed type cast (safe casting) # einsum and cblas raise different error types, so # use Exception. msg = "out argument with illegal cast" out = np.zeros((2, 2), dtype=np.int32) assert_raises(Exception, self.matmul, a, b, out=out) # skip following tests for now, cblas does not allow non-contiguous # outputs and consistency with dot would require same type, # dimensions, subtype, and c_contiguous. # test out with allowed type cast # msg = "out argument with allowed cast" # out = np.zeros((2, 2), dtype=np.complex128) # self.matmul(a, b, out=out) # assert_array_equal(out, tgt, err_msg=msg) # test out non-contiguous # msg = "out argument with non-contiguous layout" # c = np.zeros((2, 2, 2), dtype=np.float) # self.matmul(a, b, out=c[..., 0]) # assert_array_equal(c, tgt, err_msg=msg) if sys.version_info[:2] >= (3, 5): class TestMatmulOperator(MatmulCommon, TestCase): import operator matmul = operator.matmul def test_array_priority_override(self): class A(object): __array_priority__ = 1000 def __matmul__(self, other): return "A" def __rmatmul__(self, other): return "A" a = A() b = np.ones(2) assert_equal(self.matmul(a, b), "A") assert_equal(self.matmul(b, a), "A") def test_matmul_inplace(): # It would be nice to support in-place matmul eventually, but for now # we don't have a working implementation, so better just to error out # and nudge people to writing "a = a @ b". a = np.eye(3) b = np.eye(3) assert_raises(TypeError, a.__imatmul__, b) import operator assert_raises(TypeError, operator.imatmul, a, b) # we avoid writing the token `exec` so as not to crash python 2's # parser exec_ = getattr(builtins, "exec") assert_raises(TypeError, exec_, "a @= b", globals(), locals()) class TestInner(TestCase): def test_inner_scalar_and_matrix_of_objects(self): # Ticket #4482 arr = np.matrix([1, 2], dtype=object) desired = np.matrix([[3, 6]], dtype=object) assert_equal(np.inner(arr, 3), desired) assert_equal(np.inner(3, arr), desired) def test_vecself(self): # Ticket 844. 
# Inner product of a vector with itself segfaults or give # meaningless result a = np.zeros(shape=(1, 80), dtype=np.float64) p = np.inner(a, a) assert_almost_equal(p, 0, decimal=14) def test_inner_product_with_various_contiguities(self): # github issue 6532 for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': # check an inner product involving a matrix transpose A = np.array([[1, 2], [3, 4]], dtype=dt) B = np.array([[1, 3], [2, 4]], dtype=dt) C = np.array([1, 1], dtype=dt) desired = np.array([4, 6], dtype=dt) assert_equal(np.inner(A.T, C), desired) assert_equal(np.inner(B, C), desired) # check an inner product involving an aliased and reversed view a = np.arange(5).astype(dt) b = a[::-1] desired = np.array(10, dtype=dt).item() assert_equal(np.inner(b, a), desired) class TestSummarization(TestCase): def test_1d(self): A = np.arange(1001) strA = '[ 0 1 2 ..., 998 999 1000]' assert_(str(A) == strA) reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' assert_(repr(A) == reprA) def test_2d(self): A = np.arange(1002).reshape(2, 501) strA = '[[ 0 1 2 ..., 498 499 500]\n' \ ' [ 501 502 503 ..., 999 1000 1001]]' assert_(str(A) == strA) reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ ' [ 501, 502, 503, ..., 999, 1000, 1001]])' assert_(repr(A) == reprA) class TestChoose(TestCase): def setUp(self): self.x = 2*np.ones((3,), dtype=int) self.y = 3*np.ones((3,), dtype=int) self.x2 = 2*np.ones((2, 3), dtype=int) self.y2 = 3*np.ones((2, 3), dtype=int) self.ind = [0, 0, 1] def test_basic(self): A = np.choose(self.ind, (self.x, self.y)) assert_equal(A, [2, 2, 3]) def test_broadcast1(self): A = np.choose(self.ind, (self.x2, self.y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) def test_broadcast2(self): A = np.choose(self.ind, (self.x, self.y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) # TODO: test for multidimensional NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} class TestNeighborhoodIter(TestCase): # Simple, 2d tests def _test_simple2d(self, dt): # Test zero and one padding for simple data type x = np.array([[0, 1], [2, 3]], dtype=dt) r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt), np.array([[0, 0, 0], [0, 1, 0]], dtype=dt), np.array([[0, 0, 1], [0, 2, 3]], dtype=dt), np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)] l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero']) assert_array_equal(l, r) r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt), np.array([[1, 1, 1], [0, 1, 1]], dtype=dt), np.array([[1, 0, 1], [1, 2, 3]], dtype=dt), np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)] l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one']) assert_array_equal(l, r) r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt), np.array([[4, 4, 4], [0, 1, 4]], dtype=dt), np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant']) assert_array_equal(l, r) def test_simple2d(self): self._test_simple2d(np.float) def test_simple2d_object(self): self._test_simple2d(Decimal) def _test_mirror2d(self, dt): x = np.array([[0, 1], [2, 3]], dtype=dt) r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt), np.array([[0, 1, 1], [0, 1, 1]], dtype=dt), np.array([[0, 0, 1], [2, 2, 3]], dtype=dt), np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)] l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror']) assert_array_equal(l, r) def test_mirror2d(self): self._test_mirror2d(np.float) def test_mirror2d_object(self): self._test_mirror2d(Decimal) # Simple, 1d tests def 
_test_simple(self, dt): # Test padding with constant values x = np.linspace(1, 5, 5).astype(dt) r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]] l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero']) assert_array_equal(l, r) r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]] l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one']) assert_array_equal(l, r) r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]] l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant']) assert_array_equal(l, r) def test_simple_float(self): self._test_simple(np.float) def test_simple_object(self): self._test_simple(Decimal) # Test mirror modes def _test_mirror(self, dt): x = np.linspace(1, 5, 5).astype(dt) r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5], [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt) l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror']) self.assertTrue([i.dtype == dt for i in l]) assert_array_equal(l, r) def test_mirror(self): self._test_mirror(np.float) def test_mirror_object(self): self._test_mirror(Decimal) # Circular mode def _test_circular(self, dt): x = np.linspace(1, 5, 5).astype(dt) r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5], [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt) l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular']) assert_array_equal(l, r) def test_circular(self): self._test_circular(np.float) def test_circular_object(self): self._test_circular(Decimal) # Test stacking neighborhood iterators class TestStackedNeighborhoodIter(TestCase): # Simple, 1d test: stacking 2 constant-padded neigh iterators def test_simple_const(self): dt = np.float64 # Test zero and one padding for simple data type x = np.array([1, 2, 3], dtype=dt) r = [np.array([0], dtype=dt), np.array([0], dtype=dt), np.array([1], dtype=dt), np.array([2], dtype=dt), np.array([3], dtype=dt), np.array([0], dtype=dt), np.array([0], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero']) assert_array_equal(l, r) r = [np.array([1, 0, 1], dtype=dt), np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt), np.array([3, 0, 1], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one']) assert_array_equal(l, r) # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and # mirror padding def test_simple_mirror(self): dt = np.float64 # Stacking zero on top of mirror x = np.array([1, 2, 3], dtype=dt) r = [np.array([0, 1, 1], dtype=dt), np.array([1, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 3], dtype=dt), np.array([3, 3, 0], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero']) assert_array_equal(l, r) # Stacking mirror on top of zero x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 0, 0], dtype=dt), np.array([0, 0, 1], dtype=dt), np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror']) assert_array_equal(l, r) # Stacking mirror on top of zero: 2nd x = np.array([1, 2, 3], dtype=dt) r = [np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt), np.array([3, 0, 0], dtype=dt), np.array([0, 0, 3], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror']) 
assert_array_equal(l, r) # Stacking mirror on top of zero: 3rd x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 0, 0, 1, 2], dtype=dt), np.array([0, 0, 1, 2, 3], dtype=dt), np.array([0, 1, 2, 3, 0], dtype=dt), np.array([1, 2, 3, 0, 0], dtype=dt), np.array([2, 3, 0, 0, 3], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror']) assert_array_equal(l, r) # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and # circular padding def test_simple_circular(self): dt = np.float64 # Stacking zero on top of mirror x = np.array([1, 2, 3], dtype=dt) r = [np.array([0, 3, 1], dtype=dt), np.array([3, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 1], dtype=dt), np.array([3, 1, 0], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero']) assert_array_equal(l, r) # Stacking mirror on top of zero x = np.array([1, 2, 3], dtype=dt) r = [np.array([3, 0, 0], dtype=dt), np.array([0, 0, 1], dtype=dt), np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular']) assert_array_equal(l, r) # Stacking mirror on top of zero: 2nd x = np.array([1, 2, 3], dtype=dt) r = [np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt), np.array([3, 0, 0], dtype=dt), np.array([0, 0, 1], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular']) assert_array_equal(l, r) # Stacking mirror on top of zero: 3rd x = np.array([1, 2, 3], dtype=dt) r = [np.array([3, 0, 0, 1, 2], dtype=dt), np.array([0, 0, 1, 2, 3], dtype=dt), np.array([0, 1, 2, 3, 0], dtype=dt), np.array([1, 2, 3, 0, 0], dtype=dt), np.array([2, 3, 0, 0, 1], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular']) assert_array_equal(l, r) # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator # being strictly within the array def test_simple_strict_within(self): dt = np.float64 # Stacking zero on top of zero, first neighborhood strictly inside the # array x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 2, 3, 0], dtype=dt)] l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero']) assert_array_equal(l, r) # Stacking mirror on top of zero, first neighborhood strictly inside the # array x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 2, 3, 3], dtype=dt)] l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror']) assert_array_equal(l, r) # Stacking mirror on top of zero, first neighborhood strictly inside the # array x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 2, 3, 1], dtype=dt)] l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular']) assert_array_equal(l, r) class TestWarnings(object): def test_complex_warning(self): x = np.array([1, 2]) y = np.array([1-2j, 1+2j]) with warnings.catch_warnings(): warnings.simplefilter("error", np.ComplexWarning) assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y) assert_equal(x, [1, 2]) class TestMinScalarType(object): def test_usigned_shortshort(self): dt = np.min_scalar_type(2**8-1) wanted = np.dtype('uint8') assert_equal(wanted, dt) def test_usigned_short(self): dt = np.min_scalar_type(2**16-1) wanted = np.dtype('uint16') assert_equal(wanted, dt) def 
test_usigned_int(self): dt = np.min_scalar_type(2**32-1) wanted = np.dtype('uint32') assert_equal(wanted, dt) def test_usigned_longlong(self): dt = np.min_scalar_type(2**63-1) wanted = np.dtype('uint64') assert_equal(wanted, dt) def test_object(self): dt = np.min_scalar_type(2**64) wanted = np.dtype('O') assert_equal(wanted, dt) if sys.version_info[:2] == (2, 6): from numpy.core.multiarray import memorysimpleview as memoryview from numpy.core._internal import _dtype_from_pep3118 class TestPEP3118Dtype(object): def _check(self, spec, wanted): dt = np.dtype(wanted) if isinstance(wanted, list) and isinstance(wanted[-1], tuple): if wanted[-1][0] == '': names = list(dt.names) names[-1] = '' dt.names = tuple(names) assert_equal(_dtype_from_pep3118(spec), dt, err_msg="spec %r != dtype %r" % (spec, wanted)) def test_native_padding(self): align = np.dtype('i').alignment for j in range(8): if j == 0: s = 'bi' else: s = 'b%dxi' % j self._check('@'+s, {'f0': ('i1', 0), 'f1': ('i', align*(1 + j//align))}) self._check('='+s, {'f0': ('i1', 0), 'f1': ('i', 1+j)}) def test_native_padding_2(self): # Native padding should work also for structs and sub-arrays self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)}) self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)}) def test_trailing_padding(self): # Trailing padding should be included, *and*, the item size # should match the alignment if in aligned mode align = np.dtype('i').alignment def VV(n): return 'V%d' % (align*(1 + (n-1)//align)) self._check('ix', [('f0', 'i'), ('', VV(1))]) self._check('ixx', [('f0', 'i'), ('', VV(2))]) self._check('ixxx', [('f0', 'i'), ('', VV(3))]) self._check('ixxxx', [('f0', 'i'), ('', VV(4))]) self._check('i7x', [('f0', 'i'), ('', VV(7))]) self._check('^ix', [('f0', 'i'), ('', 'V1')]) self._check('^ixx', [('f0', 'i'), ('', 'V2')]) self._check('^ixxx', [('f0', 'i'), ('', 'V3')]) self._check('^ixxxx', [('f0', 'i'), ('', 'V4')]) self._check('^i7x', [('f0', 'i'), ('', 'V7')]) def test_native_padding_3(self): dt = np.dtype( [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')], align=True) self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt) dt = np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), ('e', 'b'), ('sub', np.dtype('b,i', align=True))]) self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt) def test_padding_with_array_inside_struct(self): dt = np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')], align=True) self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt) def test_byteorder_inside_struct(self): # The byte order after @T{=i} should be '=', not '@'. # Check this by noting the absence of native alignment. 
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0), 'f1': ('i', 5)}) def test_intra_padding(self): # Natively aligned sub-arrays may require some internal padding align = np.dtype('i').alignment def VV(n): return 'V%d' % (align*(1 + (n-1)//align)) self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,))) class TestNewBufferProtocol(object): def _check_roundtrip(self, obj): obj = np.asarray(obj) x = memoryview(obj) y = np.asarray(x) y2 = np.array(x) assert_(not y.flags.owndata) assert_(y2.flags.owndata) assert_equal(y.dtype, obj.dtype) assert_equal(y.shape, obj.shape) assert_array_equal(obj, y) assert_equal(y2.dtype, obj.dtype) assert_equal(y2.shape, obj.shape) assert_array_equal(obj, y2) def test_roundtrip(self): x = np.array([1, 2, 3, 4, 5], dtype='i4') self._check_roundtrip(x) x = np.array([[1, 2], [3, 4]], dtype=np.float64) self._check_roundtrip(x) x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] self._check_roundtrip(x) dt = [('a', 'b'), ('b', 'h'), ('c', 'i'), ('d', 'l'), ('dx', 'q'), ('e', 'B'), ('f', 'H'), ('g', 'I'), ('h', 'L'), ('hx', 'Q'), ('i', np.single), ('j', np.double), ('k', np.longdouble), ('ix', np.csingle), ('jx', np.cdouble), ('kx', np.clongdouble), ('l', 'S4'), ('m', 'U4'), ('n', 'V3'), ('o', '?'), ('p', np.half), ] x = np.array( [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)], dtype=dt) self._check_roundtrip(x) x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))]) self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='>i2') self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='<i2') self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='>i4') self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='<i4') self._check_roundtrip(x) # check long long can be represented as non-native x = np.array([1, 2, 3], dtype='>q') self._check_roundtrip(x) # Native-only data types can be passed through the buffer interface # only in native byte order if sys.byteorder == 'little': x = np.array([1, 2, 3], dtype='>g') assert_raises(ValueError, self._check_roundtrip, x) x = np.array([1, 2, 3], dtype='<g') self._check_roundtrip(x) else: x = np.array([1, 2, 3], dtype='>g') self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='<g') assert_raises(ValueError, self._check_roundtrip, x) def test_roundtrip_half(self): half_list = [ 1.0, -2.0, 6.5504 * 10**4, # (max half precision) 2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal) 2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal) 0.0, -0.0, float('+inf'), float('-inf'), 0.333251953125, # ~= 1/3 ] x = np.array(half_list, dtype='>e') self._check_roundtrip(x) x = np.array(half_list, dtype='<e') self._check_roundtrip(x) def test_roundtrip_single_types(self): for typ in np.typeDict.values(): dtype = np.dtype(typ) if dtype.char in 'Mm': # datetimes cannot be used in buffers continue if dtype.char == 'V': # skip void continue x = np.zeros(4, dtype=dtype) self._check_roundtrip(x) if dtype.char not in 'qQgG': dt = dtype.newbyteorder('<') x = np.zeros(4, dtype=dt) self._check_roundtrip(x) dt = dtype.newbyteorder('>') x = np.zeros(4, dtype=dt) self._check_roundtrip(x) def test_roundtrip_scalar(self): # Issue #4015. 
self._check_roundtrip(0) def test_export_simple_1d(self): x = np.array([1, 2, 3, 4, 5], dtype='i') y = memoryview(x) assert_equal(y.format, 'i') assert_equal(y.shape, (5,)) assert_equal(y.ndim, 1) assert_equal(y.strides, (4,)) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 4) def test_export_simple_nd(self): x = np.array([[1, 2], [3, 4]], dtype=np.float64) y = memoryview(x) assert_equal(y.format, 'd') assert_equal(y.shape, (2, 2)) assert_equal(y.ndim, 2) assert_equal(y.strides, (16, 8)) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 8) def test_export_discontiguous(self): x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] y = memoryview(x) assert_equal(y.format, 'f') assert_equal(y.shape, (3, 3)) assert_equal(y.ndim, 2) assert_equal(y.strides, (36, 4)) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 4) def test_export_record(self): dt = [('a', 'b'), ('b', 'h'), ('c', 'i'), ('d', 'l'), ('dx', 'q'), ('e', 'B'), ('f', 'H'), ('g', 'I'), ('h', 'L'), ('hx', 'Q'), ('i', np.single), ('j', np.double), ('k', np.longdouble), ('ix', np.csingle), ('jx', np.cdouble), ('kx', np.clongdouble), ('l', 'S4'), ('m', 'U4'), ('n', 'V3'), ('o', '?'), ('p', np.half), ] x = np.array( [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)], dtype=dt) y = memoryview(x) assert_equal(y.shape, (1,)) assert_equal(y.ndim, 1) assert_equal(y.suboffsets, EMPTY) sz = sum([np.dtype(b).itemsize for a, b in dt]) if np.dtype('l').itemsize == 4: assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') else: assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides if not (np.ones(1).strides[0] == np.iinfo(np.intp).max): assert_equal(y.strides, (sz,)) assert_equal(y.itemsize, sz) def test_export_subarray(self): x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))]) y = memoryview(x) assert_equal(y.format, 'T{(2,2)i:a:}') assert_equal(y.shape, EMPTY) assert_equal(y.ndim, 0) assert_equal(y.strides, EMPTY) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 16) def test_export_endian(self): x = np.array([1, 2, 3], dtype='>i') y = memoryview(x) if sys.byteorder == 'little': assert_equal(y.format, '>i') else: assert_equal(y.format, 'i') x = np.array([1, 2, 3], dtype='<i') y = memoryview(x) if sys.byteorder == 'little': assert_equal(y.format, 'i') else: assert_equal(y.format, '<i') def test_export_flags(self): # Check SIMPLE flag, see also gh-3613 (exception should be BufferError) assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',)) def test_padding(self): for j in range(8): x = np.array([(1,), (2,)], dtype={'f0': (int, j)}) self._check_roundtrip(x) def test_reference_leak(self): count_1 = sys.getrefcount(np.core._internal) a = np.zeros(4) b = memoryview(a) c = np.asarray(b) count_2 = sys.getrefcount(np.core._internal) assert_equal(count_1, count_2) del c # avoid pyflakes unused variable warning. 
def test_padded_struct_array(self): dt1 = np.dtype( [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')], align=True) x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1) self._check_roundtrip(x1) dt2 = np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')], align=True) x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2) self._check_roundtrip(x2) dt3 = np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), ('e', 'b'), ('sub', np.dtype('b,i', align=True))]) x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3) self._check_roundtrip(x3) def test_relaxed_strides(self): # Test that relaxed strides are converted to non-relaxed c = np.ones((1, 10, 10), dtype='i8') # Check for NPY_RELAXED_STRIDES_CHECKING: if np.ones((10, 1), order="C").flags.f_contiguous: c.strides = (-1, 80, 8) assert memoryview(c).strides == (800, 80, 8) # Writing C-contiguous data to a BytesIO buffer should work fd = io.BytesIO() fd.write(c.data) fortran = c.T assert memoryview(fortran).strides == (8, 80, 800) arr = np.ones((1, 10)) if arr.flags.f_contiguous: shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS']) assert_(strides[0] == 8) arr = np.ones((10, 1), order='F') shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS']) assert_(strides[-1] == 8) class TestArrayAttributeDeletion(object): def test_multiarray_writable_attributes_deletion(self): """ticket #2046, should not seqfault, raise AttributeError""" a = np.ones(2) attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat'] for s in attr: assert_raises(AttributeError, delattr, a, s) def test_multiarray_not_writable_attributes_deletion(self): a = np.ones(2) attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base", "ctypes", "T", "__array_interface__", "__array_struct__", "__array_priority__", "__array_finalize__"] for s in attr: assert_raises(AttributeError, delattr, a, s) def test_multiarray_flags_writable_attribute_deletion(self): a = np.ones(2).flags attr = ['updateifcopy', 'aligned', 'writeable'] for s in attr: assert_raises(AttributeError, delattr, a, s) def test_multiarray_flags_not_writable_attribute_deletion(self): a = np.ones(2).flags attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran", "owndata", "fnc", "forc", "behaved", "carray", "farray", "num"] for s in attr: assert_raises(AttributeError, delattr, a, s) def test_array_interface(): # Test scalar coercion within the array interface class Foo(object): def __init__(self, value): self.value = value self.iface = {'typestr': '=f8'} def __float__(self): return float(self.value) @property def __array_interface__(self): return self.iface f = Foo(0.5) assert_equal(np.array(f), 0.5) assert_equal(np.array([f]), [0.5]) assert_equal(np.array([f, f]), [0.5, 0.5]) assert_equal(np.array(f).dtype, np.dtype('=f8')) # Test various shape definitions f.iface['shape'] = () assert_equal(np.array(f), 0.5) f.iface['shape'] = None assert_raises(TypeError, np.array, f) f.iface['shape'] = (1, 1) assert_equal(np.array(f), [[0.5]]) f.iface['shape'] = (2,) assert_raises(ValueError, np.array, f) # test scalar with no shape class ArrayLike(object): array = np.array(1) __array_interface__ = array.__array_interface__ assert_equal(np.array(ArrayLike()), 1) def test_flat_element_deletion(): it = np.ones(3).flat try: del it[1] del it[1:2] except TypeError: pass except: raise AssertionError def test_scalar_element_deletion(): a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')]) assert_raises(ValueError, a[0].__delitem__, 'x') class TestMemEventHook(TestCase): def test_mem_seteventhook(self): # 
The actual tests are within the C code in # multiarray/multiarray_tests.c.src test_pydatamem_seteventhook_start() # force an allocation and free of a numpy array # needs to be larger then limit of small memory cacher in ctors.c a = np.zeros(1000) del a test_pydatamem_seteventhook_end() class TestMapIter(TestCase): def test_mapiter(self): # The actual tests are within the C code in # multiarray/multiarray_tests.c.src a = np.arange(12).reshape((3, 4)).astype(float) index = ([1, 1, 2, 0], [0, 0, 2, 3]) vals = [50, 50, 30, 16] test_inplace_increment(a, index, vals) assert_equal(a, [[0.00, 1., 2.0, 19.], [104., 5., 6.0, 7.0], [8.00, 9., 40., 11.]]) b = np.arange(6).astype(float) index = (np.array([1, 2, 0]),) vals = [50, 4, 100.1] test_inplace_increment(b, index, vals) assert_equal(b, [100.1, 51., 6., 3., 4., 5.]) class TestAsCArray(TestCase): def test_1darray(self): array = np.arange(24, dtype=np.double) from_c = test_as_c_array(array, 3) assert_equal(array[3], from_c) def test_2darray(self): array = np.arange(24, dtype=np.double).reshape(3, 8) from_c = test_as_c_array(array, 2, 4) assert_equal(array[2, 4], from_c) def test_3darray(self): array = np.arange(24, dtype=np.double).reshape(2, 3, 4) from_c = test_as_c_array(array, 1, 2, 3) assert_equal(array[1, 2, 3], from_c) class TestConversion(TestCase): def test_array_scalar_relational_operation(self): #All integer for dt1 in np.typecodes['AllInteger']: assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,)) assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,)) for dt2 in np.typecodes['AllInteger']: assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) #Unsigned integers for dt1 in 'BHILQP': assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,)) assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,)) assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,)) #unsigned vs signed for dt2 in 'bhilqp': assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) #Signed integers and floats for dt1 in 'bhlqp' + np.typecodes['Float']: assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) for dt2 in 'bhlqp' + np.typecodes['Float']: assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) class TestWhere(TestCase): def test_basic(self): dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128, np.longdouble, np.clongdouble] for dt in dts: c = np.ones(53, dtype=np.bool) assert_equal(np.where( c, dt(0), dt(1)), dt(0)) assert_equal(np.where(~c, dt(0), dt(1)), dt(1)) assert_equal(np.where(True, dt(0), dt(1)), dt(0)) assert_equal(np.where(False, dt(0), dt(1)), dt(1)) d = np.ones_like(c).astype(dt) e = np.zeros_like(d) r = d.astype(dt) c[7] = False r[7] = e[7] assert_equal(np.where(c, e, e), e) assert_equal(np.where(c, d, e), r) assert_equal(np.where(c, d, 
e[0]), r) assert_equal(np.where(c, d[0], e), r) assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2]) assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2]) assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3]) assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3]) assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2]) assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3]) assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3]) def test_exotic(self): # object assert_array_equal(np.where(True, None, None), np.array(None)) # zero sized m = np.array([], dtype=bool).reshape(0, 3) b = np.array([], dtype=np.float64).reshape(0, 3) assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3)) # object cast d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313, 0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013, 1.267, 0.229, -1.39, 0.487]) nan = float('NaN') e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan, 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'], dtype=object) m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool) r = e[:] r[np.where(m)] = d[np.where(m)] assert_array_equal(np.where(m, d, e), r) r = e[:] r[np.where(~m)] = d[np.where(~m)] assert_array_equal(np.where(m, e, d), r) assert_array_equal(np.where(m, e, e), e) # minimal dtype result with NaN scalar (e.g required by pandas) d = np.array([1., 2.], dtype=np.float32) e = float('NaN') assert_equal(np.where(True, d, e).dtype, np.float32) e = float('Infinity') assert_equal(np.where(True, d, e).dtype, np.float32) e = float('-Infinity') assert_equal(np.where(True, d, e).dtype, np.float32) # also check upcast e = float(1e150) assert_equal(np.where(True, d, e).dtype, np.float64) def test_ndim(self): c = [True, False] a = np.zeros((2, 25)) b = np.ones((2, 25)) r = np.where(np.array(c)[:,np.newaxis], a, b) assert_array_equal(r[0], a[0]) assert_array_equal(r[1], b[0]) a = a.T b = b.T r = np.where(c, a, b) assert_array_equal(r[:,0], a[:,0]) assert_array_equal(r[:,1], b[:,0]) def test_dtype_mix(self): c = np.array([False, True, False, False, False, False, True, False, False, False, True, False]) a = np.uint32(1) b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], dtype=np.float64) r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], dtype=np.float64) assert_equal(np.where(c, a, b), r) a = a.astype(np.float32) b = b.astype(np.int64) assert_equal(np.where(c, a, b), r) # non bool mask c = c.astype(np.int) c[c != 0] = 34242324 assert_equal(np.where(c, a, b), r) # invert tmpmask = c != 0 c[c == 0] = 41247212 c[tmpmask] = 0 assert_equal(np.where(c, b, a), r) def test_foreign(self): c = np.array([False, True, False, False, False, False, True, False, False, False, True, False]) r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], dtype=np.float64) a = np.ones(1, dtype='>i4') b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], dtype=np.float64) assert_equal(np.where(c, a, b), r) b = b.astype('>f8') assert_equal(np.where(c, a, b), r) a = a.astype('<i4') assert_equal(np.where(c, a, b), r) c = c.astype('>i4') assert_equal(np.where(c, a, b), r) def test_error(self): c = [True, True] a = np.ones((4, 5)) b = np.ones((5, 5)) assert_raises(ValueError, np.where, c, a, a) assert_raises(ValueError, np.where, c[0], a, b) def test_string(self): # gh-4778 check strings are properly filled with nulls a = np.array("abc") b = np.array("x" * 753) assert_equal(np.where(True, a, b), "abc") assert_equal(np.where(False, b, a), 
"abc") # check native datatype sized strings a = np.array("abcd") b = np.array("x" * 8) assert_equal(np.where(True, a, b), "abcd") assert_equal(np.where(False, b, a), "abcd") class TestSizeOf(TestCase): def test_empty_array(self): x = np.array([]) assert_(sys.getsizeof(x) > 0) def check_array(self, dtype): elem_size = dtype(0).itemsize for length in [10, 50, 100, 500]: x = np.arange(length, dtype=dtype) assert_(sys.getsizeof(x) > length * elem_size) def test_array_int32(self): self.check_array(np.int32) def test_array_int64(self): self.check_array(np.int64) def test_array_float32(self): self.check_array(np.float32) def test_array_float64(self): self.check_array(np.float64) def test_view(self): d = np.ones(100) assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) def test_reshape(self): d = np.ones(100) assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) def test_resize(self): d = np.ones(100) old = sys.getsizeof(d) d.resize(50) assert_(old > sys.getsizeof(d)) d.resize(150) assert_(old < sys.getsizeof(d)) def test_error(self): d = np.ones(100) assert_raises(TypeError, d.__sizeof__, "a") class TestHashing(TestCase): def test_arrays_not_hashable(self): x = np.ones(3) assert_raises(TypeError, hash, x) def test_collections_hashable(self): x = np.array([]) self.assertFalse(isinstance(x, collections.Hashable)) class TestArrayPriority(TestCase): # This will go away when __array_priority__ is settled, meanwhile # it serves to check unintended changes. op = operator binary_ops = [ op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod, op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt, op.ge, op.lt, op.le, op.ne, op.eq ] if sys.version_info[0] < 3: binary_ops.append(op.div) class Foo(np.ndarray): __array_priority__ = 100. def __new__(cls, *args, **kwargs): return np.array(*args, **kwargs).view(cls) class Bar(np.ndarray): __array_priority__ = 101. def __new__(cls, *args, **kwargs): return np.array(*args, **kwargs).view(cls) class Other(object): __array_priority__ = 1000. 
def _all(self, other): return self.__class__() __add__ = __radd__ = _all __sub__ = __rsub__ = _all __mul__ = __rmul__ = _all __pow__ = __rpow__ = _all __div__ = __rdiv__ = _all __mod__ = __rmod__ = _all __truediv__ = __rtruediv__ = _all __floordiv__ = __rfloordiv__ = _all __and__ = __rand__ = _all __xor__ = __rxor__ = _all __or__ = __ror__ = _all __lshift__ = __rlshift__ = _all __rshift__ = __rrshift__ = _all __eq__ = _all __ne__ = _all __gt__ = _all __ge__ = _all __lt__ = _all __le__ = _all def test_ndarray_subclass(self): a = np.array([1, 2]) b = self.Bar([1, 2]) for f in self.binary_ops: msg = repr(f) assert_(isinstance(f(a, b), self.Bar), msg) assert_(isinstance(f(b, a), self.Bar), msg) def test_ndarray_other(self): a = np.array([1, 2]) b = self.Other() for f in self.binary_ops: msg = repr(f) assert_(isinstance(f(a, b), self.Other), msg) assert_(isinstance(f(b, a), self.Other), msg) def test_subclass_subclass(self): a = self.Foo([1, 2]) b = self.Bar([1, 2]) for f in self.binary_ops: msg = repr(f) assert_(isinstance(f(a, b), self.Bar), msg) assert_(isinstance(f(b, a), self.Bar), msg) def test_subclass_other(self): a = self.Foo([1, 2]) b = self.Other() for f in self.binary_ops: msg = repr(f) assert_(isinstance(f(a, b), self.Other), msg) assert_(isinstance(f(b, a), self.Other), msg) class TestBytestringArrayNonzero(TestCase): def test_empty_bstring_array_is_falsey(self): self.assertFalse(np.array([''], dtype=np.str)) def test_whitespace_bstring_array_is_falsey(self): a = np.array(['spam'], dtype=np.str) a[0] = ' \0\0' self.assertFalse(a) def test_all_null_bstring_array_is_falsey(self): a = np.array(['spam'], dtype=np.str) a[0] = '\0\0\0\0' self.assertFalse(a) def test_null_inside_bstring_array_is_truthy(self): a = np.array(['spam'], dtype=np.str) a[0] = ' \0 \0' self.assertTrue(a) class TestUnicodeArrayNonzero(TestCase): def test_empty_ustring_array_is_falsey(self): self.assertFalse(np.array([''], dtype=np.unicode)) def test_whitespace_ustring_array_is_falsey(self): a = np.array(['eggs'], dtype=np.unicode) a[0] = ' \0\0' self.assertFalse(a) def test_all_null_ustring_array_is_falsey(self): a = np.array(['eggs'], dtype=np.unicode) a[0] = '\0\0\0\0' self.assertFalse(a) def test_null_inside_ustring_array_is_truthy(self): a = np.array(['eggs'], dtype=np.unicode) a[0] = ' \0 \0' self.assertTrue(a) if __name__ == "__main__": run_module_suite()
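# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the test suite above):
# a minimal standalone illustration of two behaviours the tests exercise --
# the PEP 3118 buffer round-trip through memoryview, and np.where
# broadcasting a scalar against an array.  Values are illustrative only.
def _buffer_and_where_sketch():
    import numpy as np

    # Re-wrapping an array exported through memoryview yields a view that
    # shares memory with the original (owndata is False), as the
    # _check_roundtrip helper above asserts.
    a = np.arange(6, dtype='i4').reshape(2, 3)
    view = np.asarray(memoryview(a))
    assert not view.flags.owndata
    assert view.shape == a.shape and (view == a).all()

    # np.where broadcasts a scalar second argument against the condition,
    # mirroring the scalar/array mixes covered by TestWhere.test_dtype_mix.
    cond = np.array([True, False, True])
    out = np.where(cond, 1, np.array([10, 20, 30]))
    assert (out == [1, 20, 1]).all()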
mit
larsmans/scipy
scipy/interpolate/fitpack.py
25
46138
#!/usr/bin/env python """ fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx). FITPACK is a collection of FORTRAN programs for curve and surface fitting with splines and tensor product splines. See http://www.cs.kuleuven.ac.be/cwis/research/nalag/research/topics/fitpack.html or http://www.netlib.org/dierckx/index.html Copyright 2002 Pearu Peterson all rights reserved, Pearu Peterson <pearu@cens.ioc.ee> Permission to use, modify, and distribute this software is given under the terms of the SciPy (BSD style) license. See LICENSE.txt that came with this distribution for specifics. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. TODO: Make interfaces to the following fitpack functions: For univariate splines: cocosp, concon, fourco, insert For bivariate splines: profil, regrid, parsur, surev """ from __future__ import division, print_function, absolute_import __all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde', 'bisplrep', 'bisplev', 'insert', 'splder', 'splantider'] import warnings import numpy as np from . import _fitpack from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose, empty, iinfo, intc, asarray) # Try to replace _fitpack interface with # f2py-generated version from . import dfitpack def _intc_overflow(x, msg=None): """Cast the value to an intc and raise an OverflowError if the value cannot fit. """ if x > iinfo(intc).max: if msg is None: msg = '%r cannot fit into an intc' % x raise OverflowError(msg) return intc(x) _iermess = { 0: ["The spline has a residual sum of squares fp such that " "abs(fp-s)/s<=0.001", None], -1: ["The spline is an interpolating spline (fp=0)", None], -2: ["The spline is weighted least-squares polynomial of degree k.\n" "fp gives the upper bound fp0 for the smoothing factor s", None], 1: ["The required storage space exceeds the available storage space.\n" "Probable causes: data (x,y) size is too small or smoothing parameter" "\ns is too small (fp>s).", ValueError], 2: ["A theoretically impossible result when finding a smoothing spline\n" "with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)", ValueError], 3: ["The maximal number of iterations (20) allowed for finding smoothing\n" "spline with fp=s has been reached. Probable cause: s too small.\n" "(abs(fp-s)/s>0.001)", ValueError], 10: ["Error on input data", ValueError], 'unknown': ["An error occurred", TypeError] } _iermess2 = { 0: ["The spline has a residual sum of squares fp such that " "abs(fp-s)/s<=0.001", None], -1: ["The spline is an interpolating spline (fp=0)", None], -2: ["The spline is weighted least-squares polynomial of degree kx and ky." "\nfp gives the upper bound fp0 for the smoothing factor s", None], -3: ["Warning. The coefficients of the spline have been computed as the\n" "minimal norm least-squares solution of a rank deficient system.", None], 1: ["The required storage space exceeds the available storage space.\n" "Probable causes: nxest or nyest too small or s is too small. (fp>s)", ValueError], 2: ["A theoretically impossible result when finding a smoothing spline\n" "with fp = s. Probable causes: s too small or badly chosen eps.\n" "(abs(fp-s)/s>0.001)", ValueError], 3: ["The maximal number of iterations (20) allowed for finding smoothing\n" "spline with fp=s has been reached. 
Probable cause: s too small.\n" "(abs(fp-s)/s>0.001)", ValueError], 4: ["No more knots can be added because the number of B-spline\n" "coefficients already exceeds the number of data points m.\n" "Probable causes: either s or m too small. (fp>s)", ValueError], 5: ["No more knots can be added because the additional knot would\n" "coincide with an old one. Probable cause: s too small or too large\n" "a weight to an inaccurate data point. (fp>s)", ValueError], 10: ["Error on input data", ValueError], 11: ["rwrk2 too small, i.e. there is not enough workspace for computing\n" "the minimal least-squares solution of a rank deficient system of\n" "linear equations.", ValueError], 'unknown': ["An error occurred", TypeError] } _parcur_cache = {'t': array([], float), 'wrk': array([], float), 'iwrk': array([], intc), 'u': array([], float), 'ub': 0, 'ue': 1} def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None, full_output=0, nest=None, per=0, quiet=1): """ Find the B-spline representation of an N-dimensional curve. Given a list of N rank-1 arrays, `x`, which represent a curve in N-dimensional space parametrized by `u`, find a smooth approximating spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK. Parameters ---------- x : array_like A list of sample vector arrays representing the curve. w : array_like, optional Strictly positive rank-1 array of weights the same length as `x[0]`. The weights are used in computing the weighted least-squares spline fit. If the errors in the `x` values have standard-deviation given by the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``. u : array_like, optional An array of parameter values. If not given, these values are calculated automatically as ``M = len(x[0])``, where v[0] = 0 v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`) u[i] = v[i] / v[M-1] ub, ue : int, optional The end-points of the parameters interval. Defaults to u[0] and u[-1]. k : int, optional Degree of the spline. Cubic splines are recommended. Even values of `k` should be avoided especially with a small s-value. ``1 <= k <= 5``, default is 3. task : int, optional If task==0 (default), find t and c for a given smoothing factor, s. If task==1, find t and c for another value of the smoothing factor, s. There must have been a previous call with task=0 or task=1 for the same set of data. If task=-1 find the weighted least square spline for a given set of knots, t. s : float, optional A smoothing condition. The amount of smoothness is determined by satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``, where g(x) is the smoothed interpolation of (x,y). The user can use `s` to control the trade-off between closeness and smoothness of fit. Larger `s` means more smoothing while smaller values of `s` indicate less smoothing. Recommended values of `s` depend on the weights, w. If the weights represent the inverse of the standard-deviation of y, then a good `s` value should be found in the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of data points in x, y, and w. t : int, optional The knots needed for task=-1. full_output : int, optional If non-zero, then return optional outputs. nest : int, optional An over-estimate of the total number of knots of the spline to help in determining the storage space. By default nest=m/2. Always large enough is nest=m+k+1. per : int, optional If non-zero, data points are considered periodic with period ``x[m-1] - x[0]`` and a smooth periodic spline approximation is returned. 
Values of ``y[m-1]`` and ``w[m-1]`` are not used. quiet : int, optional Non-zero to suppress messages. This parameter is deprecated; use standard Python warning filters instead. Returns ------- tck : tuple A tuple (t,c,k) containing the vector of knots, the B-spline coefficients, and the degree of the spline. u : array An array of the values of the parameter. fp : float The weighted sum of squared residuals of the spline approximation. ier : int An integer flag about splrep success. Success is indicated if ier<=0. If ier in [1,2,3] an error occurred but was not raised. Otherwise an error is raised. msg : str A message corresponding to the integer flag, ier. See Also -------- splrep, splev, sproot, spalde, splint, bisplrep, bisplev UnivariateSpline, BivariateSpline Notes ----- See `splev` for evaluation of the spline and its derivatives. The number of dimensions N must be smaller than 11. References ---------- .. [1] P. Dierckx, "Algorithms for smoothing data with periodic and parametric splines, Computer Graphics and Image Processing", 20 (1982) 171-184. .. [2] P. Dierckx, "Algorithms for smoothing data with periodic and parametric splines", report tw55, Dept. Computer Science, K.U.Leuven, 1981. .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on Numerical Analysis, Oxford University Press, 1993. """ if task <= 0: _parcur_cache = {'t': array([], float), 'wrk': array([], float), 'iwrk': array([], intc), 'u': array([], float), 'ub': 0, 'ue': 1} x = atleast_1d(x) idim, m = x.shape if per: for i in range(idim): if x[i][0] != x[i][-1]: if quiet < 2: warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' % (i, m, i))) x[i][-1] = x[i][0] if not 0 < idim < 11: raise TypeError('0 < idim < 11 must hold') if w is None: w = ones(m, float) else: w = atleast_1d(w) ipar = (u is not None) if ipar: _parcur_cache['u'] = u if ub is None: _parcur_cache['ub'] = u[0] else: _parcur_cache['ub'] = ub if ue is None: _parcur_cache['ue'] = u[-1] else: _parcur_cache['ue'] = ue else: _parcur_cache['u'] = zeros(m, float) if not (1 <= k <= 5): raise TypeError('1 <= k= %d <=5 must hold' % k) if not (-1 <= task <= 1): raise TypeError('task must be -1, 0 or 1') if (not len(w) == m) or (ipar == 1 and (not len(u) == m)): raise TypeError('Mismatch of input dimensions') if s is None: s = m - sqrt(2*m) if t is None and task == -1: raise TypeError('Knots must be given for task=-1') if t is not None: _parcur_cache['t'] = atleast_1d(t) n = len(_parcur_cache['t']) if task == -1 and n < 2*k + 2: raise TypeError('There must be at least 2*k+2 knots for task=-1') if m <= k: raise TypeError('m > k must hold') if nest is None: nest = m + 2*k if (task >= 0 and s == 0) or (nest < 0): if per: nest = m + 2*k else: nest = m + k + 1 nest = max(nest, 2*k + 3) u = _parcur_cache['u'] ub = _parcur_cache['ub'] ue = _parcur_cache['ue'] t = _parcur_cache['t'] wrk = _parcur_cache['wrk'] iwrk = _parcur_cache['iwrk'] t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k, task, ipar, s, t, nest, wrk, iwrk, per) _parcur_cache['u'] = o['u'] _parcur_cache['ub'] = o['ub'] _parcur_cache['ue'] = o['ue'] _parcur_cache['t'] = t _parcur_cache['wrk'] = o['wrk'] _parcur_cache['iwrk'] = o['iwrk'] ier = o['ier'] fp = o['fp'] n = len(t) u = o['u'] c.shape = idim, n - k - 1 tcku = [t, list(c), k], u if ier <= 0 and not quiet: warnings.warn(RuntimeWarning(_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" % (k, len(t), m, fp, s))) if ier > 0 and not full_output: if ier in [1, 2, 3]: warnings.warn(RuntimeWarning(_iermess[ier][0])) else: 
try: raise _iermess[ier][1](_iermess[ier][0]) except KeyError: raise _iermess['unknown'][1](_iermess['unknown'][0]) if full_output: try: return tcku, fp, ier, _iermess[ier][0] except KeyError: return tcku, fp, ier, _iermess['unknown'][0] else: return tcku _curfit_cache = {'t': array([], float), 'wrk': array([], float), 'iwrk': array([], intc)} def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None, full_output=0, per=0, quiet=1): """ Find the B-spline representation of 1-D curve. Given the set of data points ``(x[i], y[i])`` determine a smooth spline approximation of degree k on the interval ``xb <= x <= xe``. Parameters ---------- x, y : array_like The data points defining a curve y = f(x). w : array_like, optional Strictly positive rank-1 array of weights the same length as x and y. The weights are used in computing the weighted least-squares spline fit. If the errors in the y values have standard-deviation given by the vector d, then w should be 1/d. Default is ones(len(x)). xb, xe : float, optional The interval to fit. If None, these default to x[0] and x[-1] respectively. k : int, optional The order of the spline fit. It is recommended to use cubic splines. Even order splines should be avoided especially with small s values. 1 <= k <= 5 task : {1, 0, -1}, optional If task==0 find t and c for a given smoothing factor, s. If task==1 find t and c for another value of the smoothing factor, s. There must have been a previous call with task=0 or task=1 for the same set of data (t will be stored an used internally) If task=-1 find the weighted least square spline for a given set of knots, t. These should be interior knots as knots on the ends will be added automatically. s : float, optional A smoothing condition. The amount of smoothness is determined by satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x) is the smoothed interpolation of (x,y). The user can use s to control the tradeoff between closeness and smoothness of fit. Larger s means more smoothing while smaller values of s indicate less smoothing. Recommended values of s depend on the weights, w. If the weights represent the inverse of the standard-deviation of y, then a good s value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if weights are supplied. s = 0.0 (interpolating) if no weights are supplied. t : array_like, optional The knots needed for task=-1. If given then task is automatically set to -1. full_output : bool, optional If non-zero, then return optional outputs. per : bool, optional If non-zero, data points are considered periodic with period x[m-1] - x[0] and a smooth periodic spline approximation is returned. Values of y[m-1] and w[m-1] are not used. quiet : bool, optional Non-zero to suppress messages. This parameter is deprecated; use standard Python warning filters instead. Returns ------- tck : tuple (t,c,k) a tuple containing the vector of knots, the B-spline coefficients, and the degree of the spline. fp : array, optional The weighted sum of squared residuals of the spline approximation. ier : int, optional An integer flag about splrep success. Success is indicated if ier<=0. If ier in [1,2,3] an error occurred but was not raised. Otherwise an error is raised. msg : str, optional A message corresponding to the integer flag, ier. Notes ----- See splev for evaluation of the spline and its derivatives. The user is responsible for assuring that the values of *x* are unique. 
Otherwise, *splrep* will not return sensible results. See Also -------- UnivariateSpline, BivariateSpline splprep, splev, sproot, spalde, splint bisplrep, bisplev Notes ----- See splev for evaluation of the spline and its derivatives. Uses the FORTRAN routine curfit from FITPACK. If provided, knots `t` must satisfy the Schoenberg-Whitney conditions, i.e., there must be a subset of data points ``x[j]`` such that ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``. References ---------- Based on algorithms described in [1]_, [2]_, [3]_, and [4]_: .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and integration of experimental data using spline functions", J.Comp.Appl.Maths 1 (1975) 165-184. .. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular grid while using spline functions", SIAM J.Numer.Anal. 19 (1982) 1286-1304. .. [3] P. Dierckx, "An improved algorithm for curve fitting with spline functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981. .. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on Numerical Analysis, Oxford University Press, 1993. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy.interpolate import splev, splrep >>> x = np.linspace(0, 10, 10) >>> y = np.sin(x) >>> tck = splrep(x, y) >>> x2 = np.linspace(0, 10, 200) >>> y2 = splev(x2, tck) >>> plt.plot(x, y, 'o', x2, y2) >>> plt.show() """ if task <= 0: _curfit_cache = {} x, y = map(atleast_1d, [x, y]) m = len(x) if w is None: w = ones(m, float) if s is None: s = 0.0 else: w = atleast_1d(w) if s is None: s = m - sqrt(2*m) if not len(w) == m: raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m)) if (m != len(y)) or (m != len(w)): raise TypeError('Lengths of the first three arguments (x,y,w) must ' 'be equal') if not (1 <= k <= 5): raise TypeError('Given degree of the spline (k=%d) is not supported. 
' '(1<=k<=5)' % k) if m <= k: raise TypeError('m > k must hold') if xb is None: xb = x[0] if xe is None: xe = x[-1] if not (-1 <= task <= 1): raise TypeError('task must be -1, 0 or 1') if t is not None: task = -1 if task == -1: if t is None: raise TypeError('Knots must be given for task=-1') numknots = len(t) _curfit_cache['t'] = empty((numknots + 2*k + 2,), float) _curfit_cache['t'][k+1:-k-1] = t nest = len(_curfit_cache['t']) elif task == 0: if per: nest = max(m + 2*k, 2*k + 3) else: nest = max(m + k + 1, 2*k + 3) t = empty((nest,), float) _curfit_cache['t'] = t if task <= 0: if per: _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float) else: _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float) _curfit_cache['iwrk'] = empty((nest,), intc) try: t = _curfit_cache['t'] wrk = _curfit_cache['wrk'] iwrk = _curfit_cache['iwrk'] except KeyError: raise TypeError("must call with task=1 only after" " call with task=0,-1") if not per: n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk, xb, xe, k, s) else: n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s) tck = (t[:n], c[:n], k) if ier <= 0 and not quiet: _mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" % (k, len(t), m, fp, s)) warnings.warn(RuntimeWarning(_mess)) if ier > 0 and not full_output: if ier in [1, 2, 3]: warnings.warn(RuntimeWarning(_iermess[ier][0])) else: try: raise _iermess[ier][1](_iermess[ier][0]) except KeyError: raise _iermess['unknown'][1](_iermess['unknown'][0]) if full_output: try: return tck, fp, ier, _iermess[ier][0] except KeyError: return tck, fp, ier, _iermess['unknown'][0] else: return tck def splev(x, tck, der=0, ext=0): """ Evaluate a B-spline or its derivatives. Given the knots and coefficients of a B-spline representation, evaluate the value of the smoothing polynomial and its derivatives. This is a wrapper around the FORTRAN routines splev and splder of FITPACK. Parameters ---------- x : array_like An array of points at which to return the value of the smoothed spline or its derivatives. If `tck` was returned from `splprep`, then the parameter values, u should be given. tck : tuple A sequence of length 3 returned by `splrep` or `splprep` containing the knots, coefficients, and degree of the spline. der : int, optional The order of derivative of the spline to compute (must be less than or equal to k). ext : int, optional Controls the value returned for elements of ``x`` not in the interval defined by the knot sequence. * if ext=0, return the extrapolated value. * if ext=1, return 0 * if ext=2, raise a ValueError * if ext=3, return the boundary value. The default value is 0. Returns ------- y : ndarray or list of ndarrays An array of values representing the spline function evaluated at the points in ``x``. If `tck` was returned from `splprep`, then this is a list of arrays representing the curve in N-dimensional space. See Also -------- splprep, splrep, sproot, spalde, splint bisplrep, bisplev References ---------- .. [1] C. de Boor, "On calculating with b-splines", J. Approximation Theory, 6, p.50-62, 1972. .. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths Applics, 10, p.134-149, 1972. .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on Numerical Analysis, Oxford University Press, 1993. 
""" t, c, k = tck try: c[0][0] parametric = True except: parametric = False if parametric: return list(map(lambda c, x=x, t=t, k=k, der=der: splev(x, [t, c, k], der, ext), c)) else: if not (0 <= der <= k): raise ValueError("0<=der=%d<=k=%d must hold" % (der, k)) if ext not in (0, 1, 2, 3): raise ValueError("ext = %s not in (0, 1, 2, 3) " % ext) x = asarray(x) shape = x.shape x = atleast_1d(x).ravel() y, ier = _fitpack._spl_(x, der, t, c, k, ext) if ier == 10: raise ValueError("Invalid input data") if ier == 1: raise ValueError("Found x value not in the domain") if ier: raise TypeError("An error occurred") return y.reshape(shape) def splint(a, b, tck, full_output=0): """ Evaluate the definite integral of a B-spline. Given the knots and coefficients of a B-spline, evaluate the definite integral of the smoothing polynomial between two given points. Parameters ---------- a, b : float The end-points of the integration interval. tck : tuple A tuple (t,c,k) containing the vector of knots, the B-spline coefficients, and the degree of the spline (see `splev`). full_output : int, optional Non-zero to return optional output. Returns ------- integral : float The resulting integral. wrk : ndarray An array containing the integrals of the normalized B-splines defined on the set of knots. Notes ----- splint silently assumes that the spline function is zero outside the data interval (a, b). See Also -------- splprep, splrep, sproot, spalde, splev bisplrep, bisplev UnivariateSpline, BivariateSpline References ---------- .. [1] P.W. Gaffney, The calculation of indefinite integrals of b-splines", J. Inst. Maths Applics, 17, p.37-41, 1976. .. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs on Numerical Analysis, Oxford University Press, 1993. """ t, c, k = tck try: c[0][0] parametric = True except: parametric = False if parametric: return list(map(lambda c, a=a, b=b, t=t, k=k: splint(a, b, [t, c, k]), c)) else: aint, wrk = _fitpack._splint(t, c, k, a, b) if full_output: return aint, wrk else: return aint def sproot(tck, mest=10): """ Find the roots of a cubic B-spline. Given the knots (>=8) and coefficients of a cubic B-spline return the roots of the spline. Parameters ---------- tck : tuple A tuple (t,c,k) containing the vector of knots, the B-spline coefficients, and the degree of the spline. The number of knots must be >= 8, and the degree must be 3. The knots must be a montonically increasing sequence. mest : int, optional An estimate of the number of zeros (Default is 10). Returns ------- zeros : ndarray An array giving the roots of the spline. See also -------- splprep, splrep, splint, spalde, splev bisplrep, bisplev UnivariateSpline, BivariateSpline References ---------- .. [1] C. de Boor, "On calculating with b-splines", J. Approximation Theory, 6, p.50-62, 1972. .. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths Applics, 10, p.134-149, 1972. .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on Numerical Analysis, Oxford University Press, 1993. """ t, c, k = tck if k != 3: raise ValueError("sproot works only for cubic (k=3) splines") try: c[0][0] parametric = True except: parametric = False if parametric: return list(map(lambda c, t=t, k=k, mest=mest: sproot([t, c, k], mest), c)) else: if len(t) < 8: raise TypeError("The number of knots %d>=8" % len(t)) z, ier = _fitpack._sproot(t, c, k, mest) if ier == 10: raise TypeError("Invalid input data. 
" "t1<=..<=t4<t5<..<tn-3<=..<=tn must hold.") if ier == 0: return z if ier == 1: warnings.warn(RuntimeWarning("The number of zeros exceeds mest")) return z raise TypeError("Unknown error") def spalde(x, tck): """ Evaluate all derivatives of a B-spline. Given the knots and coefficients of a cubic B-spline compute all derivatives up to order k at a point (or set of points). Parameters ---------- x : array_like A point or a set of points at which to evaluate the derivatives. Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`. tck : tuple A tuple (t,c,k) containing the vector of knots, the B-spline coefficients, and the degree of the spline. Returns ------- results : {ndarray, list of ndarrays} An array (or a list of arrays) containing all derivatives up to order k inclusive for each point `x`. See Also -------- splprep, splrep, splint, sproot, splev, bisplrep, bisplev, UnivariateSpline, BivariateSpline References ---------- .. [1] de Boor C : On calculating with b-splines, J. Approximation Theory 6 (1972) 50-62. .. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths applics 10 (1972) 134-149. .. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on Numerical Analysis, Oxford University Press, 1993. """ t, c, k = tck try: c[0][0] parametric = True except: parametric = False if parametric: return list(map(lambda c, x=x, t=t, k=k: spalde(x, [t, c, k]), c)) else: x = atleast_1d(x) if len(x) > 1: return list(map(lambda x, tck=tck: spalde(x, tck), x)) d, ier = _fitpack._spalde(t, c, k, x[0]) if ier == 0: return d if ier == 10: raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.") raise TypeError("Unknown error") # def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None, # full_output=0,nest=None,per=0,quiet=1): _surfit_cache = {'tx': array([], float), 'ty': array([], float), 'wrk': array([], float), 'iwrk': array([], intc)} def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None, kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None, full_output=0, nxest=None, nyest=None, quiet=1): """ Find a bivariate B-spline representation of a surface. Given a set of data points (x[i], y[i], z[i]) representing a surface z=f(x,y), compute a B-spline representation of the surface. Based on the routine SURFIT from FITPACK. Parameters ---------- x, y, z : ndarray Rank-1 arrays of data points. w : ndarray, optional Rank-1 array of weights. By default ``w=np.ones(len(x))``. xb, xe : float, optional End points of approximation interval in `x`. By default ``xb = x.min(), xe=x.max()``. yb, ye : float, optional End points of approximation interval in `y`. By default ``yb=y.min(), ye = y.max()``. kx, ky : int, optional The degrees of the spline (1 <= kx, ky <= 5). Third order (kx=ky=3) is recommended. task : int, optional If task=0, find knots in x and y and coefficients for a given smoothing factor, s. If task=1, find knots and coefficients for another value of the smoothing factor, s. bisplrep must have been previously called with task=0 or task=1. If task=-1, find coefficients for a given set of knots tx, ty. s : float, optional A non-negative smoothing factor. If weights correspond to the inverse of the standard-deviation of the errors in z, then a good s-value should be found in the range ``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x). eps : float, optional A threshold for determining the effective rank of an over-determined linear system of equations (0 < eps < 1). `eps` is not likely to need changing. 
tx, ty : ndarray, optional Rank-1 arrays of the knots of the spline for task=-1 full_output : int, optional Non-zero to return optional outputs. nxest, nyest : int, optional Over-estimates of the total number of knots. If None then ``nxest = max(kx+sqrt(m/2),2*kx+3)``, ``nyest = max(ky+sqrt(m/2),2*ky+3)``. quiet : int, optional Non-zero to suppress printing of messages. This parameter is deprecated; use standard Python warning filters instead. Returns ------- tck : array_like A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and coefficients (c) of the bivariate B-spline representation of the surface along with the degree of the spline. fp : ndarray The weighted sum of squared residuals of the spline approximation. ier : int An integer flag about splrep success. Success is indicated if ier<=0. If ier in [1,2,3] an error occurred but was not raised. Otherwise an error is raised. msg : str A message corresponding to the integer flag, ier. See Also -------- splprep, splrep, splint, sproot, splev UnivariateSpline, BivariateSpline Notes ----- See `bisplev` to evaluate the value of the B-spline given its tck representation. References ---------- .. [1] Dierckx P.:An algorithm for surface fitting with spline functions Ima J. Numer. Anal. 1 (1981) 267-283. .. [2] Dierckx P.:An algorithm for surface fitting with spline functions report tw50, Dept. Computer Science,K.U.Leuven, 1980. .. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on Numerical Analysis, Oxford University Press, 1993. """ x, y, z = map(ravel, [x, y, z]) # ensure 1-d arrays. m = len(x) if not (m == len(y) == len(z)): raise TypeError('len(x)==len(y)==len(z) must hold.') if w is None: w = ones(m, float) else: w = atleast_1d(w) if not len(w) == m: raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m)) if xb is None: xb = x.min() if xe is None: xe = x.max() if yb is None: yb = y.min() if ye is None: ye = y.max() if not (-1 <= task <= 1): raise TypeError('task must be -1, 0 or 1') if s is None: s = m - sqrt(2*m) if tx is None and task == -1: raise TypeError('Knots_x must be given for task=-1') if tx is not None: _surfit_cache['tx'] = atleast_1d(tx) nx = len(_surfit_cache['tx']) if ty is None and task == -1: raise TypeError('K nots_y must be given for task=-1') if ty is not None: _surfit_cache['ty'] = atleast_1d(ty) ny = len(_surfit_cache['ty']) if task == -1 and nx < 2*kx+2: raise TypeError('There must be at least 2*kx+2 knots_x for task=-1') if task == -1 and ny < 2*ky+2: raise TypeError('There must be at least 2*ky+2 knots_x for task=-1') if not ((1 <= kx <= 5) and (1 <= ky <= 5)): raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not ' 'supported. 
(1<=k<=5)' % (kx, ky)) if m < (kx + 1)*(ky + 1): raise TypeError('m >= (kx+1)(ky+1) must hold') if nxest is None: nxest = int(kx + sqrt(m/2)) if nyest is None: nyest = int(ky + sqrt(m/2)) nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3) if task >= 0 and s == 0: nxest = int(kx + sqrt(3*m)) nyest = int(ky + sqrt(3*m)) if task == -1: _surfit_cache['tx'] = atleast_1d(tx) _surfit_cache['ty'] = atleast_1d(ty) tx, ty = _surfit_cache['tx'], _surfit_cache['ty'] wrk = _surfit_cache['wrk'] u = nxest - kx - 1 v = nyest - ky - 1 km = max(kx, ky) + 1 ne = max(nxest, nyest) bx, by = kx*v + ky + 1, ky*u + kx + 1 b1, b2 = bx, bx + v - ky if bx > by: b1, b2 = by, by + u - kx msg = "Too many data points to interpolate" lwrk1 = _intc_overflow(u*v*(2 + b1 + b2) + 2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1, msg=msg) lwrk2 = _intc_overflow(u*v*(b2 + 1) + b2, msg=msg) tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky, task, s, eps, tx, ty, nxest, nyest, wrk, lwrk1, lwrk2) _curfit_cache['tx'] = tx _curfit_cache['ty'] = ty _curfit_cache['wrk'] = o['wrk'] ier, fp = o['ier'], o['fp'] tck = [tx, ty, c, kx, ky] ierm = min(11, max(-3, ier)) if ierm <= 0 and not quiet: _mess = (_iermess2[ierm][0] + "\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" % (kx, ky, len(tx), len(ty), m, fp, s)) warnings.warn(RuntimeWarning(_mess)) if ierm > 0 and not full_output: if ier in [1, 2, 3, 4, 5]: _mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" % (kx, ky, len(tx), len(ty), m, fp, s)) warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess)) else: try: raise _iermess2[ierm][1](_iermess2[ierm][0]) except KeyError: raise _iermess2['unknown'][1](_iermess2['unknown'][0]) if full_output: try: return tck, fp, ier, _iermess2[ierm][0] except KeyError: return tck, fp, ier, _iermess2['unknown'][0] else: return tck def bisplev(x, y, tck, dx=0, dy=0): """ Evaluate a bivariate B-spline and its derivatives. Return a rank-2 array of spline function values (or spline derivative values) at points given by the cross-product of the rank-1 arrays `x` and `y`. In special cases, return an array or just a float if either `x` or `y` or both are floats. Based on BISPEV from FITPACK. Parameters ---------- x, y : ndarray Rank-1 arrays specifying the domain over which to evaluate the spline or its derivative. tck : tuple A sequence of length 5 returned by `bisplrep` containing the knot locations, the coefficients, and the degree of the spline: [tx, ty, c, kx, ky]. dx, dy : int, optional The orders of the partial derivatives in `x` and `y` respectively. Returns ------- vals : ndarray The B-spline or its derivative evaluated over the set formed by the cross-product of `x` and `y`. See Also -------- splprep, splrep, splint, sproot, splev UnivariateSpline, BivariateSpline Notes ----- See `bisplrep` to generate the `tck` representation. References ---------- .. [1] Dierckx P. : An algorithm for surface fitting with spline functions Ima J. Numer. Anal. 1 (1981) 267-283. .. [2] Dierckx P. : An algorithm for surface fitting with spline functions report tw50, Dept. Computer Science,K.U.Leuven, 1980. .. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on Numerical Analysis, Oxford University Press, 1993. 
""" tx, ty, c, kx, ky = tck if not (0 <= dx < kx): raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx)) if not (0 <= dy < ky): raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky)) x, y = map(atleast_1d, [x, y]) if (len(x.shape) != 1) or (len(y.shape) != 1): raise ValueError("First two entries should be rank-1 arrays.") z, ier = _fitpack._bispev(tx, ty, c, kx, ky, x, y, dx, dy) if ier == 10: raise ValueError("Invalid input data") if ier: raise TypeError("An error occurred") z.shape = len(x), len(y) if len(z) > 1: return z if len(z[0]) > 1: return z[0] return z[0][0] def dblint(xa, xb, ya, yb, tck): """Evaluate the integral of a spline over area [xa,xb] x [ya,yb]. Parameters ---------- xa, xb : float The end-points of the x integration interval. ya, yb : float The end-points of the y integration interval. tck : list [tx, ty, c, kx, ky] A sequence of length 5 returned by bisplrep containing the knot locations tx, ty, the coefficients c, and the degrees kx, ky of the spline. Returns ------- integ : float The value of the resulting integral. """ tx, ty, c, kx, ky = tck return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb) def insert(x, tck, m=1, per=0): """ Insert knots into a B-spline. Given the knots and coefficients of a B-spline representation, create a new B-spline with a knot inserted `m` times at point `x`. This is a wrapper around the FORTRAN routine insert of FITPACK. Parameters ---------- x (u) : array_like A 1-D point at which to insert a new knot(s). If `tck` was returned from ``splprep``, then the parameter values, u should be given. tck : tuple A tuple (t,c,k) returned by ``splrep`` or ``splprep`` containing the vector of knots, the B-spline coefficients, and the degree of the spline. m : int, optional The number of times to insert the given knot (its multiplicity). Default is 1. per : int, optional If non-zero, the input spline is considered periodic. Returns ------- tck : tuple A tuple (t,c,k) containing the vector of knots, the B-spline coefficients, and the degree of the new spline. ``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline. In case of a periodic spline (``per != 0``) there must be either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x`` or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``. Notes ----- Based on algorithms from [1]_ and [2]_. References ---------- .. [1] W. Boehm, "Inserting new knots into b-spline curves.", Computer Aided Design, 12, p.199-201, 1980. .. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on Numerical Analysis", Oxford University Press, 1993. """ t, c, k = tck try: c[0][0] parametric = True except: parametric = False if parametric: cc = [] for c_vals in c: tt, cc_val, kk = insert(x, [t, c_vals, k], m) cc.append(cc_val) return (tt, cc, kk) else: tt, cc, ier = _fitpack._insert(per, t, c, k, x, m) if ier == 10: raise ValueError("Invalid input data") if ier: raise TypeError("An error occurred") return (tt, cc, k) def splder(tck, n=1): """ Compute the spline representation of the derivative of a given spline Parameters ---------- tck : tuple of (t, c, k) Spline whose derivative to compute n : int, optional Order of derivative to evaluate. Default: 1 Returns ------- tck_der : tuple of (t2, c2, k2) Spline of order k2=k-n representing the derivative of the input spline. Notes ----- .. 
versionadded:: 0.13.0 See Also -------- splantider, splev, spalde Examples -------- This can be used for finding maxima of a curve: >>> from scipy.interpolate import splrep, splder, sproot >>> x = np.linspace(0, 10, 70) >>> y = np.sin(x) >>> spl = splrep(x, y, k=4) Now, differentiate the spline and find the zeros of the derivative. (NB: `sproot` only works for order 3 splines, so we fit an order 4 spline): >>> dspl = splder(spl) >>> sproot(dspl) / np.pi array([ 0.50000001, 1.5 , 2.49999998]) This agrees well with roots :math:`\pi/2 + n\pi` of :math:`\cos(x) = \sin'(x)`. """ if n < 0: return splantider(tck, -n) t, c, k = tck if n > k: raise ValueError(("Order of derivative (n = %r) must be <= " "order of spline (k = %r)") % (n, tck[2])) with np.errstate(invalid='raise', divide='raise'): try: for j in range(n): # See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5 # Compute the denominator in the differentiation formula. dt = t[k+1:-1] - t[1:-k-1] # Compute the new coefficients c = (c[1:-1-k] - c[:-2-k]) * k / dt # Pad coefficient array to same size as knots (FITPACK # convention) c = np.r_[c, [0]*k] # Adjust knots t = t[1:-1] k -= 1 except FloatingPointError: raise ValueError(("The spline has internal repeated knots " "and is not differentiable %d times") % n) return t, c, k def splantider(tck, n=1): """ Compute the spline for the antiderivative (integral) of a given spline. Parameters ---------- tck : tuple of (t, c, k) Spline whose antiderivative to compute n : int, optional Order of antiderivative to evaluate. Default: 1 Returns ------- tck_ader : tuple of (t2, c2, k2) Spline of order k2=k+n representing the antiderivative of the input spline. See Also -------- splder, splev, spalde Notes ----- The `splder` function is the inverse operation of this function. Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo rounding error. .. versionadded:: 0.13.0 Examples -------- >>> from scipy.interpolate import splrep, splder, splantider, splev >>> x = np.linspace(0, np.pi/2, 70) >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2) >>> spl = splrep(x, y) The derivative is the inverse operation of the antiderivative, although some floating point error accumulates: >>> splev(1.7, spl), splev(1.7, splder(splantider(spl))) (array(2.1565429877197317), array(2.1565429877201865)) Antiderivative can be used to evaluate definite integrals: >>> ispl = splantider(spl) >>> splev(np.pi/2, ispl) - splev(0, ispl) 2.2572053588768486 This is indeed an approximation to the complete elliptic integral :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`: >>> from scipy.special import ellipk >>> ellipk(0.8) 2.2572053268208538 """ if n < 0: return splder(tck, -n) t, c, k = tck for j in range(n): # This is the inverse set of operations to splder. # Compute the multiplier in the antiderivative formula. dt = t[k+1:] - t[:-k-1] # Compute the new coefficients c = np.cumsum(c[:-k-1] * dt) / (k + 1) c = np.r_[0, c, [c[-1]]*(k+2)] # New knots t = np.r_[t[0], t, t[-1]] k += 1 return t, c, k
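# ---------------------------------------------------------------------------
# Hedged end-to-end sketch (editor's addition, not part of fitpack.py): how
# the public functions defined above are typically chained together.  The
# imports mirror the module-level functions; the sample data and tolerances
# are illustrative assumptions only.
def _fitpack_usage_sketch():
    import numpy as np
    from scipy.interpolate import splrep, splev, splint, sproot, splder

    x = np.linspace(0, 2 * np.pi, 50)
    y = np.sin(x)

    tck = splrep(x, y, s=0)              # interpolating cubic spline (k=3)
    y_mid = splev(np.pi / 2, tck)        # evaluate near the maximum of sin
    area = splint(0, np.pi, tck)         # definite integral of sin on [0, pi], ~2
    roots = sproot(tck)                  # zeros of the cubic spline
    slope = splev(0.0, splder(tck))      # derivative spline at 0, ~cos(0) = 1

    assert abs(y_mid - 1.0) < 1e-2
    assert abs(area - 2.0) < 1e-2
    assert np.min(np.abs(roots - np.pi)) < 1e-2   # a root close to pi is found
    assert abs(slope - 1.0) < 1e-2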
bsd-3-clause
ryfeus/lambda-packs
Sklearn_scipy_numpy/source/sklearn/manifold/setup.py
99
1243
import os
from os.path import join

import numpy
from numpy.distutils.misc_util import Configuration

from sklearn._build_utils import get_blas_info


def configuration(parent_package="", top_path=None):
    config = Configuration("manifold", parent_package, top_path)
    libraries = []
    if os.name == 'posix':
        libraries.append('m')
    config.add_extension("_utils",
                         sources=["_utils.c"],
                         include_dirs=[numpy.get_include()],
                         libraries=libraries,
                         extra_compile_args=["-O3"])

    cblas_libs, blas_info = get_blas_info()
    eca = blas_info.pop('extra_compile_args', [])
    eca.append("-O4")
    config.add_extension("_barnes_hut_tsne",
                         libraries=cblas_libs,
                         sources=["_barnes_hut_tsne.c"],
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         extra_compile_args=eca,
                         **blas_info)

    return config

if __name__ == "__main__":
    from numpy.distutils.core import setup
    setup(**configuration().todict())
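# ---------------------------------------------------------------------------
# Hedged usage note (editor's addition, not part of the original setup
# script): configuration() is normally consumed either standalone via the
# __main__ block above (e.g. `python setup.py build_ext --inplace`), or by a
# parent package's setup.py through add_subpackage.  The parent package name
# below is an assumption for illustration only.
def _parent_configuration_sketch(parent_package="", top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration("sklearn", parent_package, top_path)
    config.add_subpackage("manifold")   # picks up manifold/setup.py above
    return config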
mit
costypetrisor/scikit-learn
examples/cluster/plot_lena_compress.py
271
2229
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= Vector Quantization Example ========================================================= The classic image processing example, Lena, an 8-bit grayscale, 512 x 512 pixel image, is used here to illustrate how `k`-means can be used for vector quantization. """ print(__doc__) # Code source: Gaël Varoquaux # Modified for documentation by Jaques Grobler # License: BSD 3 clause import numpy as np import scipy as sp import matplotlib.pyplot as plt from sklearn import cluster n_clusters = 5 np.random.seed(0) try: lena = sp.lena() except AttributeError: # Newer versions of scipy have lena in misc from scipy import misc lena = misc.lena() X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4) k_means.fit(X) values = k_means.cluster_centers_.squeeze() labels = k_means.labels_ # create an array from labels and values lena_compressed = np.choose(labels, values) lena_compressed.shape = lena.shape vmin = lena.min() vmax = lena.max() # original lena plt.figure(1, figsize=(3, 2.2)) plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256) # compressed lena plt.figure(2, figsize=(3, 2.2)) plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax) # equal bins lena regular_values = np.linspace(0, 256, n_clusters + 1) regular_labels = np.searchsorted(regular_values, lena) - 1 regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean regular_lena = np.choose(regular_labels.ravel(), regular_values) regular_lena.shape = lena.shape plt.figure(3, figsize=(3, 2.2)) plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax) # histogram plt.figure(4, figsize=(3, 2.2)) plt.clf() plt.axes([.01, .01, .98, .98]) plt.hist(X, bins=256, color='.5', edgecolor='.5') plt.yticks(()) plt.xticks(regular_values) values = np.sort(values) for center_1, center_2 in zip(values[:-1], values[1:]): plt.axvline(.5 * (center_1 + center_2), color='b') for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]): plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--') plt.show()
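Recent SciPy releases no longer ship the Lena image, so the try/except above eventually fails on modern installations. As a hedged, self-contained sketch of the same palette-compression idea, the snippet below runs `k`-means on a synthetic grayscale image instead; the image and cluster count are made up for illustration.

import numpy as np
from sklearn import cluster

rng = np.random.RandomState(0)
image = rng.randint(0, 256, size=(64, 64)).astype(float)  # stand-in for lena

X = image.reshape(-1, 1)                                   # (n_pixels, 1)
k_means = cluster.KMeans(n_clusters=5, n_init=4, random_state=0).fit(X)
palette = k_means.cluster_centers_.squeeze()               # 5 gray levels
compressed = palette[k_means.labels_].reshape(image.shape)

print(compressed.shape, len(np.unique(compressed)))        # (64, 64) 5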
bsd-3-clause
Parisson/Telemeta
telemeta/management/commands/telemeta-export-item-revisions-plot.py
2
2723
from optparse import make_option from django.conf import settings from django.core.management.base import BaseCommand, CommandError from telemeta.models import * import datetime, time, calendar, itertools import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.dates as mdates from matplotlib.backends.backend_pdf import PdfPages SECOND = 1 MINUTE = SECOND * 60 HOUR = MINUTE * 60 DAY = HOUR * 24 MONTH = DAY * 30 class Command(BaseCommand): help = "export MediaItem revisions vs. dates to a matplotlib PDF file" """ info : http://www.geophysique.be/2012/06/14/matplotlib-datetimes-tutorial-03-grouping-sparse-data/ http://matplotlib.org/examples/pylab_examples/date_demo2.html http://matplotlib.org/examples/api/date_demo.html """ args = 'year month day' binning = 7*DAY option_list = BaseCommand.option_list + ( make_option('-y', '--year', dest='year', help='year of the first revision'), make_option('-m', '--month', dest='month', help='month of the first revision'), make_option('-d', '--day', dest='day', help='day of the first revision'), ) def group(self, di): return int(calendar.timegm(di.timetuple()))/self.binning def handle(self, *args, **kwargs): limit_date = datetime.datetime(int(kwargs.get('year')), int(kwargs.get('month')), int(kwargs.get('day'))) years = mdates.YearLocator() months = mdates.MonthLocator(range(1,13), bymonthday=1, interval=3) mondays = mdates.WeekdayLocator(mdates.MONDAY) monthsFmt = mdates.DateFormatter("%b '%y") yearsFmt = mdates.DateFormatter('%Y') revisions = Revision.objects.filter(time__gte=limit_date) list_of_dates = [r.time for r in revisions] grouped_dates = [[datetime.datetime(*time.gmtime(d*self.binning)[:6]), len(list(g))] \ for d,g in itertools.groupby(list_of_dates, self.group)] grouped_dates = zip(*grouped_dates) revs = np.array(grouped_dates[1]) revs_mean = np.mean(revs) fig = plt.figure() ax = fig.add_subplot(111, ylabel='Revisions by week (mean='+str(np.round(revs_mean, 1))+')') ax.plot_date(grouped_dates[0], grouped_dates[1] , '-') ax.xaxis.set_major_locator(months) ax.xaxis.set_major_formatter(monthsFmt) ax.xaxis.set_minor_locator(mondays) ax.autoscale_view() ax.grid(True) fig.autofmt_xdate() plt.savefig('/tmp/telemeta-revisions.png') plt.savefig('/tmp/telemeta-revisions.pdf') #plt.show()
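Django and Telemeta specifics aside, the core of the command above is bucketing revision timestamps into fixed-width bins with itertools.groupby. A minimal sketch of just that binning step follows; it assumes nothing beyond the standard library, and the dates are fabricated.

import calendar
import datetime
import itertools
import time

BIN = 7 * 24 * 3600  # one week, in seconds

def to_bin(dt):
    return int(calendar.timegm(dt.timetuple())) // BIN

dates = [datetime.datetime(2020, 1, 1) + datetime.timedelta(hours=6 * i)
         for i in range(100)]  # stand-in for the revision times

weekly = [(datetime.datetime(*time.gmtime(b * BIN)[:6]), len(list(g)))
          for b, g in itertools.groupby(sorted(dates), key=to_bin)]
for week_start, count in weekly:
    print(week_start.date(), count)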
agpl-3.0
michigraber/scikit-learn
sklearn/svm/setup.py
321
3157
import os from os.path import join import numpy from sklearn._build_utils import get_blas_info def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('svm', parent_package, top_path) config.add_subpackage('tests') # Section LibSVM # we compile both libsvm and libsvm_sparse config.add_library('libsvm-skl', sources=[join('src', 'libsvm', 'libsvm_template.cpp')], depends=[join('src', 'libsvm', 'svm.cpp'), join('src', 'libsvm', 'svm.h')], # Force C++ linking in case gcc is picked up instead # of g++ under windows with some versions of MinGW extra_link_args=['-lstdc++'], ) libsvm_sources = ['libsvm.c'] libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'), join('src', 'libsvm', 'libsvm_template.cpp'), join('src', 'libsvm', 'svm.cpp'), join('src', 'libsvm', 'svm.h')] config.add_extension('libsvm', sources=libsvm_sources, include_dirs=[numpy.get_include(), join('src', 'libsvm')], libraries=['libsvm-skl'], depends=libsvm_depends, ) ### liblinear module cblas_libs, blas_info = get_blas_info() if os.name == 'posix': cblas_libs.append('m') liblinear_sources = ['liblinear.c', join('src', 'liblinear', '*.cpp')] liblinear_depends = [join('src', 'liblinear', '*.h'), join('src', 'liblinear', 'liblinear_helper.c')] config.add_extension('liblinear', sources=liblinear_sources, libraries=cblas_libs, include_dirs=[join('..', 'src', 'cblas'), numpy.get_include(), blas_info.pop('include_dirs', [])], extra_compile_args=blas_info.pop('extra_compile_args', []), depends=liblinear_depends, # extra_compile_args=['-O0 -fno-inline'], ** blas_info) ## end liblinear module # this should go *after* libsvm-skl libsvm_sparse_sources = ['libsvm_sparse.c'] config.add_extension('libsvm_sparse', libraries=['libsvm-skl'], sources=libsvm_sparse_sources, include_dirs=[numpy.get_include(), join("src", "libsvm")], depends=[join("src", "libsvm", "svm.h"), join("src", "libsvm", "libsvm_sparse_helper.c")]) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
bsd-3-clause
michaelpacer/scikit-image
doc/examples/applications/plot_geometric.py
28
3253
""" =============================== Using geometric transformations =============================== In this example, we will see how to use geometric transformations in the context of image processing. """ from __future__ import print_function import math import numpy as np import matplotlib.pyplot as plt from skimage import data from skimage import transform as tf margins = dict(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1) """ Basics ====== Several different geometric transformation types are supported: similarity, affine, projective and polynomial. Geometric transformations can either be created using the explicit parameters (e.g. scale, shear, rotation and translation) or the transformation matrix: First we create a transformation using explicit parameters: """ tform = tf.SimilarityTransform(scale=1, rotation=math.pi / 2, translation=(0, 1)) print(tform.params) """ Alternatively you can define a transformation by the transformation matrix itself: """ matrix = tform.params.copy() matrix[1, 2] = 2 tform2 = tf.SimilarityTransform(matrix) """ These transformation objects can then be used to apply forward and inverse coordinate transformations between the source and destination coordinate systems: """ coord = [1, 0] print(tform2(coord)) print(tform2.inverse(tform(coord))) """ Image warping ============= Geometric transformations can also be used to warp images: """ text = data.text() tform = tf.SimilarityTransform(scale=1, rotation=math.pi / 4, translation=(text.shape[0] / 2, -100)) rotated = tf.warp(text, tform) back_rotated = tf.warp(rotated, tform.inverse) fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3)) fig.subplots_adjust(**margins) plt.gray() ax1.imshow(text) ax1.axis('off') ax2.imshow(rotated) ax2.axis('off') ax3.imshow(back_rotated) ax3.axis('off') """ .. image:: PLOT2RST.current_figure Parameter estimation ==================== In addition to the basic functionality mentioned above you can also estimate the parameters of a geometric transformation using the least-squares method. This can amongst other things be used for image registration or rectification, where you have a set of control points or homologous/corresponding points in two images. Let's assume we want to recognize letters on a photograph which was not taken from the front but at a certain angle. In the simplest case of a plane paper surface the letters are projectively distorted. Simple matching algorithms would not be able to match such symbols. One solution to this problem would be to warp the image so that the distortion is removed and then apply a matching algorithm: """ text = data.text() src = np.array(( (0, 0), (0, 50), (300, 50), (300, 0) )) dst = np.array(( (155, 15), (65, 40), (260, 130), (360, 95) )) tform3 = tf.ProjectiveTransform() tform3.estimate(src, dst) warped = tf.warp(text, tform3, output_shape=(50, 300)) fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(8, 3)) fig.subplots_adjust(**margins) plt.gray() ax1.imshow(text) ax1.plot(dst[:, 0], dst[:, 1], '.r') ax1.axis('off') ax2.imshow(warped) ax2.axis('off') """ .. image:: PLOT2RST.current_figure """ plt.show()
bsd-3-clause
Djabbz/scikit-learn
sklearn/cross_decomposition/tests/test_pls.py
215
11427
import numpy as np from sklearn.utils.testing import (assert_array_almost_equal, assert_array_equal, assert_true, assert_raise_message) from sklearn.datasets import load_linnerud from sklearn.cross_decomposition import pls_ from nose.tools import assert_equal def test_pls(): d = load_linnerud() X = d.data Y = d.target # 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A) # =========================================================== # Compare 2 algo.: nipals vs. svd # ------------------------------ pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1]) pls_bynipals.fit(X, Y) pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1]) pls_bysvd.fit(X, Y) # check equalities of loading (up to the sign of the second column) assert_array_almost_equal( pls_bynipals.x_loadings_, np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5, err_msg="nipals and svd implementation lead to different x loadings") assert_array_almost_equal( pls_bynipals.y_loadings_, np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5, err_msg="nipals and svd implementation lead to different y loadings") # Check PLS properties (with n_components=X.shape[1]) # --------------------------------------------------- plsca = pls_.PLSCanonical(n_components=X.shape[1]) plsca.fit(X, Y) T = plsca.x_scores_ P = plsca.x_loadings_ Wx = plsca.x_weights_ U = plsca.y_scores_ Q = plsca.y_loadings_ Wy = plsca.y_weights_ def check_ortho(M, err_msg): K = np.dot(M.T, M) assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg) # Orthogonality of weights # ~~~~~~~~~~~~~~~~~~~~~~~~ check_ortho(Wx, "x weights are not orthogonal") check_ortho(Wy, "y weights are not orthogonal") # Orthogonality of latent scores # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ check_ortho(T, "x scores are not orthogonal") check_ortho(U, "y scores are not orthogonal") # Check X = TP' and Y = UQ' (with (p == q) components) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # center scale X, Y Xc, Yc, x_mean, y_mean, x_std, y_std =\ pls_._center_scale_xy(X.copy(), Y.copy(), scale=True) assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'") assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'") # Check that rotations on training data lead to scores # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Xr = plsca.transform(X) assert_array_almost_equal(Xr, plsca.x_scores_, err_msg="rotation on X failed") Xr, Yr = plsca.transform(X, Y) assert_array_almost_equal(Xr, plsca.x_scores_, err_msg="rotation on X failed") assert_array_almost_equal(Yr, plsca.y_scores_, err_msg="rotation on Y failed") # "Non regression test" on canonical PLS # -------------------------------------- # The results were checked against the R-package plspm pls_ca = pls_.PLSCanonical(n_components=X.shape[1]) pls_ca.fit(X, Y) x_weights = np.array( [[-0.61330704, 0.25616119, -0.74715187], [-0.74697144, 0.11930791, 0.65406368], [-0.25668686, -0.95924297, -0.11817271]]) assert_array_almost_equal(pls_ca.x_weights_, x_weights) x_rotations = np.array( [[-0.61330704, 0.41591889, -0.62297525], [-0.74697144, 0.31388326, 0.77368233], [-0.25668686, -0.89237972, -0.24121788]]) assert_array_almost_equal(pls_ca.x_rotations_, x_rotations) y_weights = np.array( [[+0.58989127, 0.7890047, 0.1717553], [+0.77134053, -0.61351791, 0.16920272], [-0.23887670, -0.03267062, 0.97050016]]) assert_array_almost_equal(pls_ca.y_weights_, y_weights) y_rotations = np.array( [[+0.58989127, 0.7168115, 0.30665872], [+0.77134053, -0.70791757, 0.19786539], [-0.23887670, 
-0.00343595, 0.94162826]]) assert_array_almost_equal(pls_ca.y_rotations_, y_rotations) # 2) Regression PLS (PLS2): "Non regression test" # =============================================== # The results were checked against the R-packages plspm, misOmics and pls pls_2 = pls_.PLSRegression(n_components=X.shape[1]) pls_2.fit(X, Y) x_weights = np.array( [[-0.61330704, -0.00443647, 0.78983213], [-0.74697144, -0.32172099, -0.58183269], [-0.25668686, 0.94682413, -0.19399983]]) assert_array_almost_equal(pls_2.x_weights_, x_weights) x_loadings = np.array( [[-0.61470416, -0.24574278, 0.78983213], [-0.65625755, -0.14396183, -0.58183269], [-0.51733059, 1.00609417, -0.19399983]]) assert_array_almost_equal(pls_2.x_loadings_, x_loadings) y_weights = np.array( [[+0.32456184, 0.29892183, 0.20316322], [+0.42439636, 0.61970543, 0.19320542], [-0.13143144, -0.26348971, -0.17092916]]) assert_array_almost_equal(pls_2.y_weights_, y_weights) y_loadings = np.array( [[+0.32456184, 0.29892183, 0.20316322], [+0.42439636, 0.61970543, 0.19320542], [-0.13143144, -0.26348971, -0.17092916]]) assert_array_almost_equal(pls_2.y_loadings_, y_loadings) # 3) Another non-regression test of Canonical PLS on random dataset # ================================================================= # The results were checked against the R-package plspm n = 500 p_noise = 10 q_noise = 5 # 2 latents vars: np.random.seed(11) l1 = np.random.normal(size=n) l2 = np.random.normal(size=n) latents = np.array([l1, l1, l2, l2]).T X = latents + np.random.normal(size=4 * n).reshape((n, 4)) Y = latents + np.random.normal(size=4 * n).reshape((n, 4)) X = np.concatenate( (X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1) Y = np.concatenate( (Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1) np.random.seed(None) pls_ca = pls_.PLSCanonical(n_components=3) pls_ca.fit(X, Y) x_weights = np.array( [[0.65803719, 0.19197924, 0.21769083], [0.7009113, 0.13303969, -0.15376699], [0.13528197, -0.68636408, 0.13856546], [0.16854574, -0.66788088, -0.12485304], [-0.03232333, -0.04189855, 0.40690153], [0.1148816, -0.09643158, 0.1613305], [0.04792138, -0.02384992, 0.17175319], [-0.06781, -0.01666137, -0.18556747], [-0.00266945, -0.00160224, 0.11893098], [-0.00849528, -0.07706095, 0.1570547], [-0.00949471, -0.02964127, 0.34657036], [-0.03572177, 0.0945091, 0.3414855], [0.05584937, -0.02028961, -0.57682568], [0.05744254, -0.01482333, -0.17431274]]) assert_array_almost_equal(pls_ca.x_weights_, x_weights) x_loadings = np.array( [[0.65649254, 0.1847647, 0.15270699], [0.67554234, 0.15237508, -0.09182247], [0.19219925, -0.67750975, 0.08673128], [0.2133631, -0.67034809, -0.08835483], [-0.03178912, -0.06668336, 0.43395268], [0.15684588, -0.13350241, 0.20578984], [0.03337736, -0.03807306, 0.09871553], [-0.06199844, 0.01559854, -0.1881785], [0.00406146, -0.00587025, 0.16413253], [-0.00374239, -0.05848466, 0.19140336], [0.00139214, -0.01033161, 0.32239136], [-0.05292828, 0.0953533, 0.31916881], [0.04031924, -0.01961045, -0.65174036], [0.06172484, -0.06597366, -0.1244497]]) assert_array_almost_equal(pls_ca.x_loadings_, x_loadings) y_weights = np.array( [[0.66101097, 0.18672553, 0.22826092], [0.69347861, 0.18463471, -0.23995597], [0.14462724, -0.66504085, 0.17082434], [0.22247955, -0.6932605, -0.09832993], [0.07035859, 0.00714283, 0.67810124], [0.07765351, -0.0105204, -0.44108074], [-0.00917056, 0.04322147, 0.10062478], [-0.01909512, 0.06182718, 0.28830475], [0.01756709, 0.04797666, 0.32225745]]) assert_array_almost_equal(pls_ca.y_weights_, y_weights) 
y_loadings = np.array( [[0.68568625, 0.1674376, 0.0969508], [0.68782064, 0.20375837, -0.1164448], [0.11712173, -0.68046903, 0.12001505], [0.17860457, -0.6798319, -0.05089681], [0.06265739, -0.0277703, 0.74729584], [0.0914178, 0.00403751, -0.5135078], [-0.02196918, -0.01377169, 0.09564505], [-0.03288952, 0.09039729, 0.31858973], [0.04287624, 0.05254676, 0.27836841]]) assert_array_almost_equal(pls_ca.y_loadings_, y_loadings) # Orthogonality of weights # ~~~~~~~~~~~~~~~~~~~~~~~~ check_ortho(pls_ca.x_weights_, "x weights are not orthogonal") check_ortho(pls_ca.y_weights_, "y weights are not orthogonal") # Orthogonality of latent scores # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ check_ortho(pls_ca.x_scores_, "x scores are not orthogonal") check_ortho(pls_ca.y_scores_, "y scores are not orthogonal") def test_PLSSVD(): # Let's check the PLSSVD doesn't return all possible component but just # the specificied number d = load_linnerud() X = d.data Y = d.target n_components = 2 for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]: pls = clf(n_components=n_components) pls.fit(X, Y) assert_equal(n_components, pls.y_scores_.shape[1]) def test_univariate_pls_regression(): # Ensure 1d Y is correctly interpreted d = load_linnerud() X = d.data Y = d.target clf = pls_.PLSRegression() # Compare 1d to column vector model1 = clf.fit(X, Y[:, 0]).coef_ model2 = clf.fit(X, Y[:, :1]).coef_ assert_array_almost_equal(model1, model2) def test_predict_transform_copy(): # check that the "copy" keyword works d = load_linnerud() X = d.data Y = d.target clf = pls_.PLSCanonical() X_copy = X.copy() Y_copy = Y.copy() clf.fit(X, Y) # check that results are identical with copy assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False)) assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False)) # check also if passing Y assert_array_almost_equal(clf.transform(X, Y), clf.transform(X.copy(), Y.copy(), copy=False)) # check that copy doesn't destroy # we do want to check exact equality here assert_array_equal(X_copy, X) assert_array_equal(Y_copy, Y) # also check that mean wasn't zero before (to make sure we didn't touch it) assert_true(np.all(X.mean(axis=0) != 0)) def test_scale(): d = load_linnerud() X = d.data Y = d.target # causes X[:, -1].std() to be zero X[:, -1] = 1.0 for clf in [pls_.PLSCanonical(), pls_.PLSRegression(), pls_.PLSSVD()]: clf.set_params(scale=True) clf.fit(X, Y) def test_pls_errors(): d = load_linnerud() X = d.data Y = d.target for clf in [pls_.PLSCanonical(), pls_.PLSRegression(), pls_.PLSSVD()]: clf.n_components = 4 assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
bsd-3-clause
lmallin/coverage_test
python_venv/lib/python2.7/site-packages/pandas/tests/io/test_feather.py
3
4068
""" test feather-format compat """ import pytest feather = pytest.importorskip('feather') import numpy as np import pandas as pd from pandas.io.feather_format import to_feather, read_feather from feather import FeatherError from pandas.util.testing import assert_frame_equal, ensure_clean @pytest.mark.single class TestFeather(object): def check_error_on_write(self, df, exc): # check that we are raising the exception # on writing with pytest.raises(exc): with ensure_clean() as path: to_feather(df, path) def check_round_trip(self, df): with ensure_clean() as path: to_feather(df, path) result = read_feather(path) assert_frame_equal(result, df) def test_error(self): for obj in [pd.Series([1, 2, 3]), 1, 'foo', pd.Timestamp('20130101'), np.array([1, 2, 3])]: self.check_error_on_write(obj, ValueError) def test_basic(self): df = pd.DataFrame({'string': list('abc'), 'int': list(range(1, 4)), 'uint': np.arange(3, 6).astype('u1'), 'float': np.arange(4.0, 7.0, dtype='float64'), 'float_with_null': [1., np.nan, 3], 'bool': [True, False, True], 'bool_with_null': [True, np.nan, False], 'cat': pd.Categorical(list('abc')), 'dt': pd.date_range('20130101', periods=3), 'dttz': pd.date_range('20130101', periods=3, tz='US/Eastern'), 'dt_with_null': [pd.Timestamp('20130101'), pd.NaT, pd.Timestamp('20130103')], 'dtns': pd.date_range('20130101', periods=3, freq='ns')}) assert df.dttz.dtype.tz.zone == 'US/Eastern' self.check_round_trip(df) def test_strided_data_issues(self): # strided data issuehttps://github.com/wesm/feather/issues/97 df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list('abc')) self.check_error_on_write(df, FeatherError) def test_duplicate_columns(self): # https://github.com/wesm/feather/issues/53 # not currently able to handle duplicate columns df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list('aaa')).copy() self.check_error_on_write(df, ValueError) def test_stringify_columns(self): df = pd.DataFrame(np.arange(12).reshape(4, 3)).copy() self.check_error_on_write(df, ValueError) def test_unsupported(self): # period df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)}) self.check_error_on_write(df, ValueError) df = pd.DataFrame({'a': pd.timedelta_range('1 day', periods=3)}) self.check_error_on_write(df, FeatherError) # non-strings df = pd.DataFrame({'a': ['a', 1, 2.0]}) self.check_error_on_write(df, ValueError) def test_write_with_index(self): df = pd.DataFrame({'A': [1, 2, 3]}) self.check_round_trip(df) # non-default index for index in [[2, 3, 4], pd.date_range('20130101', periods=3), list('abc'), [1, 3, 4], pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]), ]: df.index = index self.check_error_on_write(df, ValueError) # index with meta-data df.index = [0, 1, 2] df.index.name = 'foo' self.check_error_on_write(df, ValueError) # column multi-index df.index = [0, 1, 2] df.columns = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]), self.check_error_on_write(df, ValueError)
mit
MaxHalford/Prince
setup.py
1
3727
#!/usr/bin/env python # -*- coding: utf-8 -*- # Note: To use the 'upload' functionality of this file, you must: # $ pip install twine import io import os import sys from shutil import rmtree from setuptools import find_packages, setup, Command # Package meta-data. NAME = 'prince' DESCRIPTION = 'Statistical factor analysis in Python' LONG_DESCRIPTION_CONTENT_TYPE = 'text/markdown' URL = 'https://github.com/MaxHalford/prince' EMAIL = 'maxhalford25@gmail.com' AUTHOR = 'Max Halford' REQUIRES_PYTHON = '>=3.4.0' VERSION = None # What packages are required for this module to be executed? REQUIRED = [ 'matplotlib>=3.0.2', 'numpy>=1.16.1', 'pandas>=0.24.0', 'scipy>=1.1.0', 'scikit-learn>=0.20.1' ] # The rest you shouldn't have to touch too much :) # ------------------------------------------------ # Except, perhaps the License and Trove Classifiers! # If you do change the License, remember to change the Trove Classifier for that! here = os.path.abspath(os.path.dirname(__file__)) # Import the README and use it as the long-description. # Note: this will only work if 'README.rst' is present in your MANIFEST.in file! with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f: long_description = '\n' + f.read() # Load the package's __version__.py module as a dictionary. about = {} if not VERSION: with open(os.path.join(here, NAME, '__version__.py')) as f: exec(f.read(), about) else: about['__version__'] = VERSION class UploadCommand(Command): """Support setup.py upload.""" description = 'Build and publish the package.' user_options = [] @staticmethod def status(s): """Prints things in bold.""" print('\033[1m{0}\033[0m'.format(s)) def initialize_options(self): pass def finalize_options(self): pass def run(self): try: self.status('Removing previous builds…') rmtree(os.path.join(here, 'dist')) except OSError: pass self.status('Building Source and Wheel (universal) distribution…') os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable)) self.status('Uploading the package to PyPi via Twine…') os.system('twine upload dist/*') self.status('Pushing git tags…') os.system('git tag v{0}'.format(about['__version__'])) os.system('git push --tags') sys.exit() # Where the magic happens: setup( name=NAME, version=about['__version__'], description=DESCRIPTION, long_description=long_description, long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE, author=AUTHOR, author_email=EMAIL, python_requires=REQUIRES_PYTHON, url=URL, packages=find_packages(exclude=('tests',)), # If your package is a single module, use this instead of 'packages': # py_modules=['mypackage'], # entry_points={ # 'console_scripts': ['mycli=mymodule:cli'], # }, install_requires=REQUIRED, include_package_data=True, license='MIT', classifiers=[ # Trove classifiers # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers 'License :: OSI Approved :: MIT License', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy' ], # $ setup.py publish support. cmdclass={ 'upload': UploadCommand, }, )
mit
hsamleandro/Salmonella_pub
scripts/makeconsensus.py
1
2884
#!/usr/bin/python import argparse import pandas as pd import os from Bio import SeqIO from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord def numpos(var): newpos = [] for i in range(len(var)): if len(var.ix[i,'REF'])==1: newpos.append([var.ix[i,'POS']-1]) else: a=[] for x in range(len(var.ix[i,'REF'])): a.append(var.ix[i,'POS']+x-1) newpos.append(a) return newpos def consensus_list(ref,var): genome = str(ref.seq) seq = [] snps=[] index = 0 for i in range(len(var)): entry = var.ix[i] r = entry['REF'] v = entry['ALT'] positions = entry['newpos'] if len(positions)==1: seq.append(genome[index:int(positions[0])]) index = int(positions[0]+1) seq.append(v.lower()) else: seq.append(genome[index:int(positions[0])]) index = int(positions[-1]+1) seq.append(v.lower()) seq.append(genome[index:]) return seq def joinList(consensus,var): seq = "" snps = [] index=0 varcount = 0 snps_ref = [] ref_pos = [] for i in consensus: if i.islower(): for x in range(len(i)): snps_ref.append(varcount) snps.append(index+1+x) varcount+=1 seq=seq+i.upper() index = len(seq)-1 for j in snps_ref: ref_pos.append(var.ix[j,'POS']) return seq,snps,ref_pos def main(): parser = argparse.ArgumentParser() parser.add_argument('-r','--reference',help='Input reference fasta',required=True) parser.add_argument('-f','--file',help='Input vcf formatted SNPS file',required=True) parser.add_argument('-o','--output',help='Output folder',required=True) args=parser.parse_args() reference = list(SeqIO.parse(args.reference,"fasta"))[0] vcfFile = args.file outputfolder = args.output sample_name = vcfFile.split('.vcf')[0] variants = pd.read_csv(vcfFile,skiprows=23,sep='\t')[['POS','ID','REF','ALT']] variants = variants.drop_duplicates() variants = variants.sort_values('POS') variants = variants.reset_index() variants['REFLEN']=variants['REF'].apply(len) variants['newpos']=numpos(variants) if not os.path.exists(outputfolder): os.mkdir(outputfolder) consensus = consensus_list(reference,variants) cns_string,snp_locations,ref_snp_locs = joinList(consensus,variants) print(sample_name) samplename = sample_name.split("/")[-1].split('.')[0] print(samplename) cns_seq = Seq(cns_string) record = SeqRecord(cns_seq, id=samplename, name=sample_name, description="consensus genome") output_handle = open(outputfolder + "/" + samplename + ".fasta", "w") SeqIO.write(record, output_handle, "fasta") output_handle.close() main()
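The heart of makeconsensus.py above is splicing ALT alleles into the reference sequence at each 1-based VCF POS, marking substituted bases in lower case. A dependency-free sketch of that substitution step is shown below, on a toy sequence and toy variants rather than the script's actual I/O.

def splice_variants(reference, variants):
    """variants: iterable of (pos, ref, alt), 1-based pos, sorted by pos."""
    pieces, index = [], 0
    for pos, ref, alt in variants:
        start = pos - 1                     # 0-based start of the REF allele
        pieces.append(reference[index:start])
        pieces.append(alt.lower())          # mark substituted bases
        index = start + len(ref)
    pieces.append(reference[index:])
    return ''.join(pieces)

print(splice_variants('ACGTACGTAC', [(2, 'C', 'T'), (5, 'AC', 'A')]))
# -> 'AtGTaGTAC'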
gpl-2.0
rishikksh20/scikit-learn
sklearn/tests/test_kernel_approximation.py
78
7586
import numpy as np from scipy.sparse import csr_matrix from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_array_almost_equal, assert_raises from sklearn.utils.testing import assert_less_equal from sklearn.metrics.pairwise import kernel_metrics from sklearn.kernel_approximation import RBFSampler from sklearn.kernel_approximation import AdditiveChi2Sampler from sklearn.kernel_approximation import SkewedChi2Sampler from sklearn.kernel_approximation import Nystroem from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel # generate data rng = np.random.RandomState(0) X = rng.random_sample(size=(300, 50)) Y = rng.random_sample(size=(300, 50)) X /= X.sum(axis=1)[:, np.newaxis] Y /= Y.sum(axis=1)[:, np.newaxis] def test_additive_chi2_sampler(): # test that AdditiveChi2Sampler approximates kernel on random data # compute exact kernel # abbreviations for easier formula X_ = X[:, np.newaxis, :] Y_ = Y[np.newaxis, :, :] large_kernel = 2 * X_ * Y_ / (X_ + Y_) # reduce to n_samples_x x n_samples_y by summing over features kernel = (large_kernel.sum(axis=2)) # approximate kernel mapping transform = AdditiveChi2Sampler(sample_steps=3) X_trans = transform.fit_transform(X) Y_trans = transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) assert_array_almost_equal(kernel, kernel_approx, 1) X_sp_trans = transform.fit_transform(csr_matrix(X)) Y_sp_trans = transform.transform(csr_matrix(Y)) assert_array_equal(X_trans, X_sp_trans.A) assert_array_equal(Y_trans, Y_sp_trans.A) # test error is raised on negative input Y_neg = Y.copy() Y_neg[0, 0] = -1 assert_raises(ValueError, transform.transform, Y_neg) # test error on invalid sample_steps transform = AdditiveChi2Sampler(sample_steps=4) assert_raises(ValueError, transform.fit, X) # test that the sample interval is set correctly sample_steps_available = [1, 2, 3] for sample_steps in sample_steps_available: # test that the sample_interval is initialized correctly transform = AdditiveChi2Sampler(sample_steps=sample_steps) assert_equal(transform.sample_interval, None) # test that the sample_interval is changed in the fit method transform.fit(X) assert_not_equal(transform.sample_interval_, None) # test that the sample_interval is set correctly sample_interval = 0.3 transform = AdditiveChi2Sampler(sample_steps=4, sample_interval=sample_interval) assert_equal(transform.sample_interval, sample_interval) transform.fit(X) assert_equal(transform.sample_interval_, sample_interval) def test_skewed_chi2_sampler(): # test that RBFSampler approximates kernel on random data # compute exact kernel c = 0.03 # abbreviations for easier formula X_c = (X + c)[:, np.newaxis, :] Y_c = (Y + c)[np.newaxis, :, :] # we do it in log-space in the hope that it's more stable # this array is n_samples_x x n_samples_y big x n_features log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) 
- np.log(X_c + Y_c)) # reduce to n_samples_x x n_samples_y by summing over features in log-space kernel = np.exp(log_kernel.sum(axis=2)) # approximate kernel mapping transform = SkewedChi2Sampler(skewedness=c, n_components=1000, random_state=42) X_trans = transform.fit_transform(X) Y_trans = transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) assert_array_almost_equal(kernel, kernel_approx, 1) # test error is raised on negative input Y_neg = Y.copy() Y_neg[0, 0] = -1 assert_raises(ValueError, transform.transform, Y_neg) def test_rbf_sampler(): # test that RBFSampler approximates kernel on random data # compute exact kernel gamma = 10. kernel = rbf_kernel(X, Y, gamma=gamma) # approximate kernel mapping rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42) X_trans = rbf_transform.fit_transform(X) Y_trans = rbf_transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) error = kernel - kernel_approx assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased np.abs(error, out=error) assert_less_equal(np.max(error), 0.1) # nothing too far off assert_less_equal(np.mean(error), 0.05) # mean is fairly close def test_input_validation(): # Regression test: kernel approx. transformers should work on lists # No assertions; the old versions would simply crash X = [[1, 2], [3, 4], [5, 6]] AdditiveChi2Sampler().fit(X).transform(X) SkewedChi2Sampler().fit(X).transform(X) RBFSampler().fit(X).transform(X) X = csr_matrix(X) RBFSampler().fit(X).transform(X) def test_nystroem_approximation(): # some basic tests rnd = np.random.RandomState(0) X = rnd.uniform(size=(10, 4)) # With n_components = n_samples this is exact X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X) K = rbf_kernel(X) assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K) trans = Nystroem(n_components=2, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert_equal(X_transformed.shape, (X.shape[0], 2)) # test callable kernel linear_kernel = lambda X, Y: np.dot(X, Y.T) trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert_equal(X_transformed.shape, (X.shape[0], 2)) # test that available kernels fit and transform kernels_available = kernel_metrics() for kern in kernels_available: trans = Nystroem(n_components=2, kernel=kern, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert_equal(X_transformed.shape, (X.shape[0], 2)) def test_nystroem_singular_kernel(): # test that nystroem works with singular kernel matrix rng = np.random.RandomState(0) X = rng.rand(10, 20) X = np.vstack([X] * 2) # duplicate samples gamma = 100 N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X) X_transformed = N.transform(X) K = rbf_kernel(X, gamma=gamma) assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T)) assert_true(np.all(np.isfinite(Y))) def test_nystroem_poly_kernel_params(): # Non-regression: Nystroem should pass other parameters beside gamma. rnd = np.random.RandomState(37) X = rnd.uniform(size=(10, 4)) K = polynomial_kernel(X, degree=3.1, coef0=.1) nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0], degree=3.1, coef0=.1) X_transformed = nystroem.fit_transform(X) assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K) def test_nystroem_callable(): # Test Nystroem on a callable. 
rnd = np.random.RandomState(42) n_samples = 10 X = rnd.uniform(size=(n_samples, 4)) def logging_histogram_kernel(x, y, log): """Histogram kernel that writes to a log.""" log.append(1) return np.minimum(x, y).sum() kernel_log = [] X = list(X) # test input validation Nystroem(kernel=logging_histogram_kernel, n_components=(n_samples - 1), kernel_params={'log': kernel_log}).fit(X) assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
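All of these tests share one pattern: transform the data with the approximate feature map, take the dot product of the transformed arrays, and compare it to the exact kernel. A hedged standalone sketch of that pattern for RBFSampler follows; the data sizes and gamma are arbitrary.

import numpy as np
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.RandomState(0)
X = rng.random_sample((100, 20))

gamma = 1.0
exact = rbf_kernel(X, gamma=gamma)

feats = RBFSampler(gamma=gamma, n_components=2000, random_state=0).fit_transform(X)
approx = feats @ feats.T

print(np.abs(exact - approx).max())  # small, and shrinks as n_components grows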
bsd-3-clause
rraadd88/dms2dfe
dms2dfe/lib/plot_pdb.py
2
1725
#!usr/bin/python # Copyright 2016, Rohan Dandage <rraadd_8@hotmail.com,rohan@igib.in> # This program is distributed under General Public License v. 3. """ ================================ ``plot_pdb`` ================================ """ from Bio.PDB import PDBParser,PDBIO from collections import Counter import re import pandas as pd from scipy import optimize from multiprocessing import Pool import numpy as np import matplotlib import matplotlib.pyplot as plt matplotlib.style.use('ggplot') import seaborn as sns import logging logging.basicConfig(format='[%(asctime)s] %(levelname)s\tfrom %(filename)s in %(funcName)s(..): %(message)s',level=logging.DEBUG) # filename=cfg_xls_fh+'.log' def vector2bfactor(vector,pdb_fh,pdb_clrd_fh): """ Incorporates vector with values to the B-factor of PDB file. :param vector: vector with values :param pdb_fh: path of input PDB file :param pdb_clrd_fh: path of output PDB file """ aas_21_3letter=['ALA','ARG','ASN','ASP','CYS','GLN','GLU','GLY','HIS','ILE', 'LEU','LYS','MET','PHE','PRO','SER','THR','TRP','TYR','VAL'] pdb_parser=PDBParser() pdb_data=pdb_parser.get_structure("pdb_name",pdb_fh) for model in pdb_data: for chain in model: for residue in chain: if residue.get_resname() in aas_21_3letter: #only aas for atom in residue: #print residue.id[1] # break if residue.id[1]<=len(vector): atom.set_bfactor(vector[residue.id[1]-1]) #residue.id is 1 based count pdb_io = PDBIO() pdb_io.set_structure(pdb_data) pdb_io.save(pdb_clrd_fh)
gpl-3.0
weidel-p/nest-simulator
pynest/examples/plot_weight_matrices.py
9
6702
# -*- coding: utf-8 -*- # # plot_weight_matrices.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. """ Plot weight matrices example ---------------------------- This example demonstrates how to extract the connection strength for all the synapses among two populations of neurons and gather these values in weight matrices for further analysis and visualization. All connection types between these populations are considered, i.e., four weight matrices are created and plotted. """ ############################################################################### # First, we import all necessary modules to extract, handle and plot # the connectivity matrices import numpy as np import matplotlib.pyplot as plt import nest import matplotlib.gridspec as gridspec from mpl_toolkits.axes_grid1 import make_axes_locatable ############################################################################### # We now specify a function to extract and plot weight matrices for all # connections among `E_neurons` and `I_neurons`. # # We initialize all the matrices, whose dimensionality is determined by the # number of elements in each population. # Since in this example, we have 2 populations (E/I), :math:`2^2` possible # synaptic connections exist (EE, EI, IE, II). def plot_weight_matrices(E_neurons, I_neurons): W_EE = np.zeros([len(E_neurons), len(E_neurons)]) W_EI = np.zeros([len(I_neurons), len(E_neurons)]) W_IE = np.zeros([len(E_neurons), len(I_neurons)]) W_II = np.zeros([len(I_neurons), len(I_neurons)]) a_EE = nest.GetConnections(E_neurons, E_neurons) ''' Using `get`, we can extract the value of the connection weight, for all the connections between these populations ''' c_EE = a_EE.weight ''' Repeat the two previous steps for all other connection types ''' a_EI = nest.GetConnections(I_neurons, E_neurons) c_EI = a_EI.weight a_IE = nest.GetConnections(E_neurons, I_neurons) c_IE = a_IE.weight a_II = nest.GetConnections(I_neurons, I_neurons) c_II = a_II.weight ''' We now iterate through the range of all connections of each type. To populate the corresponding weight matrix, we begin by identifying the source-node_id (by using .source) and the target-node_id. For each node_id, we subtract the minimum node_id within the corresponding population, to assure the matrix indices range from 0 to the size of the population. After determining the matrix indices [i, j], for each connection object, the corresponding weight is added to the entry W[i,j]. The procedure is then repeated for all the different connection types. 
''' a_EE_src = a_EE.source a_EE_trg = a_EE.target a_EI_src = a_EI.source a_EI_trg = a_EI.target a_IE_src = a_IE.source a_IE_trg = a_IE.target a_II_src = a_II.source a_II_trg = a_II.target for idx in range(len(a_EE)): W_EE[a_EE_src[idx] - min(E_neurons), a_EE_trg[idx] - min(E_neurons)] += c_EE[idx] for idx in range(len(a_EI)): W_EI[a_EI_src[idx] - min(I_neurons), a_EI_trg[idx] - min(E_neurons)] += c_EI[idx] for idx in range(len(a_IE)): W_IE[a_IE_src[idx] - min(E_neurons), a_IE_trg[idx] - min(I_neurons)] += c_IE[idx] for idx in range(len(a_II)): W_II[a_II_src[idx] - min(I_neurons), a_II_trg[idx] - min(I_neurons)] += c_II[idx] fig = plt.figure() fig.suptitle('Weight matrices', fontsize=14) gs = gridspec.GridSpec(4, 4) ax1 = plt.subplot(gs[:-1, :-1]) ax2 = plt.subplot(gs[:-1, -1]) ax3 = plt.subplot(gs[-1, :-1]) ax4 = plt.subplot(gs[-1, -1]) plt1 = ax1.imshow(W_EE, cmap='jet') divider = make_axes_locatable(ax1) cax = divider.append_axes("right", "5%", pad="3%") plt.colorbar(plt1, cax=cax) ax1.set_title('W_{EE}') plt.tight_layout() plt2 = ax2.imshow(W_IE) plt2.set_cmap('jet') divider = make_axes_locatable(ax2) cax = divider.append_axes("right", "5%", pad="3%") plt.colorbar(plt2, cax=cax) ax2.set_title('W_{EI}') plt.tight_layout() plt3 = ax3.imshow(W_EI) plt3.set_cmap('jet') divider = make_axes_locatable(ax3) cax = divider.append_axes("right", "5%", pad="3%") plt.colorbar(plt3, cax=cax) ax3.set_title('W_{IE}') plt.tight_layout() plt4 = ax4.imshow(W_II) plt4.set_cmap('jet') divider = make_axes_locatable(ax4) cax = divider.append_axes("right", "5%", pad="3%") plt.colorbar(plt4, cax=cax) ax4.set_title('W_{II}') plt.tight_layout() ################################################################################# # The script iterates through the list of all connections of each type. # To populate the corresponding weight matrix, we identify the source-node_id # (first element of each connection object, `n[0]`) and the target-node_id (second # element of each connection object, `n[1]`). # For each `node_id`, we subtract the minimum `node_id` within the corresponding # population, to assure the matrix indices range from 0 to the size of the # population. # # After determining the matrix indices `[i, j]`, for each connection object, the # corresponding weight is added to the entry `W[i,j]`. The procedure is then # repeated for all the different connection types. # # We then plot the figure, specifying the properties we want. For example, we # can display all the weight matrices in a single figure, which requires us to # use ``GridSpec`` to specify the spatial arrangement of the axes. # A subplot is subsequently created for each connection type. Using ``imshow``, # we can visualize the weight matrix in the corresponding axis. We can also # specify the colormap for this image. # Using the ``axis_divider`` module from ``mpl_toolkits``, we can allocate a small # extra space on the right of the current axis, which we reserve for a # colorbar. # We can set the title of each axis and adjust the axis subplot parameters. # Finally, the last three steps are repeated for each synapse type.
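Stripped of the NEST calls, the matrix-filling loops above are a scatter-add of (source, target, weight) triples; NumPy's np.add.at does the same thing, including accumulation of duplicate index pairs. A minimal sketch with fabricated connection data:

import numpy as np

n_pre, n_post = 5, 4
sources = np.array([0, 0, 2, 4, 4])   # pre-synaptic indices (already 0-based)
targets = np.array([1, 3, 0, 2, 2])   # post-synaptic indices
weights = np.array([0.5, 1.0, -0.3, 0.2, 0.2])

W = np.zeros((n_pre, n_post))
np.add.at(W, (sources, targets), weights)  # duplicates at (4, 2) are summed

print(W[4, 2])  # 0.4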
gpl-2.0
pavelchristof/gomoku-ai
tensorflow/examples/learn/iris_custom_decay_dnn.py
37
3774
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example of DNNClassifier for Iris plant dataset, with exponential decay.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from sklearn import datasets from sklearn import metrics from sklearn import model_selection import tensorflow as tf X_FEATURE = 'x' # Name of the input feature. def my_model(features, labels, mode): """DNN with three hidden layers.""" # Create three fully connected layers respectively of size 10, 20, and 10. net = features[X_FEATURE] for units in [10, 20, 10]: net = tf.layers.dense(net, units=units, activation=tf.nn.relu) # Compute logits (1 per class). logits = tf.layers.dense(net, 3, activation=None) # Compute predictions. predicted_classes = tf.argmax(logits, 1) if mode == tf.estimator.ModeKeys.PREDICT: predictions = { 'class': predicted_classes, 'prob': tf.nn.softmax(logits) } return tf.estimator.EstimatorSpec(mode, predictions=predictions) # Convert the labels to a one-hot tensor of shape (length of features, 3) and # with a on-value of 1 for each one-hot vector of length 3. onehot_labels = tf.one_hot(labels, 3, 1, 0) # Compute loss. loss = tf.losses.softmax_cross_entropy( onehot_labels=onehot_labels, logits=logits) # Create training op with exponentially decaying learning rate. if mode == tf.estimator.ModeKeys.TRAIN: global_step = tf.train.get_global_step() learning_rate = tf.train.exponential_decay( learning_rate=0.1, global_step=global_step, decay_steps=100, decay_rate=0.001) optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate) train_op = optimizer.minimize(loss, global_step=global_step) return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op) # Compute evaluation metrics. eval_metric_ops = { 'accuracy': tf.metrics.accuracy( labels=labels, predictions=predicted_classes) } return tf.estimator.EstimatorSpec( mode, loss=loss, eval_metric_ops=eval_metric_ops) def main(unused_argv): iris = datasets.load_iris() x_train, x_test, y_train, y_test = model_selection.train_test_split( iris.data, iris.target, test_size=0.2, random_state=42) classifier = tf.estimator.Estimator(model_fn=my_model) # Train. train_input_fn = tf.estimator.inputs.numpy_input_fn( x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True) classifier.train(input_fn=train_input_fn, steps=1000) # Predict. test_input_fn = tf.estimator.inputs.numpy_input_fn( x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False) predictions = classifier.predict(input_fn=test_input_fn) y_predicted = np.array(list(p['class'] for p in predictions)) y_predicted = y_predicted.reshape(np.array(y_test).shape) # Score with sklearn. score = metrics.accuracy_score(y_test, y_predicted) print('Accuracy (sklearn): {0:f}'.format(score)) # Score with tensorflow. scores = classifier.evaluate(input_fn=test_input_fn) print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy'])) if __name__ == '__main__': tf.app.run()
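The exponential decay configured above follows learning_rate * decay_rate ** (global_step / decay_steps) in its default (non-staircase) form, and with decay_rate=0.001 the rate collapses very quickly. A small sketch of that schedule, handy for sanity-checking the chosen constants before training:

learning_rate, decay_steps, decay_rate = 0.1, 100, 0.001

def decayed_lr(global_step):
    return learning_rate * decay_rate ** (global_step / decay_steps)

for step in (0, 50, 100, 500, 1000):
    print(step, decayed_lr(step))
# 0 -> 0.1, 100 -> 1e-4, 1000 -> 1e-31 with these settings.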
apache-2.0
Tjorriemorrie/trading
18_theoryofruns/runs.py
1
2893
import logging import numpy as np import argparse import pandas as pd from time import time from pprint import pprint from random import random, choice, shuffle, randint from main import loadData, loadQ, saveQ, getBackgroundKnowledge, summarizeActions from world import DATA, PERIODS, ACTIONS, getState, getReward ''' https://www.inovancetech.com/strategyEvaluation2.html ''' ''' 2015-03-20 15:09:19,521 root WARNING probabilities: 2015-03-20 15:09:19,522 root INFO 1: 12.50% every 8 2015-03-20 15:09:19,522 root INFO 2: 6.25% every 16 2015-03-20 15:09:19,522 root INFO 3: 3.12% every 32 2015-03-20 15:09:19,522 root INFO 4: 1.56% every 64 2015-03-20 15:09:19,522 root INFO 5: 0.78% every 128 2015-03-20 15:09:19,522 root INFO 6: 0.39% every 256 2015-03-20 15:09:19,522 root INFO 7: 0.20% every 512 2015-03-20 15:09:19,522 root INFO 8: 0.10% every 1024 2015-03-20 15:09:19,522 root INFO 9: 0.05% every 2048 1849 1: 231 2: 116 3: 58 4: 29 5: 14 6: 7 7: 4 8: 2 9: 1 {'False_1': 248, 'False_2': 120, 'False_3': 66, 'False_4': 20, 'False_5': 14, 'False_6': 3, 'False_7': 2, 'False_8': 3, 'True_1': 234, 'True_2': 115, 'True_3': 64, 'True_4': 32, 'True_5': 21, 'True_6': 5, 'True_7': 3, 'True_8': 2} 1 1 2 2 3 4 4 8 5 16 6 32 7 64 8 128 9 256 ''' def main(): for info in DATA: currency = info['currency'] min_trail = info['trail'] interval = info['intervals'][0] pip_mul = info['pip_mul'] logging.info(currency) df = loadData(currency, interval, 'all') # print df df['yesterday'] = df['close'].shift(1) # print df df['up'] = df.apply(lambda x: True if x['close'] - x['yesterday'] > 0 else False, axis=1) df.dropna(inplace=True) print df results = {} run_dir = True run_len = 0 for idx, row in df.iterrows(): # logging.info(row['up']) if row['up'] != run_dir: key = '{0}'.format(run_len) if key not in results: results[key] = 0 results[key] += 1 run_dir = row['up'] run_len = 1 else: run_len += 1 pprint(results) logging.warn('probabilities:') for n in xrange(1, 10): p = 0.5**(n+1) every = 1/p expecting = len(df) / every logging.info('{0}: {1:.2f}% every {2:.0f} expecting {3:.0f}'.format(n, p * 100, every, expecting)) break if __name__ == '__main__': # parser = argparse.ArgumentParser() # args = parser.parse_args() logging.basicConfig( level=logging.DEBUG, format='%(asctime)s %(name)-8s %(levelname)-8s %(message)s', # datefmt='%Y-%m-%d %H:%M:', ) main()
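The run-length counting in main() above walks the frame row by row; the same counts can be produced with itertools.groupby, which also makes the idea easy to test on synthetic data. A hedged Python 3 sketch on a fabricated up/down series (the real script reads candle data):

import itertools
import random
from collections import Counter

random.seed(0)
ups = [random.random() < 0.5 for _ in range(2000)]  # stand-in for df['up']

run_lengths = [len(list(g)) for _, g in itertools.groupby(ups)]
counts = Counter(run_lengths)

for n in sorted(counts):
    print(n, counts[n])  # counts roughly halve per extra bar, matching the 0.5**(n+1) table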
mit
jld23/saspy
saspy/tests/test_sasdata.py
1
18298
import os import unittest import pandas as pd from IPython.utils.tempdir import TemporaryDirectory from pandas.util.testing import assert_frame_equal import saspy from saspy.sasdata import SASdata from saspy.sasresults import SASresults class TestSASdataObject(unittest.TestCase): @classmethod def setUpClass(cls): cls.sas = saspy.SASsession(results='HTML') @classmethod def tearDownClass(cls): if cls.sas: cls.sas._endsas() def test_SASdata(self): """ test sasdata method """ cars = self.sas.sasdata('cars', libref='sashelp', results='text') self.assertIsInstance(cars, SASdata, msg="cars = sas.sasdata(...) failed") def test_SASdata_batch(self): """ test set_batch() """ cars = self.sas.sasdata('cars', libref='sashelp', results='text') self.sas.set_batch(True) ll = cars.head() self.assertIsInstance(ll, dict, msg="set_batch(True) didn't return dict") def test_SASdata_head(self): """ test head() """ cars = self.sas.sasdata('cars', libref='sashelp', results='text') self.sas.set_batch(True) ll = cars.head() expected = ['1', 'Acura', 'MDX', 'SUV', 'Asia', 'All', '$36,945', '$33,337', '3.5', '6', '265', '17', '23', '4451', '106', '189'] rows = ll['LST'].splitlines() retrieved = [] for i in range(len(rows)): retrieved.append(rows[i].split()) self.assertIn(expected, retrieved, msg="cars.head() result didn't contain row 1") @unittest.skip("Test failes with extra header info") def test_SASdata_tail(self): """ test tail() """ cars = self.sas.sasdata('cars', libref='sashelp', results='text') self.sas.set_batch(True) ll = cars.tail() expected = ['424', 'Volvo', 'C70', 'LPT', 'convertible', '2dr', 'Sedan', 'Europe', 'Front', '$40,565', '$38,203', '2.4', '5', '197', '21', '28', '3450', '105', '186'] rows = ll['LST'].splitlines() retrieved = [] for i in range(len(rows)): retrieved.append(rows[i].split()) self.assertIn(expected, retrieved, msg="cars.tail() result didn't contain row 1") def test_SASdata_tailPD(self): """ test tail() """ cars = self.sas.sasdata('cars', libref='sashelp', results='pandas') self.sas.set_batch(True) ll = cars.tail() self.assertEqual(ll.shape, (5, 15), msg="wrong shape returned") self.assertIsInstance(ll, pd.DataFrame, "Is return type correct") def test_SASdata_contents(self): """ test contents() """ cars = self.sas.sasdata('cars', libref='sashelp', results='text') self.sas.set_batch(True) ll = cars.contents() expected = ['Data', 'Set', 'Name', 'SASHELP.CARS', 'Observations', '428'] rows = ll['LST'].splitlines() retrieved = [] for i in range(len(rows)): retrieved.append(rows[i].split()) self.assertIn(expected, retrieved, msg="cars.contents() result didn't contain expected result") def test_SASdata_describe(self): """ test describe() """ self.skipTest("column output doesn't match the current method. 
I'm skipping the test for now") cars = self.sas.sasdata('cars', libref='sashelp', results='text') self.sas.set_batch(True) ll = cars.describe() expected = ['MSRP', '428', '0', '27635', '32775', '19432', '10280', '20330', '27635'] rows = ll['LST'].splitlines() retrieved = [] for i in range(len(rows)): retrieved.append(rows[i].split()) self.assertIn(expected, retrieved, msg="cars.describe() result didn't contain expected result") def test_SASdata_describe2(self): """ test describe() """ cars = self.sas.sasdata('cars', libref='sashelp') self.sas.set_batch(True) cars.set_results('PANDAS') ll = cars.describe() self.assertIsInstance(ll, pd.DataFrame, msg='ll is not a dataframe') expected = ['MSRP', '428', '0', '27635', '32775', '19432', '10280', '20330', '27635', '39215', '192465'] self.assertEqual(['%.0f' % elem for elem in list(ll.iloc[0].dropna())[1:]], expected[1:], msg="cars.describe() result didn't contain expected result") self.assertEqual(expected[0],list(ll.iloc[0].dropna())[0], msg="cars.describe() result didn't contain expected result") def test_SASdata_results(self): """ test set_results() """ cars = self.sas.sasdata('cars', libref='sashelp', results='text') self.sas.set_batch(True) cars.set_results('HTML') ll = cars.describe() expected = '<!DOCTYPE html>' row1 = ll['LST'].splitlines()[0] self.assertEqual(expected, row1, msg="cars.set_results() result weren't HTML") cars.set_results('TEXT') ll = cars.describe() row1 = ll['LST'].splitlines()[0] self.assertNotEqual(expected, row1, msg="cars.set_results() result weren't TEXT") def test_SASdata_hist(self): """ test hist() """ cars = self.sas.sasdata('cars', libref='sashelp', results='text') self.sas.set_batch(True) cars.set_results('TEXT') ll = cars.hist('MSRP') expected = 'alt="The SGPlot Procedure" src="data:image/png;base64' self.assertIsInstance(ll, dict, msg="cars.hist(...) didn't return dict") self.assertGreater(len(ll['LST']), 40000, msg="cars.hist(...) result were too short") self.assertIn(expected, ll['LST'], msg="cars.hist(...) result weren't what was expected") cars.set_results('HTML') def test_SASdata_series(self): """ test series() """ self.sas.set_batch(True) ll = self.sas.submit('''proc sql; create table sales as select month, sum(actual) as tot_sales, sum(predict) as predicted_sales from sashelp.prdsale group by 1 order by month ;quit; ''') sales = self.sas.sasdata('sales') ll = sales.series(y=['tot_sales', 'predicted_sales'], x='month', title='total vs. predicted sales') expected = 'alt="The SGPlot Procedure" src="data:image/png;base64' self.assertIsInstance(ll, dict, msg="cars.series(...) didn't return dict") self.assertGreater(len(ll['LST']), 70000, msg="cars.series(...) result were too short") self.assertIn(expected, ll['LST'], msg="cars.series(...) result weren't what was expected") def test_SASdata_heatmap(self): """ test heatmap() """ cars = self.sas.sasdata('cars', libref='sashelp', results='text') self.sas.set_batch(True) ll = cars.heatmap('MSRP', 'horsepower') expected = 'alt="The SGPlot Procedure" src="data:image/png;base64' self.assertIsInstance(ll, dict, msg="cars.heatmap(...) didn't return dict") self.assertGreater(len(ll['LST']), 30000, msg="cars.heatmap(...) result were too short") self.assertIn(expected, ll['LST'], msg="cars.heatmap(...) 
result weren't what was expected") def test_SASdata_sort1(self): """ Create dataset in WORK """ self.sas.submit("data cars; set sashelp.cars; id=_n_;run;") wkcars = self.sas.sasdata('cars') # Sort data in place by one variable wkcars.sort('type') self.assertIsInstance(wkcars, SASdata, msg="Sort didn't return SASdata Object") def test_SASdata_sort2(self): """ Create dataset in WORK """ self.sas.submit("data cars; set sashelp.cars; id=_n_;run;") wkcars = self.sas.sasdata('cars') # Sort data in plce by multiple variables wkcars.sort('type descending origin') self.assertIsInstance(wkcars, SASdata, msg="Sort didn't return SASdata Object") def test_SASdata_sort3(self): """ Create dataset in WORK """ self.sas.submit("data cars; set sashelp.cars; id=_n_;run;") wkcars = self.sas.sasdata('cars') # create a second object pointing to the same data set dup = wkcars.sort('type') self.assertEqual(wkcars, dup, msg="Sort objects are not equal but should be") def test_SASdata_sort4(self): """ Create dataset in WORK """ self.sas.submit("data cars; set sashelp.cars; id=_n_;run;") wkcars = self.sas.sasdata('cars') # create a second object with a different sort order diff = self.sas.sasdata('diff') diff = wkcars.sort('origin', diff) self.assertNotEqual(wkcars, diff, msg="Sort objects are equal but should not be") def test_SASdata_sort5(self): """ Create dataset in WORK """ self.sas.submit("data cars; set sashelp.cars; id=_n_;run;") wkcars = self.sas.sasdata('cars') # create object within call wkcars.sort('type') out1 = wkcars.sort('origin', self.sas.sasdata('out1')) self.assertIsInstance(out1, SASdata, msg="Sort didn't return new SASdata Object") self.assertNotEqual(wkcars, out1, msg="Sort objects are equal but should not be") def test_SASdata_sort6(self): """ Create dataset in WORK """ self.sas.submit("data cars; set sashelp.cars; id=_n_;run;") wkcars = self.sas.sasdata('cars') # sort by missing variable self.assertRaises(RuntimeError, lambda: wkcars.sort('foobar')) def test_SASdata_score1(self): """ Create dataset in WORK """ self.sas.submit("data cars; set sashelp.cars; id=_n_;run;") wkcars = self.sas.sasdata('cars') a = wkcars.columnInfo() wkcars.score(code='P_originUSA = origin;') b = wkcars.columnInfo() self.assertNotEqual(a, b, msg="B should have an extra column P_originUSA") def test_SASdata_score2(self): """ Create dataset in WORK """ self.sas.submit("data cars; set sashelp.cars; id=_n_;run;") wkcars = self.sas.sasdata('cars') wkcars.set_results('PANDAS') wkcars2 = self.sas.sasdata('cars2', 'work') wkcars2.set_results('PANDAS') a = wkcars.columnInfo() wkcars.score(code='P_originUSA = origin;', out=wkcars2) b = wkcars.columnInfo() self.assertFalse(assert_frame_equal(a, b), msg="B should be identical to a") self.assertIsInstance(wkcars2, SASdata, "Does out dataset exist") def test_SASdata_score3(self): with TemporaryDirectory() as temppath: with open(os.path.join(temppath, 'score.sas'), 'w') as f: f.write('P_originUSA = origin;') # Create dataset in WORK self.sas.submit("data cars; set sashelp.cars; id=_n_;run;") wkcars = self.sas.sasdata('cars') wkcars.set_results('PANDAS') wkcars2 = self.sas.sasdata('cars2', 'work') wkcars2.set_results('PANDAS') a = wkcars.columnInfo() wkcars.score(file=f.name, out=wkcars2) b = wkcars.columnInfo() self.assertFalse(assert_frame_equal(a, b), msg="B should be identical to a") self.assertIsInstance(wkcars2, SASdata, "Does out dataset exist") def test_SASdata_score4(self): with TemporaryDirectory() as temppath: with open(os.path.join(temppath, 'score.sas'), 'w') as f: 
f.write('P_originUSA = origin;') # Create dataset in WORK self.sas.submit("data cars; set sashelp.cars; id=_n_;run;") wkcars = self.sas.sasdata('cars') a = wkcars.columnInfo() wkcars.score(file=f.name) b = wkcars.columnInfo() self.assertNotEqual(a, b, msg="B should have an extra column P_originUSA") def test_regScoreAssess(self): stat = self.sas.sasstat() self.sas.submit(""" data work.class; set sashelp.class; run; """) tr = self.sas.sasdata("class", "work") tr.set_results('PANDAS') with TemporaryDirectory() as temppath: fname = os.path.join(temppath, 'hpreg_code.sas') b = stat.hpreg(data=tr, model='weight=height', code=fname) tr.score(file=os.path.join(temppath, 'hpreg_code.sas')) # check that p_weight is in columnInfo self.assertTrue('P_Weight' in tr.columnInfo()['Variable'].values, msg="Prediction Column not found") res1 = tr.assessModel(target='weight', prediction='P_weight', nominal=False) a = ['ASSESSMENTBINSTATISTICS', 'ASSESSMENTSTATISTICS', 'LOG'] self.assertEqual(sorted(a), sorted(res1.__dir__()), msg=u" model failed to return correct objects expected:{0:s} returned:{1:s}".format( str(a), str(b))) self.assertIsInstance(res1, SASresults, "Is return type correct") def test_regScoreAssess2(self): stat = self.sas.sasstat() self.sas.submit(""" data work.class; set sashelp.class; run; """) tr = self.sas.sasdata("class", "work") tr.set_results('PANDAS') with TemporaryDirectory() as temppath: fname = os.path.join(temppath, 'hplogistic_code.sas') b = stat.hplogistic(data=tr, cls= 'sex', model='sex = weight height', code=fname) # This also works with hardcoded strings # b = stat.hplogistic(data=tr, cls='sex', model='sex = weight height', code=r'c:\public\foo.sas') tr.score(file=fname) # check that P_SexF is in columnInfo self.assertTrue('P_SexF' in tr.columnInfo()['Variable'].values, msg="Prediction Column not found") res1 = tr.assessModel(target='sex', prediction='P_SexF', nominal=True, event='F') a = ['ASSESSMENTBINSTATISTICS', 'ASSESSMENTSTATISTICS', 'LOG', 'SGPLOT'] self.assertEqual(sorted(a), sorted(res1.__dir__()), msg=u" model failed to return correct objects expected:{0:s} returned:{1:s}".format( str(a), str(b))) self.assertIsInstance(res1, SASresults, "Is return type correct") def test_partition1(self): self.sas.submit(""" data work.class; set sashelp.class; run; """) tr = self.sas.sasdata("class", "work") tr.set_results('PANDAS') tr.partition(var='sex', fraction=.5, kfold=1, out=None, singleOut=True) self.assertTrue('_PartInd_' in tr.columnInfo()['Variable'].values, msg="Partition Column not found") def test_partition2(self): self.sas.submit(""" data work.class; set sashelp.class; run; """) tr = self.sas.sasdata("class", "work") tr.set_results('PANDAS') tr.partition(var='sex', fraction=.5, kfold=2, out=None, singleOut=True) self.assertTrue('_cvfold2' in tr.columnInfo()['Variable'].values, msg="Partition Column not found") def test_partition3(self): self.sas.submit(""" data work.class; set sashelp.class; run; """) tr = self.sas.sasdata("class", "work") out = self.sas.sasdata("class2", "work") tr.set_results('PANDAS') out.set_results('PANDAS') tr.partition(var='sex', fraction=.5, kfold=2, out=out, singleOut=True) self.assertFalse('_cvfold1' in tr.columnInfo()['Variable'].values, msg="Writing to wrong table") self.assertFalse('_PartInd_ ' in tr.columnInfo()['Variable'].values, msg="Writing to wrong table") self.assertTrue('_cvfold2' in out.columnInfo()['Variable'].values, msg="Partition Column not found") def test_partition4(self): self.sas.submit(""" data work.class; set 
sashelp.class; run; """) tr = self.sas.sasdata("class", "work") out = self.sas.sasdata("class2", "work") tr.set_results('PANDAS') out.set_results('PANDAS') res1 = tr.partition(var='sex', fraction=.5, kfold=2, out=out, singleOut=False) self.assertFalse('_cvfold1' in tr.columnInfo()['Variable'].values, msg="Writing to wrong table") self.assertFalse('_PartInd_ ' in tr.columnInfo()['Variable'].values, msg="Writing to wrong table") self.assertTrue('_cvfold2' in out.columnInfo()['Variable'].values, msg="Partition Column not found") self.assertIsInstance(res1, list, "Is return type correct") self.assertIsInstance(res1[0], tuple, "Is return type correct") self.assertIsInstance(res1[0][1], SASdata, "Is return type correct") def test_partition5(self): self.sas.submit(""" data work.class; set sashelp.class; run; """) tr = self.sas.sasdata("class", "work") tr.set_results('PANDAS') tr.partition(fraction=.5, kfold=1, out=None, singleOut=True) self.assertTrue('_PartInd_' in tr.columnInfo()['Variable'].values, msg="Partition Column not found") def test_info1(self): tr = self.sas.sasdata("class", "sashelp") tr.set_results('Pandas') res = tr.info() self.assertIsInstance(res, pd.DataFrame, msg='Data frame not returned') self.assertEqual(res.shape, (5, 4), msg="wrong shape returned") def test_info2(self): tr = self.sas.sasdata("class", "sashelp") tr.set_results('text') res = tr.info() self.assertIsNone(res, msg="only works with Pandas") def test_info3(self): tr = self.sas.sasdata("class", "sashelp") tr.set_results('html') res = tr.info() self.assertIsNone(res, msg="only works with Pandas") if __name__ == "__main__": unittest.main()
apache-2.0
fenderglass/Nano-Align
nanoalign/random_forest.py
1
2935
#(c) 2015-2016 by Authors #This file is a part of Nano-Align program. #Released under the BSD license (see LICENSE file) from __future__ import print_function import random from itertools import chain import numpy as np from sklearn.ensemble import RandomForestRegressor from sklearn.feature_selection import f_regression from nanoalign.blockade_modlel import BlockadeModel class RandomForestBlockade(BlockadeModel): def __init__(self): super(RandomForestBlockade, self).__init__() self.name = "RandomForest" self.rf_cache = {} def train(self, peptides, signals): features = map(lambda p: self._peptide_to_features(p, shuffle=True), peptides) train_features = np.array(sum(features, [])) #regulzrisation noise_features = [] for data in train_features: noise_features.append(map(lambda f: f + random.gauss(0, 10), data)) ## train_signals = np.array(sum(signals, [])) assert len(train_features) == len(train_signals) self.predictor = RandomForestRegressor(n_estimators=10) self.predictor.fit(noise_features, train_signals) #print(f_regression(noise_features, train_signals)) print(self.predictor.feature_importances_) print(self.predictor.score(noise_features, train_signals)) def _rf_predict(self, feature_vec): """ Predicts signal for a feature vector """ if feature_vec not in self.rf_cache: np_feature = np.array(feature_vec).reshape(1, -1) self.rf_cache[feature_vec] = self.predictor.predict(np_feature)[0] return self.rf_cache[feature_vec] def peptide_signal(self, peptide): """ Generates theoretical signal of a given peptide """ assert self.predictor is not None features = self._peptide_to_features(peptide, shuffle=False) signal = np.array(map(lambda x: self._rf_predict(x), features)) #signal = signal / np.std(signal) return signal def _peptide_to_features(self, peptide, shuffle): volumes = map(self.volumes.get, peptide) hydro = map(self.hydro.get, peptide) num_peaks = len(volumes) + self.window - 1 flanked_volumes = ([0] * (self.window - 1) + volumes + [0] * (self.window - 1)) flanked_hydro = ([0] * (self.window - 1) + hydro + [0] * (self.window - 1)) features = [] for i in xrange(0, num_peaks): v = flanked_volumes[i : i + self.window] #if shuffle: # random.shuffle(v) #features.append(tuple(v)) h = flanked_hydro[i : i + self.window] combined = zip(v, h) if shuffle: random.shuffle(combined) features.append(tuple(list(chain(*combined)))) return features
bsd-2-clause
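For clarity, the sliding-window feature construction in _peptide_to_features above can be sketched as standalone Python 3; the volume and hydropathy tables and the window size below are made-up placeholders, not the real Nano-Align values.

# Minimal sketch (assumed placeholder tables) of the windowed feature construction above.
VOLUMES = {"A": 0.88, "G": 0.60, "L": 1.68}
HYDRO = {"A": 1.8, "G": -0.4, "L": 3.8}

def peptide_to_features(peptide, window=3):
    volumes = [VOLUMES[aa] for aa in peptide]
    hydro = [HYDRO[aa] for aa in peptide]
    pad = [0.0] * (window - 1)
    flanked_volumes = pad + volumes + pad
    flanked_hydro = pad + hydro + pad
    features = []
    for i in range(len(volumes) + window - 1):
        v = flanked_volumes[i:i + window]
        h = flanked_hydro[i:i + window]
        # interleave volume/hydropathy values, like tuple(chain(*zip(v, h))) above
        features.append(tuple(x for pair in zip(v, h) for x in pair))
    return features

print(peptide_to_features("GAL"))  # 5 feature vectors, each of length 6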
ezhouyang/class
combine.py
1
3583
#coding:utf-8 ''' Try combining multiple features (feature fusion) ''' import csv import nltk import numpy as np from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import roc_auc_score from sklearn.feature_selection import RFE def read_file(): print "Reading file" f = open("train.tsv","U") reader = csv.reader(f,delimiter='\t') # lists to hold the training corpus train_content = [] train_url = [] label = [] test_content = [] test_url = [] answer = [] porter = nltk.PorterStemmer() g = lambda x : x.isalpha() or x == ' ' a = 0 print "Start reading file" for row in reader : if a == 0: a = a + 1 else: row[2] = filter(g,row[2]) raw_con = row[2].split() raw_con = [porter.stem(t) for t in raw_con] row[2] = ' '.join(raw_con) if a %4 != 0: label.append(int(row[len(row)-1])) train_content.append(row[2]) train_url.append(row[0]) else: answer.append(int(row[len(row)-1])) test_content.append(row[2]) test_url.append(row[0]) a = a + 1 f.close() print "Finished reading file" return train_content,train_url,label,\ test_content,test_url,answer if __name__ == "__main__": train_content,train_url,label,test_content,test_url,answer = read_file() vectorizer = TfidfVectorizer(max_features=None,min_df=2,max_df=1.0,sublinear_tf=True,ngram_range=(1,2),smooth_idf=True,token_pattern=r'\w{1,}',analyzer='word',strip_accents='unicode') vectorizer1 = TfidfVectorizer(max_features=None,min_df=1,max_df=1.0,sublinear_tf=True,ngram_range=(1,2),smooth_idf=True,token_pattern=r'\w{1,}',analyzer='word',strip_accents='unicode') print "Building tf-idf matrices" print "Building content matrix" length_train = len(train_content) x_all = train_content+test_content x_all = vectorizer.fit_transform(x_all) x_content = x_all[:length_train] t_content = x_all[length_train:] print "Building url matrix" length_train = len(train_content) x_all = train_url+test_url x_all = vectorizer1.fit_transform(x_all) x_url = x_all[:length_train] t_url = x_all[length_train:] print "x content shape",x_content.shape print "t content shape",t_content.shape print "x url shape",x_url.shape print "t url shape",t_url.shape label = np.array(label) answer = np.array(answer) clf = LogisticRegression(penalty='l2',dual=True,fit_intercept=False,C=1.0,tol=0.0001,class_weight=None, random_state=None, intercept_scaling=1.0) clf1 = LogisticRegression(penalty='l2',dual=True,fit_intercept=False,C=1.0,tol=0.0001,class_weight=None, random_state=None, intercept_scaling=1.0) print "feature selection" selector = RFE(clf) x_content = selector.fit_transform(x_content,label) t_content = selector.transform(t_content) print "x content shape",x_content.shape print "t content shape",t_content.shape print "Training content lr" clf.fit(x_content,label) pred0 = clf.predict_proba(t_content)[:,1] print "content roc score",roc_auc_score(answer,pred0) print "Training url lr" clf1.fit(x_url,label) pred1 = clf1.predict_proba(t_url)[:,1] print "url roc score",roc_auc_score(answer,pred1) weight = 2.0 pred = pred0*weight + pred1 pred = 1.0*pred/(weight+1.0) print "simple average roc score",roc_auc_score(answer,pred)
apache-2.0
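The final step of the script above blends the two logistic-regression probability vectors with a 2:1 weight in favour of the content model. A tiny self-contained sketch of that blending, using made-up probabilities in place of pred0 and pred1, looks like this:

import numpy as np

# Placeholder probabilities standing in for pred0 (content model) and pred1 (url model).
pred_content = np.array([0.9, 0.2, 0.6])
pred_url = np.array([0.7, 0.4, 0.5])
weight = 2.0
blended = (pred_content * weight + pred_url) / (weight + 1.0)
print(blended)  # [0.8333... 0.2666... 0.5666...]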
stczhc/neupy
tests/algorithms/rbfn/test_rbf_kmeans.py
1
1908
import numpy as np import matplotlib.pyplot as plt from neupy import algorithms from base import BaseTestCase data = np.array([ [0.11, 0.20], [0.25, 0.32], [0.64, 0.60], [0.12, 0.42], [0.70, 0.73], [0.30, 0.27], [0.43, 0.81], [0.44, 0.87], [0.12, 0.92], [0.56, 0.67], [0.36, 0.35], ]) class RBFKMeansTestCase(BaseTestCase): def test_validation(self): with self.assertRaises(ValueError): # More clusters than samples nw = algorithms.RBFKMeans(n_clusters=1000, verbose=False) nw.train(data, epsilon=1e-5) with self.assertRaises(ValueError): # Number of clusters the same as number of samples nw = algorithms.RBFKMeans(n_clusters=data.shape[0], verbose=False) nw.train(data, epsilon=1e-5) with self.assertRaises(ValueError): # One cluster nw = algorithms.RBFKMeans(n_clusters=1, verbose=False) nw.train(data, epsilon=1e-5) def test_classification(self): expected_centers = np.array([ [0.228, 0.312], [0.482, 0.767], ]) nw = algorithms.RBFKMeans(n_clusters=2, verbose=False) nw.train(data, epsilon=1e-5) np.testing.assert_array_almost_equal(expected_centers, nw.centers, decimal=3) def test_train_different_inputs(self): self.assertInvalidVectorTrain( algorithms.RBFKMeans(n_clusters=2, verbose=False), np.array([1, 2, 10]), ) def test_predict_different_inputs(self): kmnet = algorithms.RBFKMeans(verbose=False, n_clusters=2) data = np.array([[1, 2, 10]]).T target = np.array([[0, 0, 1]]).T kmnet.train(data) self.assertInvalidVectorPred(kmnet, data.ravel(), target, decimal=2)
mit
futurulus/scipy
scipy/interpolate/tests/test_rbf.py
45
4626
#!/usr/bin/env python # Created by John Travers, Robert Hetland, 2007 """ Test functions for rbf module """ from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import (assert_, assert_array_almost_equal, assert_almost_equal, run_module_suite) from numpy import linspace, sin, random, exp, allclose from scipy.interpolate.rbf import Rbf FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian', 'cubic', 'quintic', 'thin-plate', 'linear') def check_rbf1d_interpolation(function): # Check that the Rbf function interpolates through the nodes (1D) x = linspace(0,10,9) y = sin(x) rbf = Rbf(x, y, function=function) yi = rbf(x) assert_array_almost_equal(y, yi) assert_almost_equal(rbf(float(x[0])), y[0]) def check_rbf2d_interpolation(function): # Check that the Rbf function interpolates through the nodes (2D). x = random.rand(50,1)*4-2 y = random.rand(50,1)*4-2 z = x*exp(-x**2-1j*y**2) rbf = Rbf(x, y, z, epsilon=2, function=function) zi = rbf(x, y) zi.shape = x.shape assert_array_almost_equal(z, zi) def check_rbf3d_interpolation(function): # Check that the Rbf function interpolates through the nodes (3D). x = random.rand(50, 1)*4 - 2 y = random.rand(50, 1)*4 - 2 z = random.rand(50, 1)*4 - 2 d = x*exp(-x**2 - y**2) rbf = Rbf(x, y, z, d, epsilon=2, function=function) di = rbf(x, y, z) di.shape = x.shape assert_array_almost_equal(di, d) def test_rbf_interpolation(): for function in FUNCTIONS: yield check_rbf1d_interpolation, function yield check_rbf2d_interpolation, function yield check_rbf3d_interpolation, function def check_rbf1d_regularity(function, atol): # Check that the Rbf function approximates a smooth function well away # from the nodes. x = linspace(0, 10, 9) y = sin(x) rbf = Rbf(x, y, function=function) xi = linspace(0, 10, 100) yi = rbf(xi) # import matplotlib.pyplot as plt # plt.figure() # plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-') # plt.plot(x, y, 'o', xi, yi-sin(xi), ':') # plt.title(function) # plt.show() msg = "abs-diff: %f" % abs(yi - sin(xi)).max() assert_(allclose(yi, sin(xi), atol=atol), msg) def test_rbf_regularity(): tolerances = { 'multiquadric': 0.1, 'inverse multiquadric': 0.15, 'gaussian': 0.15, 'cubic': 0.15, 'quintic': 0.1, 'thin-plate': 0.1, 'linear': 0.2 } for function in FUNCTIONS: yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2) def check_rbf1d_stability(function): # Check that the Rbf function with default epsilon is not subject # to overshoot. Regression for issue #4523. # # Generate some data (fixed random seed hence deterministic) np.random.seed(1234) x = np.linspace(0, 10, 50) z = x + 4.0 * np.random.randn(len(x)) rbf = Rbf(x, z, function=function) xi = np.linspace(0, 10, 1000) yi = rbf(xi) # subtract the linear trend and make sure there no spikes assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1) def test_rbf_stability(): for function in FUNCTIONS: yield check_rbf1d_stability, function def test_default_construction(): # Check that the Rbf class can be constructed with the default # multiquadric basis function. Regression test for ticket #1228. x = linspace(0,10,9) y = sin(x) rbf = Rbf(x, y) yi = rbf(x) assert_array_almost_equal(y, yi) def test_function_is_callable(): # Check that the Rbf class can be constructed with function=callable. 
x = linspace(0,10,9) y = sin(x) linfunc = lambda x:x rbf = Rbf(x, y, function=linfunc) yi = rbf(x) assert_array_almost_equal(y, yi) def test_two_arg_function_is_callable(): # Check that the Rbf class can be constructed with a two argument # function=callable. def _func(self, r): return self.epsilon + r x = linspace(0,10,9) y = sin(x) rbf = Rbf(x, y, function=_func) yi = rbf(x) assert_array_almost_equal(y, yi) def test_rbf_epsilon_none(): x = linspace(0, 10, 9) y = sin(x) rbf = Rbf(x, y, epsilon=None) def test_rbf_epsilon_none_collinear(): # Check that collinear points in one dimension doesn't cause an error # due to epsilon = 0 x = [1, 2, 3] y = [4, 4, 4] z = [5, 6, 7] rbf = Rbf(x, y, z, epsilon=None) assert_(rbf.epsilon > 0) if __name__ == "__main__": run_module_suite()
bsd-3-clause
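The core property these tests verify, that an Rbf interpolant reproduces its training values at the nodes, can be checked with a short standalone snippet using the same API as the tests above:

import numpy as np
from scipy.interpolate import Rbf

# An RBF interpolant should pass through its own nodes (1-D case).
x = np.linspace(0, 10, 9)
y = np.sin(x)
rbf = Rbf(x, y, function='multiquadric')
print(np.allclose(rbf(x), y))  # True, up to numerical precision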
jjx02230808/project0223
examples/applications/plot_outlier_detection_housing.py
243
5577
""" ==================================== Outlier detection on a real data set ==================================== This example illustrates the need for robust covariance estimation on a real data set. It is useful both for outlier detection and for a better understanding of the data structure. We selected two sets of two variables from the Boston housing data set as an illustration of what kind of analysis can be done with several outlier detection tools. For the purpose of visualization, we are working with two-dimensional examples, but one should be aware that things are not so trivial in high-dimension, as it will be pointed out. In both examples below, the main result is that the empirical covariance estimate, as a non-robust one, is highly influenced by the heterogeneous structure of the observations. Although the robust covariance estimate is able to focus on the main mode of the data distribution, it sticks to the assumption that the data should be Gaussian distributed, yielding some biased estimation of the data structure, but yet accurate to some extent. The One-Class SVM algorithm First example ------------- The first example illustrates how robust covariance estimation can help concentrating on a relevant cluster when another one exists. Here, many observations are confounded into one and break down the empirical covariance estimation. Of course, some screening tools would have pointed out the presence of two clusters (Support Vector Machines, Gaussian Mixture Models, univariate outlier detection, ...). But had it been a high-dimensional example, none of these could be applied that easily. Second example -------------- The second example shows the ability of the Minimum Covariance Determinant robust estimator of covariance to concentrate on the main mode of the data distribution: the location seems to be well estimated, although the covariance is hard to estimate due to the banana-shaped distribution. Anyway, we can get rid of some outlying observations. The One-Class SVM is able to capture the real data structure, but the difficulty is to adjust its kernel bandwidth parameter so as to obtain a good compromise between the shape of the data scatter matrix and the risk of over-fitting the data. 
""" print(__doc__) # Author: Virgile Fritsch <virgile.fritsch@inria.fr> # License: BSD 3 clause import numpy as np from sklearn.covariance import EllipticEnvelope from sklearn.svm import OneClassSVM import matplotlib.pyplot as plt import matplotlib.font_manager from sklearn.datasets import load_boston # Get data X1 = load_boston()['data'][:, [8, 10]] # two clusters X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped # Define "classifiers" to be used classifiers = { "Empirical Covariance": EllipticEnvelope(support_fraction=1., contamination=0.261), "Robust Covariance (Minimum Covariance Determinant)": EllipticEnvelope(contamination=0.261), "OCSVM": OneClassSVM(nu=0.261, gamma=0.05)} colors = ['m', 'g', 'b'] legend1 = {} legend2 = {} # Learn a frontier for outlier detection with several classifiers xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500)) xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500)) for i, (clf_name, clf) in enumerate(classifiers.items()): plt.figure(1) clf.fit(X1) Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()]) Z1 = Z1.reshape(xx1.shape) legend1[clf_name] = plt.contour( xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i]) plt.figure(2) clf.fit(X2) Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()]) Z2 = Z2.reshape(xx2.shape) legend2[clf_name] = plt.contour( xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i]) legend1_values_list = list( legend1.values() ) legend1_keys_list = list( legend1.keys() ) # Plot the results (= shape of the data points cloud) plt.figure(1) # two clusters plt.title("Outlier detection on a real data set (boston housing)") plt.scatter(X1[:, 0], X1[:, 1], color='black') bbox_args = dict(boxstyle="round", fc="0.8") arrow_args = dict(arrowstyle="->") plt.annotate("several confounded points", xy=(24, 19), xycoords="data", textcoords="data", xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args) plt.xlim((xx1.min(), xx1.max())) plt.ylim((yy1.min(), yy1.max())) plt.legend((legend1_values_list[0].collections[0], legend1_values_list[1].collections[0], legend1_values_list[2].collections[0]), (legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]), loc="upper center", prop=matplotlib.font_manager.FontProperties(size=12)) plt.ylabel("accessibility to radial highways") plt.xlabel("pupil-teacher ratio by town") legend2_values_list = list( legend2.values() ) legend2_keys_list = list( legend2.keys() ) plt.figure(2) # "banana" shape plt.title("Outlier detection on a real data set (boston housing)") plt.scatter(X2[:, 0], X2[:, 1], color='black') plt.xlim((xx2.min(), xx2.max())) plt.ylim((yy2.min(), yy2.max())) plt.legend((legend2_values_list[0].collections[0], legend2_values_list[1].collections[0], legend2_values_list[2].collections[0]), (legend2_values_list[0], legend2_values_list[1], legend2_values_list[2]), loc="upper center", prop=matplotlib.font_manager.FontProperties(size=12)) plt.ylabel("% lower status of the population") plt.xlabel("average number of rooms per dwelling") plt.show()
bsd-3-clause
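As a minimal, self-contained illustration of the kind of estimator compared in the example above, here is a hedged sketch that fits an EllipticEnvelope to toy 2-D data rather than the Boston set; the data and contamination value are invented for the example.

import numpy as np
from sklearn.covariance import EllipticEnvelope

# Toy 2-D data: a tight Gaussian blob plus a few scattered outliers.
rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 1, size=(100, 2)),
               rng.uniform(-6, 6, size=(10, 2))])

clf = EllipticEnvelope(contamination=0.1)
clf.fit(X)
labels = clf.predict(X)  # +1 for inliers, -1 for outliers
print(int((labels == -1).sum()), "points flagged as outliers")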
EVEprosper/vincent_lexicon
setup.py
1
3918
"""Project setup for vincent_lexicon""" from os import path, listdir from setuptools import setup, find_packages from setuptools.command.test import test as TestCommand import importlib HERE = path.abspath(path.dirname(__file__)) def get_version(package_name): """find __version__ for making package Args: package_path (str): path to _version.py folder (abspath > relpath) Returns: (str) __version__ value """ module = package_name + '._version' package = importlib.import_module(module) version = package.__version__ return version def hack_find_packages(include_str): """patches setuptools.find_packages issue setuptools.find_packages(path='') doesn't work as intended Returns: (:obj:`list` :obj:`str`) append <include_str>. onto every element of setuptools.find_pacakges() call """ new_list = [include_str] for element in find_packages(include_str): new_list.append(include_str + '.' + element) return new_list def include_all_subfiles(*args): """Slurps up all files in a directory (non recursive) for data_files section Note: Not recursive, only includes flat files Returns: (:obj:`list` :obj:`str`) list of all non-directories in a file """ file_list = [] for path_included in args: local_path = path.join(HERE, path_included) for file in listdir(local_path): file_abspath = path.join(local_path, file) if path.isdir(file_abspath): #do not include sub folders continue file_list.append(path_included + '/' + file) return file_list class PyTest(TestCommand): """PyTest cmdclass hook for test-at-buildtime functionality http://doc.pytest.org/en/latest/goodpractices.html#manual-integration """ user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")] def initialize_options(self): TestCommand.initialize_options(self) self.pytest_args = [ 'Tests', '--cov=' + __package_name__, '--cov-report=term-missing' ] #load defaults here def run_tests(self): import shlex #import here, cause outside the eggs aren't loaded import pytest pytest_commands = [] try: #read commandline pytest_commands = shlex.split(self.pytest_args) except AttributeError: #use defaults pytest_commands = self.pytest_args errno = pytest.main(pytest_commands) exit(errno) __package_name__ = 'vincent_lexicon' __version__ = get_version(__package_name__) setup( name='vincent_lexicon', author='John Purcell', author_email='prospermarketshow@gmail.com', url='https://github.com/EVEprosper/' + __package_name__, download_url='https://github.com/EVEprosper/' + __package_name__ + '/tarball/v' + __version__, version=__version__, license='MIT', classifiers=[ 'Programming Language :: Python :: 3.5' ], keywords='NLTK market lexicon library', packages=find_packages(), include_package_data=True, data_files=[ #Can't use data_files with gemfury upload (need `bdist_wheel`) ('Tests', include_all_subfiles('Tests')), ('Docs', include_all_subfiles('Docs')), ('Scripts', include_all_subfiles('Scripts')) ], package_data={ __package_name__:[ 'vincent_config.cfg', 'ticker_list.csv' ] }, install_requires=[ 'requests~=2.18.2', 'pandas-datareader~=0.5.0', 'ProsperCommon~=1.0.2', 'ujson~=1.35', 'tinydb~=3.3.1', 'nltk~=3.2.2', 'demjson~=2.2.4', 'plumbum~=1.6.3' ], tests_require=[ 'pytest>=3.0.0', 'testfixtures>=4.12.0', 'pytest_cov>=2.4.0' ], cmdclass={ 'test':PyTest } )
apache-2.0
Ryanglambert/pybrain
examples/supervised/evolino/superimposed_sine.py
25
3496
from __future__ import print_function #!/usr/bin/env python __author__ = 'Michael Isik' from pylab import plot, show, ion, cla, subplot, title, figlegend, draw import numpy from pybrain.structure.modules.evolinonetwork import EvolinoNetwork from pybrain.supervised.trainers.evolino import EvolinoTrainer from lib.data_generator import generateSuperimposedSineData print() print("=== Learning to extrapolate 5 superimposed sine waves ===") print() sinefreqs = ( 0.2, 0.311, 0.42, 0.51, 0.74 ) # sinefreqs = ( 0.2, 0.311, 0.42, 0.51, 0.74, 0.81 ) metascale = 8. scale = 0.5 * metascale stepsize = 0.1 * metascale # === create training dataset # the sequences must be stored in the target field # the input field will be ignored print("creating training data") trnInputSpace = numpy.arange( 0*scale , 190*scale , stepsize ) trnData = generateSuperimposedSineData(sinefreqs, trnInputSpace) # === create testing dataset print("creating test data") tstInputSpace = numpy.arange( 400*scale , 540*scale , stepsize) tstData = generateSuperimposedSineData(sinefreqs, tstInputSpace) # === create the evolino-network print("creating EvolinoNetwork") net = EvolinoNetwork( trnData.outdim, 40 ) wtRatio = 1./3. # === instantiate an evolino trainer # it will train our network through evolutionary algorithms print("creating EvolinoTrainer") trainer = EvolinoTrainer( net, dataset=trnData, subPopulationSize = 20, nParents = 8, nCombinations = 1, initialWeightRange = ( -0.01 , 0.01 ), # initialWeightRange = ( -0.1 , 0.1 ), # initialWeightRange = ( -0.5 , -0.2 ), backprojectionFactor = 0.001, mutationAlpha = 0.001, # mutationAlpha = 0.0000001, nBurstMutationEpochs = numpy.Infinity, wtRatio = wtRatio, verbosity = 2) # === prepare sequences for extrapolation and plotting trnSequence = trnData.getField('target') separatorIdx = int(len(trnSequence)*wtRatio) trnSequenceWashout = trnSequence[0:separatorIdx] trnSequenceTarget = trnSequence[separatorIdx:] tstSequence = tstData.getField('target') separatorIdx = int(len(tstSequence)*wtRatio) tstSequenceWashout = tstSequence[0:separatorIdx] tstSequenceTarget = tstSequence[separatorIdx:] ion() # switch matplotlib to interactive mode for i in range(3000): print("======================") print("====== NEXT RUN ======") print("======================") print("=== TRAINING") # train the network for 1 epoch trainer.trainEpochs( 1 ) print("=== PLOTTING\n") # calculate the nets output for train and the test data trnSequenceOutput = net.extrapolate(trnSequenceWashout, len(trnSequenceTarget)) tstSequenceOutput = net.extrapolate(tstSequenceWashout, len(tstSequenceTarget)) # plot training data sp = subplot(211) # switch to the first subplot cla() # clear the subplot title("Training Set") # set the subplot's title sp.set_autoscale_on( True ) # enable autoscaling targetline = plot(trnSequenceTarget,"r-") # plot the targets sp.set_autoscale_on( False ) # disable autoscaling outputline = plot(trnSequenceOutput,"b-") # plot the actual output # plot test data sp = subplot(212) cla() title("Test Set") sp.set_autoscale_on( True ) plot(tstSequenceTarget,"r-") sp.set_autoscale_on( False ) plot(tstSequenceOutput,"b-") # create a legend figlegend((targetline, outputline),('target','output'),('upper right')) # draw everything draw() show()
bsd-3-clause
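The wtRatio bookkeeping above splits every sequence into a washout part (fed to the network as warm-up) and a target part (to be extrapolated). A quick illustration of that arithmetic with a placeholder sequence:

# Washout/target split as used above: first third warms up the net,
# the remaining two thirds are extrapolated and compared.
wtRatio = 1.0 / 3.0
sequence = list(range(12))                    # placeholder sequence
separatorIdx = int(len(sequence) * wtRatio)
washout, target = sequence[:separatorIdx], sequence[separatorIdx:]
print(len(washout), len(target))              # 4 8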
rougier/Neurosciences
basal-ganglia/topalidou-et-al-2014/model.py
1
10880
#!/usr/bin/env python # ----------------------------------------------------------------------------- # Copyright (c) 2014, Nicolas P. Rougier # Distributed under the (new) BSD License. # # Contributors: Nicolas P. Rougier (Nicolas.Rougier@inria.fr) # Meropi Topalidou (Meropi.Topalidou@inria.fr) # ----------------------------------------------------------------------------- from dana import * import matplotlib.pyplot as plt # Parameters # ----------------------------------------------------------------------------- # Population size n = 4 # Default trial duration duration = 3.0*second # Default Time resolution dt = 1.0*millisecond # Initialization of the random generator (reproductibility !) # np.random.seed(1) # Sigmoid parameter Vmin = 0.0 Vmax = 20.0 Vh = 16.0 Vc = 3.0 # Thresholds Cortex_h = -3.0 Striatum_h = 0.0 STN_h = -10.0 GPi_h = 10.0 Thalamus_h = -40.0 # Time constants Cortex_tau = 0.01 Striatum_tau = 0.01 STN_tau = 0.01 GPi_tau = 0.01 Thalamus_tau = 0.01 # Noise level (%) Cortex_N = 0.01 Striatum_N = 0.001 STN_N = 0.001 GPi_N = 0.03 Thalamus_N = 0.001 # Helper functions # ----------------------------------------------------------------------------- def sigmoid(V,Vmin=Vmin,Vmax=Vmax,Vh=Vh,Vc=Vc): return Vmin + (Vmax-Vmin)/(1.0+np.exp((Vh-V)/Vc)) def noise(Z, level): Z = (1+np.random.uniform(-level/2,level/2,Z.shape))*Z return np.maximum(Z,0.0) def init_weights(L, gain=1): Wmin, Wmax = 0.25, 0.75 W = L._weights N = np.random.normal(0.5, 0.005, W.shape) N = np.minimum(np.maximum(N, 0.0),1.0) L._weights = gain*W*(Wmin + (Wmax - Wmin)*N) # Populations # ----------------------------------------------------------------------------- Cortex_cog = zeros((n,1), """dV/dt = (-V + I + L + Iext - Cortex_h)/Cortex_tau; U = noise(V,Cortex_N); L; I; Iext""") Cortex_mot = zeros((1,n), """dV/dt = (-V + I + L + Iext - Cortex_h)/Cortex_tau; U = noise(V,Cortex_N); L; I; Iext""") Cortex_ass = zeros((n,n), """dV/dt = (-V + I + Iext - Cortex_h)/Cortex_tau; U = noise(V,Cortex_N); I; Iext""") Striatum_cog = zeros((n,1), """dV/dt = (-V + I - Striatum_h)/Striatum_tau; U = noise(sigmoid(V), Striatum_N); I""") Striatum_mot = zeros((1,n), """dV/dt = (-V + I - Striatum_h)/Striatum_tau; U = noise(sigmoid(V), Striatum_N); I""") Striatum_ass = zeros((n,n), """dV/dt = (-V + I - Striatum_h)/Striatum_tau; U = noise(sigmoid(V), Striatum_N); I""") STN_cog = zeros((n,1), """dV/dt = (-V + I - STN_h)/STN_tau; U = noise(V,STN_N); I""") STN_mot = zeros((1,n), """dV/dt = (-V + I - STN_h)/STN_tau; U = noise(V,STN_N); I""") GPi_cog = zeros((n,1), """dV/dt = (-V + I - GPi_h)/GPi_tau; U = noise(V,GPi_N); I""") GPi_mot = zeros((1,n), """dV/dt = (-V + I - GPi_h)/GPi_tau; U = noise(V,GPi_N); I""") Thalamus_cog = zeros((n,1), """dV/dt = (-V + I - Thalamus_h)/Thalamus_tau; U = noise(V,Thalamus_N); I""") Thalamus_mot = zeros((1,n), """dV/dt = (-V + I - Thalamus_h)/Thalamus_tau; U = noise(V, Thalamus_N); I""") # Connectivity # ----------------------------------------------------------------------------- if 1: L = DenseConnection( Cortex_cog('U'), Striatum_cog('I'), 1.0) init_weights(L) L = DenseConnection( Cortex_mot('U'), Striatum_mot('I'), 1.0) init_weights(L) L = DenseConnection( Cortex_ass('U'), Striatum_ass('I'), 1.0) init_weights(L) L = DenseConnection( Cortex_cog('U'), Striatum_ass('I'), np.ones((1,2*n+1))) init_weights(L,0.2) L = DenseConnection( Cortex_mot('U'), Striatum_ass('I'), np.ones((2*n+1,1))) init_weights(L,0.2) DenseConnection( Cortex_cog('U'), STN_cog('I'), 1.0 ) DenseConnection( Cortex_mot('U'), STN_mot('I'), 1.0 ) 
DenseConnection( Striatum_cog('U'), GPi_cog('I'), -2.0 ) DenseConnection( Striatum_mot('U'), GPi_mot('I'), -2.0 ) DenseConnection( Striatum_ass('U'), GPi_cog('I'), -2.0*np.ones((1,2*n+1))) DenseConnection( Striatum_ass('U'), GPi_mot('I'), -2.0*np.ones((2*n+1,1))) DenseConnection( STN_cog('U'), GPi_cog('I'), 1.0*np.ones((2*n+1,1)) ) DenseConnection( STN_mot('U'), GPi_mot('I'), 1.0*np.ones((1,2*n+1)) ) DenseConnection( GPi_cog('U'), Thalamus_cog('I'), -0.5 ) DenseConnection( GPi_mot('U'), Thalamus_mot('I'), -0.5 ) DenseConnection( Thalamus_cog('U'), Cortex_cog('I'), 0.4 ) DenseConnection( Thalamus_mot('U'), Cortex_mot('I'), 0.4 ) DenseConnection( Cortex_cog('U'), Thalamus_cog('I'), 0.1 ) DenseConnection( Cortex_mot('U'), Thalamus_mot('I'), 0.1 ) K = -np.ones((2*n+1,1)) * 0.5 K[n,0] = +0.5 DenseConnection( Cortex_cog('U'), Cortex_cog('L'), K) K = -np.ones((1,2*n+1)) * 0.5 K[0,n] = +0.5 DenseConnection( Cortex_mot('U'), Cortex_mot('L'), K) # Trial setup # ----------------------------------------------------------------------------- @clock.at(500*millisecond) def set_trial(t): m1,m2 = np.random.randint(0,4,2) while m2 == m1: m2 = np.random.randint(4) c1,c2 = np.random.randint(0,4,2) while c2 == c1: c2 = np.random.randint(4) Cortex_mot['Iext'] = 0 Cortex_cog['Iext'] = 0 Cortex_ass['Iext'] = 0 v = 7 Cortex_mot['Iext'][0,m1] = v + np.random.normal(0,v*Cortex_N) Cortex_mot['Iext'][0,m2] = v + np.random.normal(0,v*Cortex_N) Cortex_cog['Iext'][c1,0] = v + np.random.normal(0,v*Cortex_N) Cortex_cog['Iext'][c2,0] = v + np.random.normal(0,v*Cortex_N) Cortex_ass['Iext'][c1,m1] = v + np.random.normal(0,v*Cortex_N) Cortex_ass['Iext'][c2,m2] = v + np.random.normal(0,v*Cortex_N) @clock.at(2500*millisecond) def set_trial(t): Cortex_mot['Iext'] = 0 Cortex_cog['Iext'] = 0 Cortex_ass['Iext'] = 0 # Measurements # ----------------------------------------------------------------------------- size = int(duration/dt) timesteps = np.zeros(size) motor = np.zeros((5, n, size)) cognitive = np.zeros((5, n, size)) associative = np.zeros((2, n*n, size)) @after(clock.tick) def register(t): index = int(t*1000) timesteps[index] = t motor[0,:,index] = Cortex_mot['U'].ravel() motor[1,:,index] = Striatum_mot['U'].ravel() motor[2,:,index] = STN_mot['U'].ravel() motor[3,:,index] = GPi_mot['U'].ravel() motor[4,:,index] = Thalamus_mot['U'].ravel() cognitive[0,:,index] = Cortex_cog['U'].ravel() cognitive[1,:,index] = Striatum_cog['U'].ravel() cognitive[2,:,index] = STN_cog['U'].ravel() cognitive[3,:,index] = GPi_cog['U'].ravel() cognitive[4,:,index] = Thalamus_cog['U'].ravel() associative[0,:,index] = Cortex_ass['U'].ravel() associative[1,:,index] = Striatum_ass['U'].ravel() # Simulation # ----------------------------------------------------------------------------- run(time=duration, dt=dt) # Display 1 # ----------------------------------------------------------------------------- if 1: fig = plt.figure(figsize=(12,5)) plt.subplots_adjust(bottom=0.15) fig.patch.set_facecolor('.9') ax = plt.subplot(1,1,1) plt.plot(timesteps, cognitive[0,0],c='r', label="Cognitive Cortex") plt.plot(timesteps, cognitive[0,1],c='r') plt.plot(timesteps, cognitive[0,2],c='r') plt.plot(timesteps, cognitive[0,3],c='r') plt.plot(timesteps, motor[0,0],c='b', label="Motor Cortex") plt.plot(timesteps, motor[0,1],c='b') plt.plot(timesteps, motor[0,2],c='b') plt.plot(timesteps, motor[0,3],c='b') plt.xlabel("Time (seconds)") plt.ylabel("Activity (Hz)") plt.legend(frameon=False, loc='upper left') plt.xlim(0.0,duration) plt.ylim(-5.0,80.0) plt.xticks([0.0, 0.5, 1.0, 
1.5, 2.0, 2.5, 3.0], ['0.0','0.5\n(Trial start)','1.0','1.5', '2.0','2.5\n(Trial stop)','3.0']) plt.savefig("model-without-gpi.pdf") plt.show() # Display 2 # ----------------------------------------------------------------------------- if 0: fig = plt.figure(figsize=(18,12)) fig.patch.set_facecolor('1.0') def subplot(rows,cols,n, alpha=0.0): ax = plt.subplot(rows,cols,n) ax.patch.set_facecolor("k") ax.patch.set_alpha(alpha) ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.spines['bottom'].set_color('none') ax.yaxis.set_ticks_position('left') ax.yaxis.set_tick_params(direction="outward") return ax ax = subplot(5,3,1) ax.set_title("MOTOR", fontsize=24) ax.set_ylabel("STN", fontsize=24) for i in range(4): plt.plot(timesteps, motor[2,i], c='k', lw=.5) ax.set_xticks([]) ax = subplot(5,3,2) ax.set_title("COGNITIVE", fontsize=24) for i in range(4): plt.plot(timesteps, cognitive[2,i], c='k', lw=.5) ax.set_xticks([]) ax = subplot(5,3,3,alpha=0) ax.set_title("ASSOCIATIVE", fontsize=24) ax.set_xticks([]) ax.set_yticks([]) ax.spines['left'].set_color('none') ax = subplot(5,3,4) ax.set_ylabel("CORTEX", fontsize=24) for i in range(4): ax.plot(timesteps, motor[0,i], c='k', lw=.5) ax.set_xticks([]) ax = subplot(5,3,5) for i in range(4): plt.plot(timesteps, cognitive[0,i], c='k', lw=.5) ax.set_xticks([]) ax = subplot(5,3,6) for i in range(16): plt.plot(timesteps, associative[0,i], c='k', lw=.5) ax.set_xticks([]) ax = subplot(5,3,7) ax.set_ylabel("STRIATUM", fontsize=24) for i in range(4): plt.plot(timesteps, motor[1,i], c='k', lw=.5) ax.set_xticks([]) ax = subplot(5,3,8) for i in range(4): plt.plot(timesteps, cognitive[1,i], c='k', lw=.5) ax.set_xticks([]) ax = subplot(5,3,9) for i in range(16): plt.plot(timesteps, associative[1,i], c='k', lw=.5) ax.set_xticks([]) ax = subplot(5,3,10) ax.set_ylabel("GPi", fontsize=24) for i in range(4): plt.plot(timesteps, motor[3,i], c='k', lw=.5) ax.set_xticks([]) ax = subplot(5,3,11) for i in range(4): plt.plot(timesteps, cognitive[3,i], c='k', lw=.5) ax.set_xticks([]) ax = subplot(5,3,13) ax.set_ylabel("THALAMUS", fontsize=24) for i in range(4): plt.plot(timesteps, motor[4,i], c='k', lw=.5) ax.set_xticks([]) ax = subplot(5,3,14) for i in range(4): plt.plot(timesteps, cognitive[4,i], c='k', lw=.5) ax.set_xticks([]) plt.savefig("model-results-all.pdf") plt.show()
bsd-3-clause
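A quick numerical check of the sigmoid transfer function defined in the model above, using the same constants as the script:

import numpy as np

def sigmoid(V, Vmin=0.0, Vmax=20.0, Vh=16.0, Vc=3.0):
    return Vmin + (Vmax - Vmin) / (1.0 + np.exp((Vh - V) / Vc))

# At V = Vh the output sits exactly at the midpoint of [Vmin, Vmax];
# far above Vh it saturates towards Vmax.
print(sigmoid(16.0))   # 10.0
print(sigmoid(40.0))   # ~19.99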
cchauve/lrcstats
src/sanity_checks/maf_stats.py
2
2823
from __future__ import division import getopt import sys import numpy as np import matplotlib as mpl mpl.use('agg') import matplotlib.pyplot as plt def getAlignedBases(read): ''' Given a read alignment, returns the number of non-'-' chars. ''' bases = 0 for char in read: if char != "-": bases += 1 return bases def getIdentity(ref, read): ''' Given a reference and read alignment, returns the number of identical bases between the two. ''' assert len(ref) == len(read) length = len(ref) totalIdentity = 0 for i in range(length): if ref[i] == read[i]: totalIdentity += 1 return totalIdentity def makeLengthIdentityScatterPlot(accuracyRates, lengths, datasetName, outputPrefix): ''' Creates a scatter plot where the x-axis is length and y-axis is the identity of the read ''' fig, axes = plt.subplots() axes.scatter(lengths, accuracyRates) # Add labels axes.set_ylabel("Identity of Read") axes.set_xlabel("Length of Read") axes.set_title("Length vs Identity of Dataset %s" % (datasetName)) savePath = "%s_length_identity_scatter.png" % (outputPrefix) fig.savefig(savePath, bbox_inches='tight') helpMessage = "Output a file with statistics about a MAF file." usageMessage = "Usage: %s [-h help and usage] [-i MAF input prefix] [-o output prefix] [-n dataset name]" % (sys.argv[0]) options = "hi:o:n:" try: opts, args = getopt.getopt(sys.argv[1:], options) except getopt.GetoptError: print "Error: unable to read command line arguments." sys.exit(2) if len(sys.argv) == 1: print usageMessage sys.exit(2) inputPath = None outputPrefix = None datasetName = None for opt, arg in opts: if opt == '-h': print helpMessage print usageMessage sys.exit() elif opt == '-i': inputPath = arg elif opt == '-o': outputPrefix = arg elif opt == '-n': datasetName = arg optsIncomplete = False if inputPath is None: optsIncomplete = True print("Please provide the path to the MAF file.") if optsIncomplete: print usageMessage sys.exit(2) alignedReads = 0 totalLength = 0 totalAlignedBases = 0 totalIdentity = 0 accuracyRates = [] lengths = [] ref = None read = None with open(inputPath,'r') as input: for line in input: if len(line.rstrip()) > 0: line = line.split() if line[0] == "a": ref = None read = None elif line[0] == "s" and line[1] == "ref": ref = line[6] else: read = line[6] alignedReads += 1 totalLength += len(read) length = getAlignedBases(read) totalAlignedBases += length lengths.append(length) identity = getIdentity(ref, read) totalIdentity += identity accuracyRate = identity/length accuracyRates.append(accuracyRate) #makeLengthIdentityScatterPlot(accuracyRates, lengths, datasetName, outputPrefix) errorRate = (totalLength-totalIdentity)/totalLength print( "Error Rate = %f\n" % (errorRate) )
gpl-3.0
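To make the statistics in maf_stats.py concrete, here is the identity and error-rate arithmetic worked through on a tiny hand-made alignment; the sequences are invented for illustration only.

# Worked example of the per-read statistics computed above: identity counts
# matching alignment columns, and the error rate is the fraction that differ.
ref  = "ACGT-ACGT"
read = "ACGTTACGA"

identity = sum(1 for r, q in zip(ref, read) if r == q)   # 7 matching columns
length = len(read)                                        # 9 columns
error_rate = (length - identity) / float(length)
print(identity, round(error_rate, 3))                     # 7 0.222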
rbalda/neural_ocr
env/lib/python2.7/site-packages/pybrain/tools/neuralnets.py
3
13927
# Neural network data analysis tool collection. Makes heavy use of the logging module. # Can generate training curves during the run (from properly setup IPython and/or with # TkAgg backend and interactive mode - see matplotlib documentation). __author__ = "Martin Felder" __version__ = "$Id$" from pylab import ion, figure, draw import csv from numpy import Infinity import logging from pybrain.datasets import ClassificationDataSet, SequentialDataSet from pybrain.tools.shortcuts import buildNetwork from pybrain.supervised import BackpropTrainer, RPropMinusTrainer, Trainer from pybrain.structure import SoftmaxLayer, LSTMLayer from pybrain.utilities import setAllArgs from pybrain.tools.plotting import MultilinePlotter from pybrain.tools.validation import testOnSequenceData, ModuleValidator, Validator from pybrain.tools.xml import NetworkWriter class NNtools(object): """ Abstract class providing basic functionality to make neural network training more comfortable """ def __init__(self, DS, **kwargs): """ Initialize with the training data set DS. All keywords given are set as member variables. The following are particularly important: :key hidden: number of hidden units :key TDS: test data set for checking convergence :key VDS: validation data set for final performance evaluation :key epoinc: number of epochs to train for, before checking convergence (default: 5) """ self.DS = DS self.hidden = 10 self.maxepochs = 1000 self.Graph = None self.TDS = None self.VDS = None self.epoinc = 5 setAllArgs(self, kwargs) self.trainCurve = None def initGraphics(self, ymax=10, xmax= -1): """ initialize the interactive graphics output window, and return a handle to the plot """ if xmax < 0: xmax = self.maxepochs figure(figsize=[12, 8]) ion() draw() #self.Graph = MultilinePlotter(autoscale=1.2 ) #xlim=[0, self.maxepochs], ylim=[0, ymax]) self.Graph = MultilinePlotter(xlim=[0, xmax], ylim=[0, ymax]) self.Graph.setLineStyle([0, 1], linewidth=2) return self.Graph def set(self, **kwargs): """ convenience method to set several member variables at once """ setAllArgs(self, kwargs) def saveTrainingCurve(self, learnfname): """ save the training curves into a file with the given name (CSV format) """ logging.info('Saving training curves into ' + learnfname) if self.trainCurve is None: logging.error('No training curve available for saving!') learnf = open(learnfname, "wb") writer = csv.writer(learnf, dialect='excel') nDataSets = len(self.trainCurve) for i in range(1, len(self.trainCurve[0]) - 1): writer.writerow([self.trainCurve[k][i] for k in range(nDataSets)]) learnf.close() def saveNetwork(self, fname): """ save the trained network to a file """ NetworkWriter.writeToFile(self.Trainer.module, fname) logging.info("Network saved to: " + fname) #======================================================================================================= class NNregression(NNtools): """ Learns to numerically predict the targets of a set of data, with optional online progress plots. """ def setupNN(self, trainer=RPropMinusTrainer, hidden=None, **trnargs): """ Constructs a 3-layer FNN for regression. Optional arguments are passed on to the Trainer class. 
""" if hidden is not None: self.hidden = hidden logging.info("Constructing FNN with following config:") FNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim) logging.info(str(FNN) + "\n Hidden units:\n " + str(self.hidden)) logging.info("Training FNN with following special arguments:") logging.info(str(trnargs)) self.Trainer = trainer(FNN, dataset=self.DS, **trnargs) def runTraining(self, convergence=0, **kwargs): """ Trains the network on the stored dataset. If convergence is >0, check after that many epoch increments whether test error is going down again, and stop training accordingly. CAVEAT: No support for Sequential datasets!""" assert isinstance(self.Trainer, Trainer) if self.Graph is not None: self.Graph.setLabels(x='epoch', y='normalized regression error') self.Graph.setLegend(['training', 'test'], loc='upper right') epoch = 0 inc = self.epoinc best_error = Infinity best_epoch = 0 learncurve_x = [0] learncurve_y = [0.0] valcurve_y = [0.0] converged = False convtest = 0 if convergence > 0: logging.info("Convergence criterion: %d batches of %d epochs w/o improvement" % (convergence, inc)) while epoch <= self.maxepochs and not converged: self.Trainer.trainEpochs(inc) epoch += inc learncurve_x.append(epoch) # calculate errors on TRAINING data err_trn = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.DS) learncurve_y.append(err_trn) if self.TDS is None: logging.info("epoch: %6d, err_trn: %10g" % (epoch, err_trn)) else: # calculate same errors on TEST data err_tst = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.TDS) valcurve_y.append(err_tst) if err_tst < best_error: # store best error and parameters best_epoch = epoch best_error = err_tst bestweights = self.Trainer.module.params.copy() convtest = 0 else: convtest += 1 logging.info("epoch: %6d, err_trn: %10g, err_tst: %10g, best_tst: %10g" % (epoch, err_trn, err_tst, best_error)) if self.Graph is not None: self.Graph.addData(1, epoch, err_tst) # check if convegence criterion is fulfilled (no improvement after N epoincs) if convtest >= convergence: converged = True if self.Graph is not None: self.Graph.addData(0, epoch, err_trn) self.Graph.update() # training finished! logging.info("Best epoch: %6d, with error: %10g" % (best_epoch, best_error)) if self.VDS is not None: # calculate same errors on VALIDATION data self.Trainer.module.params[:] = bestweights.copy() err_val = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.VDS) logging.info("Result on evaluation data: %10g" % err_val) # store training curve for saving into file self.trainCurve = (learncurve_x, learncurve_y, valcurve_y) #======================================================================================================= class NNclassifier(NNtools): """ Learns to classify a set of data, with optional online progress plots. """ def __init__(self, DS, **kwargs): """ Initialize the classifier: the least we need is the dataset to be classified. All keywords given are set as member variables. """ if not isinstance(DS, ClassificationDataSet): raise TypeError, 'Need a ClassificationDataSet to do classification!' NNtools.__init__(self, DS, **kwargs) self.nClasses = self.DS.nClasses # need this because targets may be altered later self.clsnames = None self.targetsAreOneOfMany = False def _convertAllDataToOneOfMany(self, values=[0, 1]): """ converts all datasets associated with self into 1-out-of-many representations, e.g. 
with original classes 0 to 4, the new target for class 1 would be [0,1,0,0,0], or accordingly with other upper and lower bounds, as given by the values keyword """ if self.targetsAreOneOfMany: return else: # convert all datasets to one-of-many ("winner takes all") representation for dsname in ["DS", "TDS", "VDS"]: d = getattr(self, dsname) if d is not None: if d.outdim < d.nClasses: d._convertToOneOfMany(values) self.targetsAreOneOfMany = True def setupNN(self, trainer=RPropMinusTrainer, hidden=None, **trnargs): """ Setup FNN and trainer for classification. """ self._convertAllDataToOneOfMany() if hidden is not None: self.hidden = hidden FNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim, outclass=SoftmaxLayer) logging.info("Constructing classification FNN with following config:") logging.info(str(FNN) + "\n Hidden units:\n " + str(self.hidden)) logging.info("Trainer received the following special arguments:") logging.info(str(trnargs)) self.Trainer = trainer(FNN, dataset=self.DS, **trnargs) def setupRNN(self, trainer=BackpropTrainer, hidden=None, **trnargs): """ Setup an LSTM RNN and trainer for sequence classification. """ if hidden is not None: self.hidden = hidden self._convertAllDataToOneOfMany() RNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim, hiddenclass=LSTMLayer, outclass=SoftmaxLayer) logging.info("Constructing classification RNN with following config:") logging.info(str(RNN) + "\n Hidden units:\n " + str(self.hidden)) logging.info("Trainer received the following special arguments:") logging.info(str(trnargs)) self.Trainer = trainer(RNN, dataset=self.DS, **trnargs) def runTraining(self, convergence=0, **kwargs): """ Trains the network on the stored dataset. If convergence is >0, check after that many epoch increments whether test error is going down again, and stop training accordingly. """ assert isinstance(self.Trainer, Trainer) if self.Graph is not None: self.Graph.setLabels(x='epoch', y='% classification error') self.Graph.setLegend(['training', 'test'], loc='lower right') epoch = 0 inc = self.epoinc best_error = 100.0 best_epoch = 0 learncurve_x = [0] learncurve_y = [0.0] valcurve_y = [0.0] converged = False convtest = 0 if convergence > 0: logging.info("Convergence criterion: %d batches of %d epochs w/o improvement" % (convergence, inc)) while epoch <= self.maxepochs and not converged: self.Trainer.trainEpochs(inc) epoch += inc learncurve_x.append(epoch) # calculate errors on TRAINING data if isinstance(self.DS, SequentialDataSet): r_trn = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.DS)) else: # FIXME: messy - validation does not belong into the Trainer... out, trueclass = self.Trainer.testOnClassData(return_targets=True) r_trn = 100. * (1.0 - Validator.classificationPerformance(out, trueclass)) learncurve_y.append(r_trn) if self.TDS is None: logging.info("epoch: %6d, err_trn: %5.2f%%" % (epoch, r_trn)) else: # calculate errors on TEST data if isinstance(self.DS, SequentialDataSet): r_tst = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.TDS)) else: # FIXME: messy - validation does not belong into the Trainer... out, trueclass = self.Trainer.testOnClassData(return_targets=True, dataset=self.TDS) r_tst = 100. 
* (1.0 - Validator.classificationPerformance(out, trueclass)) valcurve_y.append(r_tst) if r_tst < best_error: best_epoch = epoch best_error = r_tst bestweights = self.Trainer.module.params.copy() convtest = 0 else: convtest += 1 logging.info("epoch: %6d, err_trn: %5.2f%%, err_tst: %5.2f%%, best_tst: %5.2f%%" % (epoch, r_trn, r_tst, best_error)) if self.Graph is not None: self.Graph.addData(1, epoch, r_tst) # check if convegence criterion is fulfilled (no improvement after N epoincs) if convtest >= convergence: converged = True if self.Graph is not None: self.Graph.addData(0, epoch, r_trn) self.Graph.update() logging.info("Best epoch: %6d, with error: %5.2f%%" % (best_epoch, best_error)) if self.VDS is not None: # calculate errors on VALIDATION data self.Trainer.module.params[:] = bestweights.copy() if isinstance(self.DS, SequentialDataSet): r_val = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.VDS)) else: out, trueclass = self.Trainer.testOnClassData(return_targets=True, dataset=self.VDS) r_val = 100. * (1.0 - Validator.classificationPerformance(out, trueclass)) logging.info("Result on evaluation data: %5.2f%%" % r_val) self.trainCurve = (learncurve_x, learncurve_y, valcurve_y)
mit
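The convergence rule used in runTraining above (stop once the test error has failed to improve for `convergence` consecutive checks) can be sketched in isolation; this is a simplified stand-in, not the PyBrain implementation itself.

def should_stop(test_errors, convergence=3):
    # Returns True once `convergence` consecutive checks pass without a new
    # best (lowest) test error, mirroring the convtest counter above.
    best, since_best = float("inf"), 0
    for err in test_errors:
        if err < best:
            best, since_best = err, 0
        else:
            since_best += 1
        if since_best >= convergence:
            return True
    return False

print(should_stop([10.0, 8.0, 7.5, 7.6, 7.7, 7.8]))  # True
print(should_stop([10.0, 8.0, 7.5, 7.4, 7.7, 7.8]))  # False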
gwpy/gwpy.github.io
docs/latest/plotter/colors-1.py
7
1123
from __future__ import division import numpy from matplotlib import (pyplot, rcParams) from matplotlib.colors import to_hex from gwpy.plotter import colors rcParams.update({ 'text.usetex': False, 'font.size': 15 }) th = numpy.linspace(0, 2*numpy.pi, 512) names = [ 'gwpy:geo600', 'gwpy:kagra', 'gwpy:ligo-hanford', 'gwpy:ligo-india', 'gwpy:ligo-livingston', 'gwpy:virgo', ] fig = pyplot.figure(figsize=(5, 2)) ax = fig.gca() ax.axis('off') for j, name in enumerate(sorted(names)): c = str(to_hex(name)) v_offset = -(j / len(names)) ax.plot(th, .1*numpy.sin(th) + v_offset, color=c) ax.annotate("{!r}".format(name), (0, v_offset), xytext=(-1.5, 0), ha='right', va='center', color=c, textcoords='offset points', family='monospace') ax.annotate("{!r}".format(c), (2*numpy.pi, v_offset), xytext=(1.5, 0), ha='left', va='center', color=c, textcoords='offset points', family='monospace') fig.subplots_adjust(**{'bottom': 0.0, 'left': 0.54, 'right': 0.78, 'top': 1}) pyplot.show()
gpl-3.0
michaelkourlas/gini
frontend/src/gbuilder/UI/GraphWindow.py
11
6860
import numpy import warnings warnings.simplefilter('ignore',numpy.RankWarning) from numpy.lib.polynomial import * from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar from matplotlib.figure import Figure from PyQt4 import QtCore, QtGui from Core.globals import mainWidgets from Dockable import * class RouterQueue: def __init__(self): self.lastIndex = 0 self.x = [] self.y = {"size":[], "rate":[]} def getName(self): return self.name def setName(self, name): self.name = name def inc(self): self.lastIndex += 1 def addPoint(self, ysize, yrate): if len(self.x) > 9: self.x.pop(0) self.y["size"].pop(0) self.y["rate"].pop(0) self.x.append(self.lastIndex) self.y["size"].append(ysize) self.y["rate"].append(yrate) self.inc() def getSizes(self): return self.y["size"] def getRates(self): return self.y["rate"] def getX(self): return self.x class GraphWindow(Dockable): def __init__(self, name, parent = None): """ Create a stats window to view mobile statistics. """ Dockable.__init__(self, "Graph of " + name, parent) self.name = name self.setMinimumSize(600,500) self.resize(600,500) self.setFloating(True) self.setAllowedAreas(QtCore.Qt.NoDockWidgetArea) self.create_main_frame() self.smoothing = False self.lastIndex = 0 self.queues = {"outputQueue":RouterQueue(), "default":RouterQueue() } self.setWidget(self.main_frame) self.timer = QtCore.QTimer() self.connect(self.timer, QtCore.SIGNAL("timeout()"), self.refresh) self.connect(self, QtCore.SIGNAL("topLevelChanged(bool)"), self.dockChanged) self.timer.start(1000) def dockChanged(self, floating): if not floating: self.setFloating(True) def closeEvent(self, event): self.timer.stop() QtGui.QDockWidget.closeEvent(self, event) def refresh(self): client = mainWidgets["client"] if client: client.send("rstats " + self.name) def updateStats(self, queueName, size, rate): if not self.queues.has_key(queueName): self.queues[queueName] = RouterQueue() queue = self.queues[queueName] queue.addPoint(float(size), float(rate)) self.on_draw() def split(self, y): above = False below = False initial = y[0] for i in range(len(y)): if above: if y[i] <= initial/2: return y[:i], y[i:] elif below: if y[i] >= initial/2: return y[:i], y[i:] elif y[i] > initial: above = True elif y[i] < initial: below = True return y, [] def divide(self, y): part1, part2 = self.split(y) parts = [part1] while part2: part1, part2 = self.split(part2) parts.append(part1) return parts def toggleSmooth(self): self.smoothing = not self.smoothing def smooth(self, x, y): if not self.smoothing or len(x) < 2: return x, y parts = self.divide(y) count = 0 tx = [] ty = [] for ypart in parts: xpart = x[count:count+len(ypart)] count += len(ypart) p = polyfit(xpart, ypart, 2) dx = numpy.linspace(xpart[0], xpart[-1], 2*len(xpart)) dy = polyval(p, dx) tx += dx.tolist() ty += dy.tolist() return tx, ty def on_draw(self): """ Redraws the figure """ # clear the axes and redraw the plot anew # for i in range(len(self.queues) % 5): axes = self.axesList[i] queue = self.queues.values()[i] axes.clear() axes.grid(self.grid_cb.isChecked()) axes.set_title(self.queues.keys()[i]) x,y = self.smooth(queue.getX(), queue.getSizes()) x2,y2 = self.smooth(queue.getX(), queue.getRates()) axes.plot( x, y, antialiased=True, lw=3) axes.plot( x2, y2, antialiased=True, lw=3) for canvas in self.canvases: canvas.draw() def create_main_frame(self): self.main_frame = QtGui.QWidget() # Create the mpl Figure and FigCanvas objects. 
# 5x4 inches, 100 dots-per-inch # self.dpi = 100 self.figs = [] self.canvases = [] self.axesList = [] for i in range(4): fig = Figure((3.0, 2.0), dpi=self.dpi) canvas = FigureCanvas(fig) canvas.setParent(self.main_frame) axes = fig.add_subplot(111) axes.set_ylim(-1.0, 1.0) self.figs.append(fig) self.canvases.append(canvas) self.axesList.append(axes) # Other GUI controls # self.smooth_button = QtGui.QPushButton("&Toggle Smoothing") self.connect(self.smooth_button, QtCore.SIGNAL('clicked()'), self.toggleSmooth) self.grid_cb = QtGui.QCheckBox("Show &Grid") self.grid_cb.setChecked(False) self.connect(self.grid_cb, QtCore.SIGNAL('stateChanged(int)'), self.on_draw) self.legend = QtGui.QLabel("Blue: Queue Sizes\nGreen: Queue Rates") # # Layout with box sizers # hbox = QtGui.QHBoxLayout() for w in [self.grid_cb, self.smooth_button, self.legend]: hbox.addWidget(w) hbox.setAlignment(w, QtCore.Qt.AlignVCenter) vbox = QtGui.QGridLayout() vbox.addWidget(self.canvases[0], 0, 0) vbox.addWidget(self.canvases[1], 0, 1) vbox.addWidget(self.canvases[2], 1, 0) vbox.addWidget(self.canvases[3], 1, 1) vbox.addLayout(hbox, 3, 0) self.main_frame.setLayout(vbox) self.setWidget(self.main_frame)
mit
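The smooth() method in graph.py above fits a quadratic to each monotone segment of the sampled queue values with polyfit and re-evaluates it on a denser grid with polyval. A minimal standalone sketch of that idea, assuming nothing beyond NumPy (the function name and the sample data are illustrative, not part of the original module):

import numpy as np

def smooth_segment(x, y, factor=2):
    """Fit a quadratic to one segment and resample it on a denser grid."""
    coeffs = np.polyfit(x, y, 2)                          # least-squares quadratic fit
    dense_x = np.linspace(x[0], x[-1], factor * len(x))   # denser sampling of the same range
    return dense_x.tolist(), np.polyval(coeffs, dense_x).tolist()

# ten noisy queue-size samples, the kind of window RouterQueue keeps
xs = list(range(10))
ys = (50 + 5 * np.random.randn(10)).tolist()
sx, sy = smooth_segment(xs, ys)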
donK23/pyData-Projects
HolmesTopicModels/holmes_topic_models/preprocessing.py
1
4373
#!/usr/bin/python """ preprocessing Text preprocessing: Data wrangling and text vectorization. Author: datadonk23 Date: 24.10.18 """ import re import unicodedata from sklearn.datasets import load_files from sklearn.feature_extraction import stop_words from sklearn.feature_extraction.text import TfidfVectorizer import nltk nltk.download(["punkt", "stopwords", "wordnet"], download_dir="nltk/") nltk.data.path.append("nltk/") from nltk import wordpunct_tokenize from nltk.corpus import stopwords from nltk.stem.lancaster import LancasterStemmer from nltk.stem.wordnet import WordNetLemmatizer class TextWrangler(object): """ Clean and tokenize documents. Tokenize documents. Removes punctuation and stopwords. Converts tokens to lowercase. Replaces numbers with generic number token. Depending on `kind` parameter, stems or lemmatizes tokens. Parameters ---------- kind : str Either "stem" (default) for stemming or "lemma" for lemmatization """ def __init__(self, kind="stem"): self.kind = kind self.stemmer = LancasterStemmer() self.lemmatizer = WordNetLemmatizer() nltk_stopwords = set(stopwords.words("english")) sklearn_stopwords = stop_words.ENGLISH_STOP_WORDS custom_stopwords = ["arthur", "conan", "doyle", "chapter", "contents", "holmes", "watson", "said", "man", "mr", '`--"', "`"] self.stopwords = sklearn_stopwords.union(nltk_stopwords).union( custom_stopwords) def __call__(self, document): if self.kind == "lemma": tokens = [self.lemmatizer.lemmatize(token.lower()) for token in wordpunct_tokenize(document) if not self.is_punct(token) and not self.is_singlechar(token) and not self.is_stopword(token.lower())] else: tokens = [self.stemmer.stem(token.lower()) for token in wordpunct_tokenize(document) if not self.is_punct(token) and not self.is_singlechar(token) and not self.is_stopword(token.lower())] clean_tokens = ["NUM" if self.is_number(token) else token for token in tokens] return clean_tokens def is_punct(self, token): return all(unicodedata.category(char).startswith("P") for char in token) def is_singlechar(self, token): return len(token) < 2 def is_stopword(self, token): return token in self.stopwords def is_number(self, token): return bool(re.match(r"\d+", token)) def load_corpus(path): """ Loads corpus from specified directory. :param path: String - directory :return: raw_corpus : Bunch - object of documents """ raw_corpus = load_files(path) return raw_corpus def tfidf_vectorizer(): """ Initializes tfidf vectorizer :return: TfidfVectorizer object """ tfidf = TfidfVectorizer(strip_accents="ascii", tokenizer=TextWrangler(kind="stem")) return tfidf class Collections(object): """ Collection of stories. Provide maps of collection titles and encodings. Attributes ---------- original : dict Original collection titles by Arthur Conan Doyle novel : dict Novel collection titles created from dominant words in each topic """ def __init__(self): pass @property def original(self): collection = { 0: "His Last Bow", 1: "The Adventures of Sherlock Holmes", 2: "The Case-Book of Sherlock_Holmes", 3: "The Memoirs of Sherlock Holmes", 4: "The Return of Sherlock Holmes" } return collection @property def novel(self): collection = { 0: "The Whispering Ways Sherlock Holmes Waits to Act on Waste", 1: "Vengeful Wednesdays: Unexpected Incidences on the Tapering Train by Sherlock Holmes", 2: "A Private Journey of Sherlock Holmes: Thirteen Unfolded Veins on the Move", 3: "Sherlock Holmes Tumbling into the hanging arms of Scylla", 4: "The Shooking Jaw of Sherlock Holmes in the Villa of the Baronet" } return collection
apache-2.0
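A sketch of how the pieces above compose, assuming TextWrangler can be imported from this module; the two sample sentences are made up for illustration and are not from the corpus:

from preprocessing import TextWrangler              # assumed import path for the class above
from sklearn.feature_extraction.text import TfidfVectorizer

docs = ["Holmes examined the 39 steps carefully.",
        "The train left Paddington at 7 in the morning."]

tfidf = TfidfVectorizer(strip_accents="ascii",
                        tokenizer=TextWrangler(kind="stem"))
matrix = tfidf.fit_transform(docs)                  # sparse (n_documents, n_terms) matrix
print(tfidf.get_feature_names())                    # stemmed vocabulary; digits collapse to "NUM"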
Sonophoto/pyDGW
SimpleExample.py
1
3179
""" ************************************************************************* ____ _ _ _____ ____ __ __ / ___|(_)_ __ ___ _ __ | | ___ | ___/ ___|| \/ | \___ \| | '_ ` _ \| '_ \| |/ _ \ | |_ \___ \| |\/| | ___) | | | | | | | |_) | | __/ | _| ___) | | | | |____/|_|_| |_| |_| .__/|_|\___| |_| |____/|_| |_| |_| FILENAME: SimpleExample.py AUTHOR: "Brig Young, https://github.com/Sonophoto/" PURPOSE: "Implements the simplest possible FSM, Extra Comments" COPYRIGHT: "Copyright 2016-2020 Brig Young, Sonophotostudios.com" LICENSE: " BSD 2-Clause, (Citation Required) See LICENSE file" ************************************************************************* """ import pyDGW # Import the pyDGW module # First we create the state variable that we pass around. # This is a trivial example but important Simple_state = pyDGW.DGW_data() # Instatiate a DGW_data object Simple_state.the_answer = 0 # Extend Simple_state with data members # Next we create the Graph Walker object that will hold our state methods DGW_Simple = pyDGW.DGWalker() # Instantiate a DGWalker object # Next we define the operator functions for each state in our state machine # We use logic in each operator to determine which edge we will follow when # we change state. # An operator could contain another state machine. def OP_start(Simple_state): # A Start Node is required """Our start state""" # Optional Docstring print("Entering the start node") # Optional messaging Simple_state.the_answer = 1 # Do something to change data if Simple_state.the_answer : # Use logic to determine next state operator = "stop" # operator is the next state print("Next Operator is:", operator) # Optional messaging return(operator, Simple_state) # Pass operator and modified state # back to DGWalker which will load # and execute the next node. def OP_stop(Simple_state): """Our stop state""" Simple_state.the_answer = 42 # Do any final processing at shutdown print("We have stopped") return(Simple_state) # Returning our completed "output" # Finally we use the methods of pyDGW to build our state machine and run it DGW_Simple.DEBUG = True # Setup any desired runtime options DGW_Simple.addNode("start", OP_start) # Add a callback for each state node DGW_Simple.addNode("stop", OP_stop) DGW_Simple.setStartNode("start") # Define ONE starting node DGW_Simple.setEndNode("stop") # Define ONE or MORE ending nodes DGW_Simple.run(Simple_state) # Run the initialized graph walker # Now you can do additional post processing on Simple_state, generate other # output with matplotlib, and or format the information and print it on screen: print("The Answer to Life, The Universe and Everything Is: ", Simple_state.the_answer)
bsd-2-clause
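SimpleExample.py above wires a two-node machine; the same handful of calls extend to longer chains. A speculative sketch built only from the pyDGW calls demonstrated above (the intermediate "work" node and the counter field are invented for illustration):

import pyDGW

state = pyDGW.DGW_data()
state.counter = 0                        # extend the state object, as above

def OP_start(state):
    state.counter += 1
    return ("work", state)               # next operator is the middle node

def OP_work(state):
    state.counter *= 10
    return ("stop", state)

def OP_stop(state):
    return (state)                       # end nodes return only the state

walker = pyDGW.DGWalker()
walker.addNode("start", OP_start)
walker.addNode("work", OP_work)
walker.addNode("stop", OP_stop)
walker.setStartNode("start")
walker.setEndNode("stop")
walker.run(state)

print(state.counter)                     # 10: start -> work -> stop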
GGoussar/scikit-image
doc/examples/xx_applications/plot_coins_segmentation.py
5
5075
""" ================================================== Comparing edge-based and region-based segmentation ================================================== In this example, we will see how to segment objects from a background. We use the ``coins`` image from ``skimage.data``, which shows several coins outlined against a darker background. """ import numpy as np import matplotlib.pyplot as plt from skimage import data coins = data.coins() hist = np.histogram(coins, bins=np.arange(0, 256)) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3)) ax1.imshow(coins, cmap=plt.cm.gray, interpolation='nearest') ax1.axis('off') ax2.plot(hist[1][:-1], hist[0], lw=2) ax2.set_title('histogram of grey values') ###################################################################### # # Thresholding # ============ # # A simple way to segment the coins is to choose a threshold based on the # histogram of grey values. Unfortunately, thresholding this image gives a # binary image that either misses significant parts of the coins or merges # parts of the background with the coins: fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(6, 3), sharex=True, sharey=True) ax1.imshow(coins > 100, cmap=plt.cm.gray, interpolation='nearest') ax1.set_title('coins > 100') ax1.axis('off') ax1.set_adjustable('box-forced') ax2.imshow(coins > 150, cmap=plt.cm.gray, interpolation='nearest') ax2.set_title('coins > 150') ax2.axis('off') ax2.set_adjustable('box-forced') margins = dict(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1) fig.subplots_adjust(**margins) ###################################################################### # Edge-based segmentation # ======================= # # Next, we try to delineate the contours of the coins using edge-based # segmentation. To do this, we first get the edges of features using the # Canny edge-detector. from skimage.feature import canny edges = canny(coins/255.) fig, ax = plt.subplots(figsize=(4, 3)) ax.imshow(edges, cmap=plt.cm.gray, interpolation='nearest') ax.axis('off') ax.set_title('Canny detector') ###################################################################### # These contours are then filled using mathematical morphology. from scipy import ndimage as ndi fill_coins = ndi.binary_fill_holes(edges) fig, ax = plt.subplots(figsize=(4, 3)) ax.imshow(fill_coins, cmap=plt.cm.gray, interpolation='nearest') ax.axis('off') ax.set_title('Filling the holes') ###################################################################### # Small spurious objects are easily removed by setting a minimum size for # valid objects. from skimage import morphology coins_cleaned = morphology.remove_small_objects(fill_coins, 21) fig, ax = plt.subplots(figsize=(4, 3)) ax.imshow(coins_cleaned, cmap=plt.cm.gray, interpolation='nearest') ax.axis('off') ax.set_title('Removing small objects') ###################################################################### # However, this method is not very robust, since contours that are not # perfectly closed are not filled correctly, as is the case for one unfilled # coin above. # #Region-based segmentation #========================= # #We therefore try a region-based method using the watershed transform. #First, we find an elevation map using the Sobel gradient of the image. 
from skimage.filters import sobel elevation_map = sobel(coins) fig, ax = plt.subplots(figsize=(4, 3)) ax.imshow(elevation_map, cmap=plt.cm.gray, interpolation='nearest') ax.axis('off') ax.set_title('elevation_map') ###################################################################### # Next we find markers of the background and the coins based on the extreme # parts of the histogram of grey values. markers = np.zeros_like(coins) markers[coins < 30] = 1 markers[coins > 150] = 2 fig, ax = plt.subplots(figsize=(4, 3)) ax.imshow(markers, cmap=plt.cm.spectral, interpolation='nearest') ax.axis('off') ax.set_title('markers') ###################################################################### # Finally, we use the watershed transform to fill regions of the elevation # map starting from the markers determined above: segmentation = morphology.watershed(elevation_map, markers) fig, ax = plt.subplots(figsize=(4, 3)) ax.imshow(segmentation, cmap=plt.cm.gray, interpolation='nearest') ax.axis('off') ax.set_title('segmentation') ###################################################################### # This last method works even better, and the coins can be segmented and # labeled individually. from skimage.color import label2rgb segmentation = ndi.binary_fill_holes(segmentation - 1) labeled_coins, _ = ndi.label(segmentation) image_label_overlay = label2rgb(labeled_coins, image=coins) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(6, 3), sharex=True, sharey=True) ax1.imshow(coins, cmap=plt.cm.gray, interpolation='nearest') ax1.contour(segmentation, [0.5], linewidths=1.2, colors='y') ax1.axis('off') ax1.set_adjustable('box-forced') ax2.imshow(image_label_overlay, interpolation='nearest') ax2.axis('off') ax2.set_adjustable('box-forced') fig.subplots_adjust(**margins)
bsd-3-clause
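The thresholds 100 and 150 in the tutorial above are read off the histogram by hand; scikit-image can also pick a global threshold automatically. A small optional addition (not part of the original example) using Otsu's method:

from skimage import data
from skimage.filters import threshold_otsu

coins = data.coins()
t = threshold_otsu(coins)        # data-driven global threshold
binary = coins > t               # still a plain global threshold, so the touching-coin problem remains
print(t)                         # falls between the two hand-picked values above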
harisbal/pandas
pandas/io/stata.py
1
107807
""" Module contains tools for processing Stata files into DataFrames The StataReader below was originally written by Joe Presbrey as part of PyDTA. It has been extended and improved by Skipper Seabold from the Statsmodels project who also developed the StataWriter and was finally added to pandas in a once again improved version. You can find more information on http://presbrey.mit.edu/PyDTA and http://www.statsmodels.org/devel/ """ from collections import OrderedDict import datetime import struct import sys import warnings from dateutil.relativedelta import relativedelta import numpy as np from pandas._libs.lib import infer_dtype from pandas._libs.tslibs import NaT, Timestamp from pandas._libs.writers import max_len_string_array from pandas.compat import ( BytesIO, lmap, lrange, lzip, range, string_types, text_type, zip) from pandas.util._decorators import Appender, deprecate_kwarg from pandas.core.dtypes.common import ( ensure_object, is_categorical_dtype, is_datetime64_dtype) from pandas import DatetimeIndex, compat, isna, to_datetime, to_timedelta from pandas.core.arrays import Categorical from pandas.core.base import StringMixin from pandas.core.frame import DataFrame from pandas.core.series import Series from pandas.io.common import ( BaseIterator, _stringify_path, get_filepath_or_buffer) _version_error = ("Version of given Stata file is not 104, 105, 108, " "111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), " "115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)") _statafile_processing_params1 = """\ convert_dates : boolean, defaults to True Convert date variables to DataFrame time values. convert_categoricals : boolean, defaults to True Read value labels and convert columns to Categorical/Factor variables.""" _encoding_params = """\ encoding : string, None or encoding Encoding used to parse the files. None defaults to latin-1.""" _statafile_processing_params2 = """\ index_col : string, optional, default: None Column to set as index. convert_missing : boolean, defaults to False Flag indicating whether to convert missing values to their Stata representations. If False, missing values are replaced with nan. If True, columns containing missing values are returned with object data types and missing values are represented by StataMissingValue objects. preserve_dtypes : boolean, defaults to True Preserve Stata datatypes. If False, numeric data are upcast to pandas default types for foreign data (float64 or int64). columns : list or None Columns to retain. Columns will be returned in the given order. None returns all columns. order_categoricals : boolean, defaults to True Flag indicating whether converted categorical data are ordered.""" _chunksize_params = """\ chunksize : int, default None Return StataReader object for iterations, returns chunks with given number of lines.""" _iterator_params = """\ iterator : boolean, default False Return StataReader object.""" _read_stata_doc = """ Read Stata file into DataFrame. Parameters ---------- filepath_or_buffer : string or file-like object Path to .dta file or object implementing a binary read() functions. %s %s %s %s %s Returns ------- DataFrame or StataReader See Also -------- pandas.io.stata.StataReader : low-level reader for Stata data files pandas.DataFrame.to_stata: export Stata data files Examples -------- Read a Stata dta file: >>> df = pd.read_stata('filename.dta') Read a Stata dta file in 10,000 line chunks: >>> itr = pd.read_stata('filename.dta', chunksize=10000) >>> for chunk in itr: ... 
do_something(chunk) """ % (_statafile_processing_params1, _encoding_params, _statafile_processing_params2, _chunksize_params, _iterator_params) _data_method_doc = """\ Reads observations from Stata file, converting them into a dataframe .. deprecated:: This is a legacy method. Use `read` in new code. Parameters ---------- %s %s Returns ------- DataFrame """ % (_statafile_processing_params1, _statafile_processing_params2) _read_method_doc = """\ Reads observations from Stata file, converting them into a dataframe Parameters ---------- nrows : int Number of lines to read from data file, if None read whole file. %s %s Returns ------- DataFrame """ % (_statafile_processing_params1, _statafile_processing_params2) _stata_reader_doc = """\ Class for reading Stata dta files. Parameters ---------- path_or_buf : path (string), buffer or path object string, path object (pathlib.Path or py._path.local.LocalPath) or object implementing a binary read() functions. .. versionadded:: 0.23.0 support for pathlib, py.path. %s %s %s %s """ % (_statafile_processing_params1, _statafile_processing_params2, _encoding_params, _chunksize_params) @Appender(_read_stata_doc) @deprecate_kwarg(old_arg_name='encoding', new_arg_name=None) @deprecate_kwarg(old_arg_name='index', new_arg_name='index_col') def read_stata(filepath_or_buffer, convert_dates=True, convert_categoricals=True, encoding=None, index_col=None, convert_missing=False, preserve_dtypes=True, columns=None, order_categoricals=True, chunksize=None, iterator=False): reader = StataReader(filepath_or_buffer, convert_dates=convert_dates, convert_categoricals=convert_categoricals, index_col=index_col, convert_missing=convert_missing, preserve_dtypes=preserve_dtypes, columns=columns, order_categoricals=order_categoricals, chunksize=chunksize) if iterator or chunksize: data = reader else: try: data = reader.read() finally: reader.close() return data _date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"] stata_epoch = datetime.datetime(1960, 1, 1) def _stata_elapsed_date_to_datetime_vec(dates, fmt): """ Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime Parameters ---------- dates : Series The Stata Internal Format date to convert to datetime according to fmt fmt : str The format to convert to. Can be, tc, td, tw, tm, tq, th, ty Returns Returns ------- converted : Series The converted dates Examples -------- >>> dates = pd.Series([52]) >>> _stata_elapsed_date_to_datetime_vec(dates , "%tw") 0 1961-01-01 dtype: datetime64[ns] Notes ----- datetime/c - tc milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day datetime/C - tC - NOT IMPLEMENTED milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds date - td days since 01jan1960 (01jan1960 = 0) weekly date - tw weeks since 1960w1 This assumes 52 weeks in a year, then adds 7 * remainder of the weeks. The datetime value is the start of the week in terms of days in the year, not ISO calendar weeks. monthly date - tm months since 1960m1 quarterly date - tq quarters since 1960q1 half-yearly date - th half-years since 1960h1 yearly date - ty years since 0000 If you don't have pandas with datetime support, then you can't do milliseconds accurately. 
""" MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000 MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000 def convert_year_month_safe(year, month): """ Convert year and month to datetimes, using pandas vectorized versions when the date range falls within the range supported by pandas. Otherwise it falls back to a slower but more robust method using datetime. """ if year.max() < MAX_YEAR and year.min() > MIN_YEAR: return to_datetime(100 * year + month, format='%Y%m') else: index = getattr(year, 'index', None) return Series( [datetime.datetime(y, m, 1) for y, m in zip(year, month)], index=index) def convert_year_days_safe(year, days): """ Converts year (e.g. 1999) and days since the start of the year to a datetime or datetime64 Series """ if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR: return (to_datetime(year, format='%Y') + to_timedelta(days, unit='d')) else: index = getattr(year, 'index', None) value = [datetime.datetime(y, 1, 1) + relativedelta(days=int(d)) for y, d in zip(year, days)] return Series(value, index=index) def convert_delta_safe(base, deltas, unit): """ Convert base dates and deltas to datetimes, using pandas vectorized versions if the deltas satisfy restrictions required to be expressed as dates in pandas. """ index = getattr(deltas, 'index', None) if unit == 'd': if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA: values = [base + relativedelta(days=int(d)) for d in deltas] return Series(values, index=index) elif unit == 'ms': if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA: values = [base + relativedelta(microseconds=(int(d) * 1000)) for d in deltas] return Series(values, index=index) else: raise ValueError('format not understood') base = to_datetime(base) deltas = to_timedelta(deltas, unit=unit) return base + deltas # TODO: If/when pandas supports more than datetime64[ns], this should be # improved to use correct range, e.g. datetime[Y] for yearly bad_locs = np.isnan(dates) has_bad_values = False if bad_locs.any(): has_bad_values = True data_col = Series(dates) data_col[bad_locs] = 1.0 # Replace with NaT dates = dates.astype(np.int64) if fmt.startswith(("%tc", "tc")): # Delta ms relative to base base = stata_epoch ms = dates conv_dates = convert_delta_safe(base, ms, 'ms') elif fmt.startswith(("%tC", "tC")): warnings.warn("Encountered %tC format. Leaving in Stata " "Internal Format.") conv_dates = Series(dates, dtype=np.object) if has_bad_values: conv_dates[bad_locs] = NaT return conv_dates # Delta days relative to base elif fmt.startswith(("%td", "td", "%d", "d")): base = stata_epoch days = dates conv_dates = convert_delta_safe(base, days, 'd') # does not count leap days - 7 days is a week. 
# 52nd week may have more than 7 days elif fmt.startswith(("%tw", "tw")): year = stata_epoch.year + dates // 52 days = (dates % 52) * 7 conv_dates = convert_year_days_safe(year, days) elif fmt.startswith(("%tm", "tm")): # Delta months relative to base year = stata_epoch.year + dates // 12 month = (dates % 12) + 1 conv_dates = convert_year_month_safe(year, month) elif fmt.startswith(("%tq", "tq")): # Delta quarters relative to base year = stata_epoch.year + dates // 4 month = (dates % 4) * 3 + 1 conv_dates = convert_year_month_safe(year, month) elif fmt.startswith(("%th", "th")): # Delta half-years relative to base year = stata_epoch.year + dates // 2 month = (dates % 2) * 6 + 1 conv_dates = convert_year_month_safe(year, month) elif fmt.startswith(("%ty", "ty")): # Years -- not delta year = dates month = np.ones_like(dates) conv_dates = convert_year_month_safe(year, month) else: raise ValueError("Date fmt %s not understood" % fmt) if has_bad_values: # Restore NaT for bad values conv_dates[bad_locs] = NaT return conv_dates def _datetime_to_stata_elapsed_vec(dates, fmt): """ Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime Parameters ---------- dates : Series Series or array containing datetime.datetime or datetime64[ns] to convert to the Stata Internal Format given by fmt fmt : str The format to convert to. Can be, tc, td, tw, tm, tq, th, ty """ index = dates.index NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000 US_PER_DAY = NS_PER_DAY / 1000 def parse_dates_safe(dates, delta=False, year=False, days=False): d = {} if is_datetime64_dtype(dates.values): if delta: delta = dates - stata_epoch d['delta'] = delta.values.astype( np.int64) // 1000 # microseconds if days or year: dates = DatetimeIndex(dates) d['year'], d['month'] = dates.year, dates.month if days: days = (dates.astype(np.int64) - to_datetime(d['year'], format='%Y').astype(np.int64)) d['days'] = days // NS_PER_DAY elif infer_dtype(dates) == 'datetime': if delta: delta = dates.values - stata_epoch f = lambda x: \ US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds v = np.vectorize(f) d['delta'] = v(delta) if year: year_month = dates.apply(lambda x: 100 * x.year + x.month) d['year'] = year_month.values // 100 d['month'] = (year_month.values - d['year'] * 100) if days: f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days v = np.vectorize(f) d['days'] = v(dates) else: raise ValueError('Columns containing dates must contain either ' 'datetime64, datetime.datetime or null values.') return DataFrame(d, index=index) bad_loc = isna(dates) index = dates.index if bad_loc.any(): dates = Series(dates) if is_datetime64_dtype(dates): dates[bad_loc] = to_datetime(stata_epoch) else: dates[bad_loc] = stata_epoch if fmt in ["%tc", "tc"]: d = parse_dates_safe(dates, delta=True) conv_dates = d.delta / 1000 elif fmt in ["%tC", "tC"]: warnings.warn("Stata Internal Format tC not supported.") conv_dates = dates elif fmt in ["%td", "td"]: d = parse_dates_safe(dates, delta=True) conv_dates = d.delta // US_PER_DAY elif fmt in ["%tw", "tw"]: d = parse_dates_safe(dates, year=True, days=True) conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7) elif fmt in ["%tm", "tm"]: d = parse_dates_safe(dates, year=True) conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1) elif fmt in ["%tq", "tq"]: d = parse_dates_safe(dates, year=True) conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3 elif fmt in ["%th", "th"]: d = parse_dates_safe(dates, year=True) conv_dates = (2 * (d.year - stata_epoch.year) + (d.month > 
6).astype(np.int)) elif fmt in ["%ty", "ty"]: d = parse_dates_safe(dates, year=True) conv_dates = d.year else: raise ValueError("Format %s is not a known Stata date format" % fmt) conv_dates = Series(conv_dates, dtype=np.float64) missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0] conv_dates[bad_loc] = missing_value return Series(conv_dates, index=index) excessive_string_length_error = """ Fixed width strings in Stata .dta files are limited to 244 (or fewer) characters. Column '%s' does not satisfy this restriction. """ class PossiblePrecisionLoss(Warning): pass precision_loss_doc = """ Column converted from %s to %s, and some data are outside of the lossless conversion range. This may result in a loss of precision in the saved data. """ class ValueLabelTypeMismatch(Warning): pass value_label_mismatch_doc = """ Stata value labels (pandas categories) must be strings. Column {0} contains non-string labels which will be converted to strings. Please check that the Stata data file created has not lost information due to duplicate labels. """ class InvalidColumnName(Warning): pass invalid_name_doc = """ Not all pandas column names were valid Stata variable names. The following replacements have been made: {0} If this is not what you expect, please make sure you have Stata-compliant column names in your DataFrame (strings only, max 32 characters, only alphanumerics and underscores, no Stata reserved words) """ def _cast_to_stata_types(data): """Checks the dtypes of the columns of a pandas DataFrame for compatibility with the data types and ranges supported by Stata, and converts if necessary. Parameters ---------- data : DataFrame The DataFrame to check and convert Notes ----- Numeric columns in Stata must be one of int8, int16, int32, float32 or float64, with some additional value restrictions. int8 and int16 columns are checked for violations of the value restrictions and upcast if needed. int64 data is not usable in Stata, and so it is downcast to int32 whenever the value are in the int32 range, and sidecast to float64 when larger than this range. If the int64 values are outside of the range of those perfectly representable as float64 values, a warning is raised. bool columns are cast to int8. uint columns are converted to int of the same size if there is no loss in precision, otherwise are upcast to a larger type. uint64 is currently not supported since it is concerted to object in a DataFrame. 
""" ws = '' # original, if small, if large conversion_data = ((np.bool, np.int8, np.int8), (np.uint8, np.int8, np.int16), (np.uint16, np.int16, np.int32), (np.uint32, np.int32, np.int64)) float32_max = struct.unpack('<f', b'\xff\xff\xff\x7e')[0] float64_max = struct.unpack('<d', b'\xff\xff\xff\xff\xff\xff\xdf\x7f')[0] for col in data: dtype = data[col].dtype # Cast from unsupported types to supported types for c_data in conversion_data: if dtype == c_data[0]: if data[col].max() <= np.iinfo(c_data[1]).max: dtype = c_data[1] else: dtype = c_data[2] if c_data[2] == np.float64: # Warn if necessary if data[col].max() >= 2 ** 53: ws = precision_loss_doc % ('uint64', 'float64') data[col] = data[col].astype(dtype) # Check values and upcast if necessary if dtype == np.int8: if data[col].max() > 100 or data[col].min() < -127: data[col] = data[col].astype(np.int16) elif dtype == np.int16: if data[col].max() > 32740 or data[col].min() < -32767: data[col] = data[col].astype(np.int32) elif dtype == np.int64: if (data[col].max() <= 2147483620 and data[col].min() >= -2147483647): data[col] = data[col].astype(np.int32) else: data[col] = data[col].astype(np.float64) if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53: ws = precision_loss_doc % ('int64', 'float64') elif dtype in (np.float32, np.float64): value = data[col].max() if np.isinf(value): raise ValueError('Column {col} has a maximum value of ' 'infinity which is outside the range ' 'supported by Stata.'.format(col=col)) if dtype == np.float32 and value > float32_max: data[col] = data[col].astype(np.float64) elif dtype == np.float64: if value > float64_max: raise ValueError('Column {col} has a maximum value ' '({val}) outside the range supported by ' 'Stata ({float64_max})' .format(col=col, val=value, float64_max=float64_max)) if ws: warnings.warn(ws, PossiblePrecisionLoss) return data class StataValueLabel(object): """ Parse a categorical column and prepare formatted output Parameters ----------- value : int8, int16, int32, float32 or float64 The Stata missing value code Attributes ---------- string : string String representation of the Stata missing value value : int8, int16, int32, float32 or float64 The original encoded missing value Methods ------- generate_value_label """ def __init__(self, catarray): self.labname = catarray.name categories = catarray.cat.categories self.value_labels = list(zip(np.arange(len(categories)), categories)) self.value_labels.sort(key=lambda x: x[0]) self.text_len = np.int32(0) self.off = [] self.val = [] self.txt = [] self.n = 0 # Compute lengths and setup lists of offsets and labels for vl in self.value_labels: category = vl[1] if not isinstance(category, string_types): category = str(category) warnings.warn(value_label_mismatch_doc.format(catarray.name), ValueLabelTypeMismatch) self.off.append(self.text_len) self.text_len += len(category) + 1 # +1 for the padding self.val.append(vl[0]) self.txt.append(category) self.n += 1 if self.text_len > 32000: raise ValueError('Stata value labels for a single variable must ' 'have a combined length less than 32,000 ' 'characters.') # Ensure int32 self.off = np.array(self.off, dtype=np.int32) self.val = np.array(self.val, dtype=np.int32) # Total length self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len def _encode(self, s): """ Python 3 compatibility shim """ if compat.PY3: return s.encode(self._encoding) else: return s def generate_value_label(self, byteorder, encoding): """ Parameters ---------- byteorder : str Byte order of the output encoding : str File 
encoding Returns ------- value_label : bytes Bytes containing the formatted value label """ self._encoding = encoding bio = BytesIO() null_string = '\x00' null_byte = b'\x00' # len bio.write(struct.pack(byteorder + 'i', self.len)) # labname labname = self._encode(_pad_bytes(self.labname[:32], 33)) bio.write(labname) # padding - 3 bytes for i in range(3): bio.write(struct.pack('c', null_byte)) # value_label_table # n - int32 bio.write(struct.pack(byteorder + 'i', self.n)) # textlen - int32 bio.write(struct.pack(byteorder + 'i', self.text_len)) # off - int32 array (n elements) for offset in self.off: bio.write(struct.pack(byteorder + 'i', offset)) # val - int32 array (n elements) for value in self.val: bio.write(struct.pack(byteorder + 'i', value)) # txt - Text labels, null terminated for text in self.txt: bio.write(self._encode(text + null_string)) bio.seek(0) return bio.read() class StataMissingValue(StringMixin): """ An observation's missing value. Parameters ----------- value : int8, int16, int32, float32 or float64 The Stata missing value code Attributes ---------- string : string String representation of the Stata missing value value : int8, int16, int32, float32 or float64 The original encoded missing value Notes ----- More information: <http://www.stata.com/help.cgi?missing> Integer missing values make the code '.', '.a', ..., '.z' to the ranges 101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ... 2147483647 (for int32). Missing values for floating point data types are more complex but the pattern is simple to discern from the following table. np.float32 missing values (float in Stata) 0000007f . 0008007f .a 0010007f .b ... 00c0007f .x 00c8007f .y 00d0007f .z np.float64 missing values (double in Stata) 000000000000e07f . 000000000001e07f .a 000000000002e07f .b ... 000000000018e07f .x 000000000019e07f .y 00000000001ae07f .z """ # Construct a dictionary of missing values MISSING_VALUES = {} bases = (101, 32741, 2147483621) for b in bases: # Conversion to long to avoid hash issues on 32 bit platforms #8968 MISSING_VALUES[compat.long(b)] = '.' for i in range(1, 27): MISSING_VALUES[compat.long(i + b)] = '.' + chr(96 + i) float32_base = b'\x00\x00\x00\x7f' increment = struct.unpack('<i', b'\x00\x08\x00\x00')[0] for i in range(27): value = struct.unpack('<f', float32_base)[0] MISSING_VALUES[value] = '.' if i > 0: MISSING_VALUES[value] += chr(96 + i) int_value = struct.unpack('<i', struct.pack('<f', value))[ 0] + increment float32_base = struct.pack('<i', int_value) float64_base = b'\x00\x00\x00\x00\x00\x00\xe0\x7f' increment = struct.unpack('q', b'\x00\x00\x00\x00\x00\x01\x00\x00')[0] for i in range(27): value = struct.unpack('<d', float64_base)[0] MISSING_VALUES[value] = '.' 
if i > 0: MISSING_VALUES[value] += chr(96 + i) int_value = struct.unpack('q', struct.pack('<d', value))[0] + increment float64_base = struct.pack('q', int_value) BASE_MISSING_VALUES = {'int8': 101, 'int16': 32741, 'int32': 2147483621, 'float32': struct.unpack('<f', float32_base)[0], 'float64': struct.unpack('<d', float64_base)[0]} def __init__(self, value): self._value = value # Conversion to long to avoid hash issues on 32 bit platforms #8968 value = compat.long(value) if value < 2147483648 else float(value) self._str = self.MISSING_VALUES[value] string = property(lambda self: self._str, doc="The Stata representation of the missing value: " "'.', '.a'..'.z'") value = property(lambda self: self._value, doc='The binary representation of the missing value.') def __unicode__(self): return self.string def __repr__(self): # not perfect :-/ return "%s(%s)" % (self.__class__, self) def __eq__(self, other): return (isinstance(other, self.__class__) and self.string == other.string and self.value == other.value) @classmethod def get_base_missing_value(cls, dtype): if dtype == np.int8: value = cls.BASE_MISSING_VALUES['int8'] elif dtype == np.int16: value = cls.BASE_MISSING_VALUES['int16'] elif dtype == np.int32: value = cls.BASE_MISSING_VALUES['int32'] elif dtype == np.float32: value = cls.BASE_MISSING_VALUES['float32'] elif dtype == np.float64: value = cls.BASE_MISSING_VALUES['float64'] else: raise ValueError('Unsupported dtype') return value class StataParser(object): def __init__(self): # type code. # -------------------- # str1 1 = 0x01 # str2 2 = 0x02 # ... # str244 244 = 0xf4 # byte 251 = 0xfb (sic) # int 252 = 0xfc # long 253 = 0xfd # float 254 = 0xfe # double 255 = 0xff # -------------------- # NOTE: the byte type seems to be reserved for categorical variables # with a label, but the underlying variable is -127 to 100 # we're going to drop the label and cast to int self.DTYPE_MAP = \ dict( lzip(range(1, 245), ['a' + str(i) for i in range(1, 245)]) + [ (251, np.int8), (252, np.int16), (253, np.int32), (254, np.float32), (255, np.float64) ] ) self.DTYPE_MAP_XML = \ dict( [ (32768, np.uint8), # Keys to GSO (65526, np.float64), (65527, np.float32), (65528, np.int32), (65529, np.int16), (65530, np.int8) ] ) self.TYPE_MAP = lrange(251) + list('bhlfd') self.TYPE_MAP_XML = \ dict( [ # Not really a Q, unclear how to handle byteswap (32768, 'Q'), (65526, 'd'), (65527, 'f'), (65528, 'l'), (65529, 'h'), (65530, 'b') ] ) # NOTE: technically, some of these are wrong. there are more numbers # that can be represented. it's the 27 ABOVE and BELOW the max listed # numeric data type in [U] 12.2.2 of the 11.2 manual float32_min = b'\xff\xff\xff\xfe' float32_max = b'\xff\xff\xff\x7e' float64_min = b'\xff\xff\xff\xff\xff\xff\xef\xff' float64_max = b'\xff\xff\xff\xff\xff\xff\xdf\x7f' self.VALID_RANGE = { 'b': (-127, 100), 'h': (-32767, 32740), 'l': (-2147483647, 2147483620), 'f': (np.float32(struct.unpack('<f', float32_min)[0]), np.float32(struct.unpack('<f', float32_max)[0])), 'd': (np.float64(struct.unpack('<d', float64_min)[0]), np.float64(struct.unpack('<d', float64_max)[0])) } self.OLD_TYPE_MAPPING = { 98: 251, # byte 105: 252, # int 108: 253, # long 102: 254 # float # don't know old code for double } # These missing values are the generic '.' 
in Stata, and are used # to replace nans self.MISSING_VALUES = { 'b': 101, 'h': 32741, 'l': 2147483621, 'f': np.float32(struct.unpack('<f', b'\x00\x00\x00\x7f')[0]), 'd': np.float64( struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]) } self.NUMPY_TYPE_MAP = { 'b': 'i1', 'h': 'i2', 'l': 'i4', 'f': 'f4', 'd': 'f8', 'Q': 'u8' } # Reserved words cannot be used as variable names self.RESERVED_WORDS = ('aggregate', 'array', 'boolean', 'break', 'byte', 'case', 'catch', 'class', 'colvector', 'complex', 'const', 'continue', 'default', 'delegate', 'delete', 'do', 'double', 'else', 'eltypedef', 'end', 'enum', 'explicit', 'export', 'external', 'float', 'for', 'friend', 'function', 'global', 'goto', 'if', 'inline', 'int', 'local', 'long', 'NULL', 'pragma', 'protected', 'quad', 'rowvector', 'short', 'typedef', 'typename', 'virtual') class StataReader(StataParser, BaseIterator): __doc__ = _stata_reader_doc @deprecate_kwarg(old_arg_name='encoding', new_arg_name=None) @deprecate_kwarg(old_arg_name='index', new_arg_name='index_col') def __init__(self, path_or_buf, convert_dates=True, convert_categoricals=True, index_col=None, convert_missing=False, preserve_dtypes=True, columns=None, order_categoricals=True, encoding=None, chunksize=None): super(StataReader, self).__init__() self.col_sizes = () # Arguments to the reader (can be temporarily overridden in # calls to read). self._convert_dates = convert_dates self._convert_categoricals = convert_categoricals self._index_col = index_col self._convert_missing = convert_missing self._preserve_dtypes = preserve_dtypes self._columns = columns self._order_categoricals = order_categoricals self._encoding = None self._chunksize = chunksize # State variables for the file self._has_string_data = False self._missing_values = False self._can_read_value_labels = False self._column_selector_set = False self._value_labels_read = False self._data_read = False self._dtype = None self._lines_read = 0 self._native_byteorder = _set_endianness(sys.byteorder) path_or_buf = _stringify_path(path_or_buf) if isinstance(path_or_buf, str): path_or_buf, encoding, _, should_close = get_filepath_or_buffer( path_or_buf) if isinstance(path_or_buf, (str, text_type, bytes)): self.path_or_buf = open(path_or_buf, 'rb') else: # Copy to BytesIO, and ensure no encoding contents = path_or_buf.read() self.path_or_buf = BytesIO(contents) self._read_header() self._setup_dtype() def __enter__(self): """ enter context manager """ return self def __exit__(self, exc_type, exc_value, traceback): """ exit context manager """ self.close() def close(self): """ close the handle if its open """ try: self.path_or_buf.close() except IOError: pass def _set_encoding(self): """ Set string encoding which depends on file version """ if self.format_version < 118: self._encoding = 'latin-1' else: self._encoding = 'utf-8' def _read_header(self): first_char = self.path_or_buf.read(1) if struct.unpack('c', first_char)[0] == b'<': self._read_new_header(first_char) else: self._read_old_header(first_char) self.has_string_data = len([x for x in self.typlist if type(x) is int]) > 0 # calculate size of a data record self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist) def _read_new_header(self, first_char): # The first part of the header is common to 117 and 118. 
self.path_or_buf.read(27) # stata_dta><header><release> self.format_version = int(self.path_or_buf.read(3)) if self.format_version not in [117, 118]: raise ValueError(_version_error) self._set_encoding() self.path_or_buf.read(21) # </release><byteorder> self.byteorder = self.path_or_buf.read(3) == b'MSF' and '>' or '<' self.path_or_buf.read(15) # </byteorder><K> self.nvar = struct.unpack(self.byteorder + 'H', self.path_or_buf.read(2))[0] self.path_or_buf.read(7) # </K><N> self.nobs = self._get_nobs() self.path_or_buf.read(11) # </N><label> self.data_label = self._get_data_label() self.path_or_buf.read(19) # </label><timestamp> self.time_stamp = self._get_time_stamp() self.path_or_buf.read(26) # </timestamp></header><map> self.path_or_buf.read(8) # 0x0000000000000000 self.path_or_buf.read(8) # position of <map> self._seek_vartypes = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16 self._seek_varnames = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10 self._seek_sortlist = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10 self._seek_formats = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9 self._seek_value_label_names = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19 # Requires version-specific treatment self._seek_variable_labels = self._get_seek_variable_labels() self.path_or_buf.read(8) # <characteristics> self.data_location = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6 self.seek_strls = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7 self.seek_value_labels = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14 self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes) self.path_or_buf.seek(self._seek_varnames) self.varlist = self._get_varlist() self.path_or_buf.seek(self._seek_sortlist) self.srtlist = struct.unpack( self.byteorder + ('h' * (self.nvar + 1)), self.path_or_buf.read(2 * (self.nvar + 1)) )[:-1] self.path_or_buf.seek(self._seek_formats) self.fmtlist = self._get_fmtlist() self.path_or_buf.seek(self._seek_value_label_names) self.lbllist = self._get_lbllist() self.path_or_buf.seek(self._seek_variable_labels) self._variable_labels = self._get_variable_labels() # Get data type information, works for versions 117-118. def _get_dtypes(self, seek_vartypes): self.path_or_buf.seek(seek_vartypes) raw_typlist = [struct.unpack(self.byteorder + 'H', self.path_or_buf.read(2))[0] for i in range(self.nvar)] def f(typ): if typ <= 2045: return typ try: return self.TYPE_MAP_XML[typ] except KeyError: raise ValueError("cannot convert stata types [{0}]". 
format(typ)) typlist = [f(x) for x in raw_typlist] def f(typ): if typ <= 2045: return str(typ) try: return self.DTYPE_MAP_XML[typ] except KeyError: raise ValueError("cannot convert stata dtype [{0}]" .format(typ)) dtyplist = [f(x) for x in raw_typlist] return typlist, dtyplist def _get_varlist(self): if self.format_version == 117: b = 33 elif self.format_version == 118: b = 129 return [self._null_terminate(self.path_or_buf.read(b)) for i in range(self.nvar)] # Returns the format list def _get_fmtlist(self): if self.format_version == 118: b = 57 elif self.format_version > 113: b = 49 elif self.format_version > 104: b = 12 else: b = 7 return [self._null_terminate(self.path_or_buf.read(b)) for i in range(self.nvar)] # Returns the label list def _get_lbllist(self): if self.format_version >= 118: b = 129 elif self.format_version > 108: b = 33 else: b = 9 return [self._null_terminate(self.path_or_buf.read(b)) for i in range(self.nvar)] def _get_variable_labels(self): if self.format_version == 118: vlblist = [self._decode(self.path_or_buf.read(321)) for i in range(self.nvar)] elif self.format_version > 105: vlblist = [self._null_terminate(self.path_or_buf.read(81)) for i in range(self.nvar)] else: vlblist = [self._null_terminate(self.path_or_buf.read(32)) for i in range(self.nvar)] return vlblist def _get_nobs(self): if self.format_version == 118: return struct.unpack(self.byteorder + 'Q', self.path_or_buf.read(8))[0] else: return struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0] def _get_data_label(self): if self.format_version == 118: strlen = struct.unpack(self.byteorder + 'H', self.path_or_buf.read(2))[0] return self._decode(self.path_or_buf.read(strlen)) elif self.format_version == 117: strlen = struct.unpack('b', self.path_or_buf.read(1))[0] return self._null_terminate(self.path_or_buf.read(strlen)) elif self.format_version > 105: return self._null_terminate(self.path_or_buf.read(81)) else: return self._null_terminate(self.path_or_buf.read(32)) def _get_time_stamp(self): if self.format_version == 118: strlen = struct.unpack('b', self.path_or_buf.read(1))[0] return self.path_or_buf.read(strlen).decode("utf-8") elif self.format_version == 117: strlen = struct.unpack('b', self.path_or_buf.read(1))[0] return self._null_terminate(self.path_or_buf.read(strlen)) elif self.format_version > 104: return self._null_terminate(self.path_or_buf.read(18)) else: raise ValueError() def _get_seek_variable_labels(self): if self.format_version == 117: self.path_or_buf.read(8) # <variable_lables>, throw away # Stata 117 data files do not follow the described format. 
This is # a work around that uses the previous label, 33 bytes for each # variable, 20 for the closing tag and 17 for the opening tag return self._seek_value_label_names + (33 * self.nvar) + 20 + 17 elif self.format_version == 118: return struct.unpack(self.byteorder + 'q', self.path_or_buf.read(8))[0] + 17 else: raise ValueError() def _read_old_header(self, first_char): self.format_version = struct.unpack('b', first_char)[0] if self.format_version not in [104, 105, 108, 111, 113, 114, 115]: raise ValueError(_version_error) self._set_encoding() self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[ 0] == 0x1 and '>' or '<' self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0] self.path_or_buf.read(1) # unused self.nvar = struct.unpack(self.byteorder + 'H', self.path_or_buf.read(2))[0] self.nobs = self._get_nobs() self.data_label = self._get_data_label() self.time_stamp = self._get_time_stamp() # descriptors if self.format_version > 108: typlist = [ord(self.path_or_buf.read(1)) for i in range(self.nvar)] else: buf = self.path_or_buf.read(self.nvar) typlistb = np.frombuffer(buf, dtype=np.uint8) typlist = [] for tp in typlistb: if tp in self.OLD_TYPE_MAPPING: typlist.append(self.OLD_TYPE_MAPPING[tp]) else: typlist.append(tp - 127) # py2 string, py3 bytes try: self.typlist = [self.TYPE_MAP[typ] for typ in typlist] except ValueError: raise ValueError("cannot convert stata types [{0}]" .format(','.join(str(x) for x in typlist))) try: self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist] except ValueError: raise ValueError("cannot convert stata dtypes [{0}]" .format(','.join(str(x) for x in typlist))) if self.format_version > 108: self.varlist = [self._null_terminate(self.path_or_buf.read(33)) for i in range(self.nvar)] else: self.varlist = [self._null_terminate(self.path_or_buf.read(9)) for i in range(self.nvar)] self.srtlist = struct.unpack( self.byteorder + ('h' * (self.nvar + 1)), self.path_or_buf.read(2 * (self.nvar + 1)) )[:-1] self.fmtlist = self._get_fmtlist() self.lbllist = self._get_lbllist() self._variable_labels = self._get_variable_labels() # ignore expansion fields (Format 105 and later) # When reading, read five bytes; the last four bytes now tell you # the size of the next read, which you discard. You then continue # like this until you read 5 bytes of zeros. 
if self.format_version > 104: while True: data_type = struct.unpack(self.byteorder + 'b', self.path_or_buf.read(1))[0] if self.format_version > 108: data_len = struct.unpack(self.byteorder + 'i', self.path_or_buf.read(4))[0] else: data_len = struct.unpack(self.byteorder + 'h', self.path_or_buf.read(2))[0] if data_type == 0: break self.path_or_buf.read(data_len) # necessary data to continue parsing self.data_location = self.path_or_buf.tell() def _setup_dtype(self): """Map between numpy and state dtypes""" if self._dtype is not None: return self._dtype dtype = [] # Convert struct data types to numpy data type for i, typ in enumerate(self.typlist): if typ in self.NUMPY_TYPE_MAP: dtype.append(('s' + str(i), self.byteorder + self.NUMPY_TYPE_MAP[typ])) else: dtype.append(('s' + str(i), 'S' + str(typ))) dtype = np.dtype(dtype) self._dtype = dtype return self._dtype def _calcsize(self, fmt): return (type(fmt) is int and fmt or struct.calcsize(self.byteorder + fmt)) def _decode(self, s): s = s.partition(b"\0")[0] return s.decode('utf-8') def _null_terminate(self, s): # have bytes not strings, so must decode s = s.partition(b"\0")[0] return s.decode(self._encoding) def _read_value_labels(self): if self._value_labels_read: # Don't read twice return if self.format_version <= 108: # Value labels are not supported in version 108 and earlier. self._value_labels_read = True self.value_label_dict = dict() return if self.format_version >= 117: self.path_or_buf.seek(self.seek_value_labels) else: offset = self.nobs * self._dtype.itemsize self.path_or_buf.seek(self.data_location + offset) self._value_labels_read = True self.value_label_dict = dict() while True: if self.format_version >= 117: if self.path_or_buf.read(5) == b'</val': # <lbl> break # end of value label table slength = self.path_or_buf.read(4) if not slength: break # end of value label table (format < 117) if self.format_version <= 117: labname = self._null_terminate(self.path_or_buf.read(33)) else: labname = self._decode(self.path_or_buf.read(129)) self.path_or_buf.read(3) # padding n = struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0] txtlen = struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0] off = np.frombuffer(self.path_or_buf.read(4 * n), dtype=self.byteorder + "i4", count=n) val = np.frombuffer(self.path_or_buf.read(4 * n), dtype=self.byteorder + "i4", count=n) ii = np.argsort(off) off = off[ii] val = val[ii] txt = self.path_or_buf.read(txtlen) self.value_label_dict[labname] = dict() for i in range(n): end = off[i + 1] if i < n - 1 else txtlen if self.format_version <= 117: self.value_label_dict[labname][val[i]] = ( self._null_terminate(txt[off[i]:end])) else: self.value_label_dict[labname][val[i]] = ( self._decode(txt[off[i]:end])) if self.format_version >= 117: self.path_or_buf.read(6) # </lbl> self._value_labels_read = True def _read_strls(self): self.path_or_buf.seek(self.seek_strls) # Wrap v_o in a string to allow uint64 values as keys on 32bit OS self.GSO = {'0': ''} while True: if self.path_or_buf.read(3) != b'GSO': break if self.format_version == 117: v_o = struct.unpack(self.byteorder + 'Q', self.path_or_buf.read(8))[0] else: buf = self.path_or_buf.read(12) # Only tested on little endian file on little endian machine. 
if self.byteorder == '<': buf = buf[0:2] + buf[4:10] else: buf = buf[0:2] + buf[6:] v_o = struct.unpack('Q', buf)[0] typ = struct.unpack('B', self.path_or_buf.read(1))[0] length = struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0] va = self.path_or_buf.read(length) if typ == 130: va = va[0:-1].decode(self._encoding) # Wrap v_o in a string to allow uint64 values as keys on 32bit OS self.GSO[str(v_o)] = va # legacy @Appender(_data_method_doc) def data(self, **kwargs): warnings.warn("'data' is deprecated, use 'read' instead") if self._data_read: raise Exception("Data has already been read.") self._data_read = True return self.read(None, **kwargs) def __next__(self): return self.read(nrows=self._chunksize or 1) def get_chunk(self, size=None): """ Reads lines from Stata file and returns as dataframe Parameters ---------- size : int, defaults to None Number of lines to read. If None, reads whole file. Returns ------- DataFrame """ if size is None: size = self._chunksize return self.read(nrows=size) @Appender(_read_method_doc) @deprecate_kwarg(old_arg_name='index', new_arg_name='index_col') def read(self, nrows=None, convert_dates=None, convert_categoricals=None, index_col=None, convert_missing=None, preserve_dtypes=None, columns=None, order_categoricals=None): # Handle empty file or chunk. If reading incrementally raise # StopIteration. If reading the whole thing return an empty # data frame. if (self.nobs == 0) and (nrows is None): self._can_read_value_labels = True self._data_read = True self.close() return DataFrame(columns=self.varlist) # Handle options if convert_dates is None: convert_dates = self._convert_dates if convert_categoricals is None: convert_categoricals = self._convert_categoricals if convert_missing is None: convert_missing = self._convert_missing if preserve_dtypes is None: preserve_dtypes = self._preserve_dtypes if columns is None: columns = self._columns if order_categoricals is None: order_categoricals = self._order_categoricals if index_col is None: index_col = self._index_col if nrows is None: nrows = self.nobs if (self.format_version >= 117) and (not self._value_labels_read): self._can_read_value_labels = True self._read_strls() # Read data dtype = self._dtype max_read_len = (self.nobs - self._lines_read) * dtype.itemsize read_len = nrows * dtype.itemsize read_len = min(read_len, max_read_len) if read_len <= 0: # Iterator has finished, should never be here unless # we are reading the file incrementally if convert_categoricals: self._read_value_labels() self.close() raise StopIteration offset = self._lines_read * dtype.itemsize self.path_or_buf.seek(self.data_location + offset) read_lines = min(nrows, self.nobs - self._lines_read) data = np.frombuffer(self.path_or_buf.read(read_len), dtype=dtype, count=read_lines) self._lines_read += read_lines if self._lines_read == self.nobs: self._can_read_value_labels = True self._data_read = True # if necessary, swap the byte order to native here if self.byteorder != self._native_byteorder: data = data.byteswap().newbyteorder() if convert_categoricals: self._read_value_labels() if len(data) == 0: data = DataFrame(columns=self.varlist) else: data = DataFrame.from_records(data) data.columns = self.varlist # If index is not specified, use actual row number rather than # restarting at 0 for each chunk. 
if index_col is None: ix = np.arange(self._lines_read - read_lines, self._lines_read) data = data.set_index(ix) if columns is not None: try: data = self._do_select_columns(data, columns) except ValueError: self.close() raise # Decode strings for col, typ in zip(data, self.typlist): if type(typ) is int: data[col] = data[col].apply( self._null_terminate, convert_dtype=True) data = self._insert_strls(data) cols_ = np.where(self.dtyplist)[0] # Convert columns (if needed) to match input type ix = data.index requires_type_conversion = False data_formatted = [] for i in cols_: if self.dtyplist[i] is not None: col = data.columns[i] dtype = data[col].dtype if dtype != np.dtype(object) and dtype != self.dtyplist[i]: requires_type_conversion = True data_formatted.append( (col, Series(data[col], ix, self.dtyplist[i]))) else: data_formatted.append((col, data[col])) if requires_type_conversion: data = DataFrame.from_dict(OrderedDict(data_formatted)) del data_formatted self._do_convert_missing(data, convert_missing) if convert_dates: cols = np.where(lmap(lambda x: any(x.startswith(fmt) for fmt in _date_formats), self.fmtlist))[0] for i in cols: col = data.columns[i] try: data[col] = _stata_elapsed_date_to_datetime_vec( data[col], self.fmtlist[i]) except ValueError: self.close() raise if convert_categoricals and self.format_version > 108: data = self._do_convert_categoricals(data, self.value_label_dict, self.lbllist, order_categoricals) if not preserve_dtypes: retyped_data = [] convert = False for col in data: dtype = data[col].dtype if dtype in (np.float16, np.float32): dtype = np.float64 convert = True elif dtype in (np.int8, np.int16, np.int32): dtype = np.int64 convert = True retyped_data.append((col, data[col].astype(dtype))) if convert: data = DataFrame.from_dict(OrderedDict(retyped_data)) if index_col is not None: data = data.set_index(data.pop(index_col)) return data def _do_convert_missing(self, data, convert_missing): # Check for missing values, and replace if found for i, colname in enumerate(data): fmt = self.typlist[i] if fmt not in self.VALID_RANGE: continue nmin, nmax = self.VALID_RANGE[fmt] series = data[colname] missing = np.logical_or(series < nmin, series > nmax) if not missing.any(): continue if convert_missing: # Replacement follows Stata notation missing_loc = np.argwhere(missing) umissing, umissing_loc = np.unique(series[missing], return_inverse=True) replacement = Series(series, dtype=np.object) for j, um in enumerate(umissing): missing_value = StataMissingValue(um) loc = missing_loc[umissing_loc == j] replacement.iloc[loc] = missing_value else: # All replacements are identical dtype = series.dtype if dtype not in (np.float32, np.float64): dtype = np.float64 replacement = Series(series, dtype=dtype) replacement[missing] = np.nan data[colname] = replacement def _insert_strls(self, data): if not hasattr(self, 'GSO') or len(self.GSO) == 0: return data for i, typ in enumerate(self.typlist): if typ != 'Q': continue # Wrap v_o in a string to allow uint64 values as keys on 32bit OS data.iloc[:, i] = [self.GSO[str(k)] for k in data.iloc[:, i]] return data def _do_select_columns(self, data, columns): if not self._column_selector_set: column_set = set(columns) if len(column_set) != len(columns): raise ValueError('columns contains duplicate entries') unmatched = column_set.difference(data.columns) if unmatched: raise ValueError('The following columns were not found in the ' 'Stata data set: ' + ', '.join(list(unmatched))) # Copy information for retained columns for later processing dtyplist = 
[] typlist = [] fmtlist = [] lbllist = [] for col in columns: i = data.columns.get_loc(col) dtyplist.append(self.dtyplist[i]) typlist.append(self.typlist[i]) fmtlist.append(self.fmtlist[i]) lbllist.append(self.lbllist[i]) self.dtyplist = dtyplist self.typlist = typlist self.fmtlist = fmtlist self.lbllist = lbllist self._column_selector_set = True return data[columns] def _do_convert_categoricals(self, data, value_label_dict, lbllist, order_categoricals): """ Converts categorical columns to Categorical type. """ value_labels = list(compat.iterkeys(value_label_dict)) cat_converted_data = [] for col, label in zip(data, lbllist): if label in value_labels: # Explicit call with ordered=True cat_data = Categorical(data[col], ordered=order_categoricals) categories = [] for category in cat_data.categories: if category in value_label_dict[label]: categories.append(value_label_dict[label][category]) else: categories.append(category) # Partially labeled try: cat_data.categories = categories except ValueError: vc = Series(categories).value_counts() repeats = list(vc.index[vc > 1]) repeats = '\n' + '-' * 80 + '\n'.join(repeats) raise ValueError('Value labels for column {col} are not ' 'unique. The repeated labels are:\n' '{repeats}' .format(col=col, repeats=repeats)) # TODO: is the next line needed above in the data(...) method? cat_data = Series(cat_data, index=data.index) cat_converted_data.append((col, cat_data)) else: cat_converted_data.append((col, data[col])) data = DataFrame.from_dict(OrderedDict(cat_converted_data)) return data def data_label(self): """Returns data label of Stata file""" return self.data_label def variable_labels(self): """Returns variable labels as a dict, associating each variable name with corresponding label """ return dict(zip(self.varlist, self._variable_labels)) def value_labels(self): """Returns a dict, associating each variable name a dict, associating each value its corresponding label """ if not self._value_labels_read: self._read_value_labels() return self.value_label_dict def _open_file_binary_write(fname): """ Open a binary file or no-op if file-like Parameters ---------- fname : string path, path object or buffer Returns ------- file : file-like object File object supporting write own : bool True if the file was created, otherwise False """ if hasattr(fname, 'write'): # if 'b' not in fname.mode: return fname, False return open(fname, "wb"), True def _set_endianness(endianness): if endianness.lower() in ["<", "little"]: return "<" elif endianness.lower() in [">", "big"]: return ">" else: # pragma : no cover raise ValueError("Endianness %s not understood" % endianness) def _pad_bytes(name, length): """ Takes a char string and pads it with null bytes until it's length chars """ return name + "\x00" * (length - len(name)) def _convert_datetime_to_stata_type(fmt): """ Converts from one of the stata date formats to a type in TYPE_MAP """ if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq", "%tq", "th", "%th", "ty", "%ty"]: return np.float64 # Stata expects doubles for SIFs else: raise NotImplementedError("Format %s not implemented" % fmt) def _maybe_convert_to_int_keys(convert_dates, varlist): new_dict = {} for key in convert_dates: if not convert_dates[key].startswith("%"): # make sure proper fmts convert_dates[key] = "%" + convert_dates[key] if key in varlist: new_dict.update({varlist.index(key): convert_dates[key]}) else: if not isinstance(key, int): raise ValueError("convert_dates key must be a " "column or an integer") new_dict.update({key: 
convert_dates[key]}) return new_dict def _dtype_to_stata_type(dtype, column): """ Converts dtype types to stata types. Returns the byte of the given ordinal. See TYPE_MAP and comments for an explanation. This is also explained in the dta spec. 1 - 244 are strings of this length Pandas Stata 251 - for int8 byte 252 - for int16 int 253 - for int32 long 254 - for float32 float 255 - for double double If there are dates to convert, then dtype will already have the correct type inserted. """ # TODO: expand to handle datetime to integer conversion if dtype.type == np.object_: # try to coerce it to the biggest string # not memory efficient, what else could we # do? itemsize = max_len_string_array(ensure_object(column.values)) return max(itemsize, 1) elif dtype == np.float64: return 255 elif dtype == np.float32: return 254 elif dtype == np.int32: return 253 elif dtype == np.int16: return 252 elif dtype == np.int8: return 251 else: # pragma : no cover raise NotImplementedError("Data type %s not supported." % dtype) def _dtype_to_default_stata_fmt(dtype, column, dta_version=114, force_strl=False): """ Maps numpy dtype to stata's default format for this type. Not terribly important since users can change this in Stata. Semantics are object -> "%DDs" where DD is the length of the string. If not a string, raise ValueError float64 -> "%10.0g" float32 -> "%9.0g" int64 -> "%9.0g" int32 -> "%12.0g" int16 -> "%8.0g" int8 -> "%8.0g" strl -> "%9s" """ # TODO: Refactor to combine type with format # TODO: expand this to handle a default datetime format? if dta_version < 117: max_str_len = 244 else: max_str_len = 2045 if force_strl: return '%9s' if dtype.type == np.object_: inferred_dtype = infer_dtype(column.dropna()) if not (inferred_dtype in ('string', 'unicode') or len(column) == 0): raise ValueError('Writing general object arrays is not supported') itemsize = max_len_string_array(ensure_object(column.values)) if itemsize > max_str_len: if dta_version >= 117: return '%9s' else: raise ValueError(excessive_string_length_error % column.name) return "%" + str(max(itemsize, 1)) + "s" elif dtype == np.float64: return "%10.0g" elif dtype == np.float32: return "%9.0g" elif dtype == np.int32: return "%12.0g" elif dtype == np.int8 or dtype == np.int16: return "%8.0g" else: # pragma : no cover raise NotImplementedError("Data type %s not supported." % dtype) class StataWriter(StataParser): """ A class for writing Stata binary dta files Parameters ---------- fname : path (string), buffer or path object string, path object (pathlib.Path or py._path.local.LocalPath) or object implementing a binary write() functions. If using a buffer then the buffer will not be automatically closed after the file is written. .. versionadded:: 0.23.0 support for pathlib, py.path. data : DataFrame Input to save convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information write_index : bool Write the index to Stata dataset. encoding : str Default is latin-1. Only latin-1 and ascii are supported. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder` time_stamp : datetime A datetime to use as file creation date. Default is the current time data_label : str A label for the data set. 
Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. .. versionadded:: 0.19.0 Returns ------- writer : StataWriter instance The StataWriter instance has a write_file method, which will write the file to the given `fname`. Raises ------ NotImplementedError * If datetimes contain timezone information ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column dtype is not representable in Stata * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters Examples -------- >>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b']) >>> writer = StataWriter('./data_file.dta', data) >>> writer.write_file() Or with dates >>> from datetime import datetime >>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date']) >>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'}) >>> writer.write_file() """ _max_string_length = 244 @deprecate_kwarg(old_arg_name='encoding', new_arg_name=None) def __init__(self, fname, data, convert_dates=None, write_index=True, encoding="latin-1", byteorder=None, time_stamp=None, data_label=None, variable_labels=None): super(StataWriter, self).__init__() self._convert_dates = {} if convert_dates is None else convert_dates self._write_index = write_index self._encoding = 'latin-1' self._time_stamp = time_stamp self._data_label = data_label self._variable_labels = variable_labels self._own_file = True # attach nobs, nvars, data, varlist, typlist self._prepare_pandas(data) if byteorder is None: byteorder = sys.byteorder self._byteorder = _set_endianness(byteorder) self._fname = _stringify_path(fname) self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8} self._converted_names = {} def _write(self, to_write): """ Helper to call encode before writing to file for Python 3 compat. 
""" if compat.PY3: self._file.write(to_write.encode(self._encoding or self._default_encoding)) else: self._file.write(to_write) def _prepare_categoricals(self, data): """Check for categorical columns, retain categorical information for Stata file and convert categorical data to int""" is_cat = [is_categorical_dtype(data[col]) for col in data] self._is_col_cat = is_cat self._value_labels = [] if not any(is_cat): return data get_base_missing_value = StataMissingValue.get_base_missing_value data_formatted = [] for col, col_is_cat in zip(data, is_cat): if col_is_cat: self._value_labels.append(StataValueLabel(data[col])) dtype = data[col].cat.codes.dtype if dtype == np.int64: raise ValueError('It is not possible to export ' 'int64-based categorical data to Stata.') values = data[col].cat.codes.values.copy() # Upcast if needed so that correct missing values can be set if values.max() >= get_base_missing_value(dtype): if dtype == np.int8: dtype = np.int16 elif dtype == np.int16: dtype = np.int32 else: dtype = np.float64 values = np.array(values, dtype=dtype) # Replace missing values with Stata missing value for type values[values == -1] = get_base_missing_value(dtype) data_formatted.append((col, values)) else: data_formatted.append((col, data[col])) return DataFrame.from_dict(OrderedDict(data_formatted)) def _replace_nans(self, data): # return data """Checks floating point data columns for nans, and replaces these with the generic Stata for missing value (.)""" for c in data: dtype = data[c].dtype if dtype in (np.float32, np.float64): if dtype == np.float32: replacement = self.MISSING_VALUES['f'] else: replacement = self.MISSING_VALUES['d'] data[c] = data[c].fillna(replacement) return data def _update_strl_names(self): """No-op, forward compatibility""" pass def _check_column_names(self, data): """ Checks column names to ensure that they are valid Stata column names. 
This includes checks for: * Non-string names * Stata keywords * Variables that start with numbers * Variables with names that are too long When an illegal variable name is detected, it is converted, and if dates are exported, the variable name is propagated to the date conversion dictionary """ converted_names = {} columns = list(data.columns) original_columns = columns[:] duplicate_var_id = 0 for j, name in enumerate(columns): orig_name = name if not isinstance(name, string_types): name = text_type(name) for c in name: if ((c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and (c < '0' or c > '9') and c != '_'): name = name.replace(c, '_') # Variable name must not be a reserved word if name in self.RESERVED_WORDS: name = '_' + name # Variable name may not start with a number if name[0] >= '0' and name[0] <= '9': name = '_' + name name = name[:min(len(name), 32)] if not name == orig_name: # check for duplicates while columns.count(name) > 0: # prepend ascending number to avoid duplicates name = '_' + str(duplicate_var_id) + name name = name[:min(len(name), 32)] duplicate_var_id += 1 converted_names[orig_name] = name columns[j] = name data.columns = columns # Check date conversion, and fix key if needed if self._convert_dates: for c, o in zip(columns, original_columns): if c != o: self._convert_dates[c] = self._convert_dates[o] del self._convert_dates[o] if converted_names: conversion_warning = [] for orig_name, name in converted_names.items(): # need to possibly encode the orig name if its unicode try: orig_name = orig_name.encode('utf-8') except (UnicodeDecodeError, AttributeError): pass msg = '{0} -> {1}'.format(orig_name, name) conversion_warning.append(msg) ws = invalid_name_doc.format('\n '.join(conversion_warning)) warnings.warn(ws, InvalidColumnName) self._converted_names = converted_names self._update_strl_names() return data def _set_formats_and_types(self, data, dtypes): self.typlist = [] self.fmtlist = [] for col, dtype in dtypes.iteritems(): self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, data[col])) self.typlist.append(_dtype_to_stata_type(dtype, data[col])) def _prepare_pandas(self, data): # NOTE: we might need a different API / class for pandas objects so # we can set different semantics - handle this with a PR to pandas.io data = data.copy() if self._write_index: data = data.reset_index() # Ensure column names are strings data = self._check_column_names(data) # Check columns for compatibility with stata, upcast if necessary # Raise if outside the supported range data = _cast_to_stata_types(data) # Replace NaNs with Stata missing values data = self._replace_nans(data) # Convert categoricals to int data, and strip labels data = self._prepare_categoricals(data) self.nobs, self.nvar = data.shape self.data = data self.varlist = data.columns.tolist() dtypes = data.dtypes # Ensure all date columns are converted for col in data: if col in self._convert_dates: continue if is_datetime64_dtype(data[col]): self._convert_dates[col] = 'tc' self._convert_dates = _maybe_convert_to_int_keys(self._convert_dates, self.varlist) for key in self._convert_dates: new_type = _convert_datetime_to_stata_type( self._convert_dates[key] ) dtypes[key] = np.dtype(new_type) self._set_formats_and_types(data, dtypes) # set the given format for the datetime cols if self._convert_dates is not None: for key in self._convert_dates: self.fmtlist[key] = self._convert_dates[key] def write_file(self): self._file, self._own_file = _open_file_binary_write(self._fname) try: 
self._write_header(time_stamp=self._time_stamp, data_label=self._data_label) self._write_map() self._write_variable_types() self._write_varnames() self._write_sortlist() self._write_formats() self._write_value_label_names() self._write_variable_labels() self._write_expansion_fields() self._write_characteristics() self._prepare_data() self._write_data() self._write_strls() self._write_value_labels() self._write_file_close_tag() self._write_map() finally: self._close() def _close(self): """ Close the file if it was created by the writer. If a buffer or file-like object was passed in, for example a GzipFile, then leave this file open for the caller to close. In either case, attempt to flush the file contents to ensure they are written to disk (if supported) """ # Some file-like objects might not support flush try: self._file.flush() except AttributeError: pass if self._own_file: self._file.close() def _write_map(self): """No-op, future compatibility""" pass def _write_file_close_tag(self): """No-op, future compatibility""" pass def _write_characteristics(self): """No-op, future compatibility""" pass def _write_strls(self): """No-op, future compatibility""" pass def _write_expansion_fields(self): """Write 5 zeros for expansion fields""" self._write(_pad_bytes("", 5)) def _write_value_labels(self): for vl in self._value_labels: self._file.write(vl.generate_value_label(self._byteorder, self._encoding)) def _write_header(self, data_label=None, time_stamp=None): byteorder = self._byteorder # ds_format - just use 114 self._file.write(struct.pack("b", 114)) # byteorder self._write(byteorder == ">" and "\x01" or "\x02") # filetype self._write("\x01") # unused self._write("\x00") # number of vars, 2 bytes self._file.write(struct.pack(byteorder + "h", self.nvar)[:2]) # number of obs, 4 bytes self._file.write(struct.pack(byteorder + "i", self.nobs)[:4]) # data label 81 bytes, char, null terminated if data_label is None: self._file.write(self._null_terminate(_pad_bytes("", 80))) else: self._file.write( self._null_terminate(_pad_bytes(data_label[:80], 80)) ) # time stamp, 18 bytes, char, null terminated # format dd Mon yyyy hh:mm if time_stamp is None: time_stamp = datetime.datetime.now() elif not isinstance(time_stamp, datetime.datetime): raise ValueError("time_stamp should be datetime type") # GH #13856 # Avoid locale-specific month conversion months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] month_lookup = {i + 1: month for i, month in enumerate(months)} ts = (time_stamp.strftime("%d ") + month_lookup[time_stamp.month] + time_stamp.strftime(" %Y %H:%M")) self._file.write(self._null_terminate(ts)) def _write_variable_types(self): for typ in self.typlist: self._file.write(struct.pack('B', typ)) def _write_varnames(self): # varlist names are checked by _check_column_names # varlist, requires null terminated for name in self.varlist: name = self._null_terminate(name, True) name = _pad_bytes(name[:32], 33) self._write(name) def _write_sortlist(self): # srtlist, 2*(nvar+1), int array, encoded by byteorder srtlist = _pad_bytes("", 2 * (self.nvar + 1)) self._write(srtlist) def _write_formats(self): # fmtlist, 49*nvar, char array for fmt in self.fmtlist: self._write(_pad_bytes(fmt, 49)) def _write_value_label_names(self): # lbllist, 33*nvar, char array for i in range(self.nvar): # Use variable name when categorical if self._is_col_cat[i]: name = self.varlist[i] name = self._null_terminate(name, True) name = _pad_bytes(name[:32], 33) self._write(name) else: # Default 
is empty label self._write(_pad_bytes("", 33)) def _write_variable_labels(self): # Missing labels are 80 blank characters plus null termination blank = _pad_bytes('', 81) if self._variable_labels is None: for i in range(self.nvar): self._write(blank) return for col in self.data: if col in self._variable_labels: label = self._variable_labels[col] if len(label) > 80: raise ValueError('Variable labels must be 80 characters ' 'or fewer') is_latin1 = all(ord(c) < 256 for c in label) if not is_latin1: raise ValueError('Variable labels must contain only ' 'characters that can be encoded in ' 'Latin-1') self._write(_pad_bytes(label, 81)) else: self._write(blank) def _convert_strls(self, data): """No-op, future compatibility""" return data def _prepare_data(self): data = self.data typlist = self.typlist convert_dates = self._convert_dates # 1. Convert dates if self._convert_dates is not None: for i, col in enumerate(data): if i in convert_dates: data[col] = _datetime_to_stata_elapsed_vec(data[col], self.fmtlist[i]) # 2. Convert strls data = self._convert_strls(data) # 3. Convert bad string data to '' and pad to correct length dtypes = [] data_cols = [] has_strings = False native_byteorder = self._byteorder == _set_endianness(sys.byteorder) for i, col in enumerate(data): typ = typlist[i] if typ <= self._max_string_length: has_strings = True data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,)) stype = 'S%d' % typ dtypes.append(('c' + str(i), stype)) string = data[col].str.encode(self._encoding) data_cols.append(string.values.astype(stype)) else: values = data[col].values dtype = data[col].dtype if not native_byteorder: dtype = dtype.newbyteorder(self._byteorder) dtypes.append(('c' + str(i), dtype)) data_cols.append(values) dtypes = np.dtype(dtypes) if has_strings or not native_byteorder: self.data = np.fromiter(zip(*data_cols), dtype=dtypes) else: self.data = data.to_records(index=False) def _write_data(self): data = self.data self._file.write(data.tobytes()) def _null_terminate(self, s, as_string=False): null_byte = '\x00' if compat.PY3 and not as_string: s += null_byte return s.encode(self._encoding) else: s += null_byte return s def _dtype_to_stata_type_117(dtype, column, force_strl): """ Converts dtype types to stata types. Returns the byte of the given ordinal. See TYPE_MAP and comments for an explanation. This is also explained in the dta spec. 1 - 2045 are strings of this length Pandas Stata 32768 - for object strL 65526 - for int8 byte 65527 - for int16 int 65528 - for int32 long 65529 - for float32 float 65530 - for double double If there are dates to convert, then dtype will already have the correct type inserted. """ # TODO: expand to handle datetime to integer conversion if force_strl: return 32768 if dtype.type == np.object_: # try to coerce it to the biggest string # not memory efficient, what else could we # do? itemsize = max_len_string_array(ensure_object(column.values)) itemsize = max(itemsize, 1) if itemsize <= 2045: return itemsize return 32768 elif dtype == np.float64: return 65526 elif dtype == np.float32: return 65527 elif dtype == np.int32: return 65528 elif dtype == np.int16: return 65529 elif dtype == np.int8: return 65530 else: # pragma : no cover raise NotImplementedError("Data type %s not supported." % dtype) def _bytes(s, encoding): if compat.PY3: return bytes(s, encoding) else: return bytes(s.encode(encoding)) def _pad_bytes_new(name, length): """ Takes a bytes instance and pads it with null bytes until it's length chars. 
""" if isinstance(name, string_types): name = _bytes(name, 'utf-8') return name + b'\x00' * (length - len(name)) class StataStrLWriter(object): """ Converter for Stata StrLs Stata StrLs map 8 byte values to strings which are stored using a dictionary-like format where strings are keyed to two values. Parameters ---------- df : DataFrame DataFrame to convert columns : list List of columns names to convert to StrL version : int, optional dta version. Currently supports 117, 118 and 119 byteorder : str, optional Can be ">", "<", "little", or "big". default is `sys.byteorder` Notes ----- Supports creation of the StrL block of a dta file for dta versions 117, 118 and 119. These differ in how the GSO is stored. 118 and 119 store the GSO lookup value as a uint32 and a uint64, while 117 uses two uint32s. 118 and 119 also encode all strings as unicode which is required by the format. 117 uses 'latin-1' a fixed width encoding that extends the 7-bit ascii table with an additional 128 characters. """ def __init__(self, df, columns, version=117, byteorder=None): if version not in (117, 118, 119): raise ValueError('Only dta versions 117, 118 and 119 supported') self._dta_ver = version self.df = df self.columns = columns self._gso_table = OrderedDict((('', (0, 0)),)) if byteorder is None: byteorder = sys.byteorder self._byteorder = _set_endianness(byteorder) gso_v_type = 'I' # uint32 gso_o_type = 'Q' # uint64 self._encoding = 'utf-8' if version == 117: o_size = 4 gso_o_type = 'I' # 117 used uint32 self._encoding = 'latin-1' elif version == 118: o_size = 6 else: # version == 119 o_size = 5 self._o_offet = 2 ** (8 * (8 - o_size)) self._gso_o_type = gso_o_type self._gso_v_type = gso_v_type def _convert_key(self, key): v, o = key return v + self._o_offet * o def generate_table(self): """ Generates the GSO lookup table for the DataFRame Returns ------- gso_table : OrderedDict Ordered dictionary using the string found as keys and their lookup position (v,o) as values gso_df : DataFrame DataFrame where strl columns have been converted to (v,o) values Notes ----- Modifies the DataFrame in-place. The DataFrame returned encodes the (v,o) values as uint64s. The encoding depends on teh dta version, and can be expressed as enc = v + o * 2 ** (o_size * 8) so that v is stored in the lower bits and o is in the upper bits. o_size is * 117: 4 * 118: 6 * 119: 5 """ gso_table = self._gso_table gso_df = self.df columns = list(gso_df.columns) selected = gso_df[self.columns] col_index = [(col, columns.index(col)) for col in self.columns] keys = np.empty(selected.shape, dtype=np.uint64) for o, (idx, row) in enumerate(selected.iterrows()): for j, (col, v) in enumerate(col_index): val = row[col] key = gso_table.get(val, None) if key is None: # Stata prefers human numbers key = (v + 1, o + 1) gso_table[val] = key keys[o, j] = self._convert_key(key) for i, col in enumerate(self.columns): gso_df[col] = keys[:, i] return gso_table, gso_df def _encode(self, s): """ Python 3 compatibility shim """ if compat.PY3: return s.encode(self._encoding) else: if isinstance(s, text_type): return s.encode(self._encoding) return s def generate_blob(self, gso_table): """ Generates the binary blob of GSOs that is written to the dta file. Parameters ---------- gso_table : OrderedDict Ordered dictionary (str, vo) Returns ------- gso : bytes Binary content of dta file to be placed between strl tags Notes ----- Output format depends on dta version. 117 uses two uint32s to express v and o while 118+ uses a uint32 for v and a uint64 for o. 
""" # Format information # Length includes null term # 117 # GSOvvvvooootllllxxxxxxxxxxxxxxx...x # 3 u4 u4 u1 u4 string + null term # # 118, 119 # GSOvvvvooooooootllllxxxxxxxxxxxxxxx...x # 3 u4 u8 u1 u4 string + null term bio = BytesIO() gso = _bytes('GSO', 'ascii') gso_type = struct.pack(self._byteorder + 'B', 130) null = struct.pack(self._byteorder + 'B', 0) v_type = self._byteorder + self._gso_v_type o_type = self._byteorder + self._gso_o_type len_type = self._byteorder + 'I' for strl, vo in gso_table.items(): if vo == (0, 0): continue v, o = vo # GSO bio.write(gso) # vvvv bio.write(struct.pack(v_type, v)) # oooo / oooooooo bio.write(struct.pack(o_type, o)) # t bio.write(gso_type) # llll encoded = self._encode(strl) bio.write(struct.pack(len_type, len(encoded) + 1)) # xxx...xxx s = _bytes(strl, 'utf-8') bio.write(s) bio.write(null) bio.seek(0) return bio.read() class StataWriter117(StataWriter): """ A class for writing Stata binary dta files in Stata 13 format (117) .. versionadded:: 0.23.0 Parameters ---------- fname : path (string), buffer or path object string, path object (pathlib.Path or py._path.local.LocalPath) or object implementing a binary write() functions. If using a buffer then the buffer will not be automatically closed after the file is written. data : DataFrame Input to save convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information write_index : bool Write the index to Stata dataset. encoding : str Default is latin-1. Only latin-1 and ascii are supported. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder` time_stamp : datetime A datetime to use as file creation date. Default is the current time data_label : str A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. convert_strl : list List of columns names to convert to Stata StrL format. Columns with more than 2045 characters are aautomatically written as StrL. Smaller columns can be converted by including the column name. Using StrLs can reduce output file size when strings are longer than 8 characters, and either frequently repeated or sparse. Returns ------- writer : StataWriter117 instance The StataWriter117 instance has a write_file method, which will write the file to the given `fname`. Raises ------ NotImplementedError * If datetimes contain timezone information ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column dtype is not representable in Stata * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters Examples -------- >>> from pandas.io.stata import StataWriter117 >>> data = pd.DataFrame([[1.0, 1, 'a']], columns=['a', 'b', 'c']) >>> writer = StataWriter117('./data_file.dta', data) >>> writer.write_file() Or with long strings stored in strl format >>> data = pd.DataFrame([['A relatively long string'], [''], ['']], ... columns=['strls']) >>> writer = StataWriter117('./data_file_with_long_strings.dta', data, ... 
convert_strl=['strls']) >>> writer.write_file() """ _max_string_length = 2045 @deprecate_kwarg(old_arg_name='encoding', new_arg_name=None) def __init__(self, fname, data, convert_dates=None, write_index=True, encoding="latin-1", byteorder=None, time_stamp=None, data_label=None, variable_labels=None, convert_strl=None): # Shallow copy since convert_strl might be modified later self._convert_strl = [] if convert_strl is None else convert_strl[:] super(StataWriter117, self).__init__(fname, data, convert_dates, write_index, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, variable_labels=variable_labels) self._map = None self._strl_blob = None @staticmethod def _tag(val, tag): """Surround val with <tag></tag>""" if isinstance(val, str) and compat.PY3: val = _bytes(val, 'utf-8') return (_bytes('<' + tag + '>', 'utf-8') + val + _bytes('</' + tag + '>', 'utf-8')) def _update_map(self, tag): """Update map location for tag with file position""" self._map[tag] = self._file.tell() def _write_header(self, data_label=None, time_stamp=None): """Write the file header""" byteorder = self._byteorder self._file.write(_bytes('<stata_dta>', 'utf-8')) bio = BytesIO() # ds_format - 117 bio.write(self._tag(_bytes('117', 'utf-8'), 'release')) # byteorder bio.write(self._tag(byteorder == ">" and "MSF" or "LSF", 'byteorder')) # number of vars, 2 bytes assert self.nvar < 2 ** 16 bio.write(self._tag(struct.pack(byteorder + "H", self.nvar), 'K')) # number of obs, 4 bytes bio.write(self._tag(struct.pack(byteorder + "I", self.nobs), 'N')) # data label 81 bytes, char, null terminated label = data_label[:80] if data_label is not None else '' label_len = struct.pack(byteorder + "B", len(label)) label = label_len + _bytes(label, 'utf-8') bio.write(self._tag(label, 'label')) # time stamp, 18 bytes, char, null terminated # format dd Mon yyyy hh:mm if time_stamp is None: time_stamp = datetime.datetime.now() elif not isinstance(time_stamp, datetime.datetime): raise ValueError("time_stamp should be datetime type") # Avoid locale-specific month conversion months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] month_lookup = {i + 1: month for i, month in enumerate(months)} ts = (time_stamp.strftime("%d ") + month_lookup[time_stamp.month] + time_stamp.strftime(" %Y %H:%M")) # '\x11' added due to inspection of Stata file ts = b'\x11' + _bytes(ts, 'utf8') bio.write(self._tag(ts, 'timestamp')) bio.seek(0) self._file.write(self._tag(bio.read(), 'header')) def _write_map(self): """Called twice during file write. The first populates the values in the map with 0s. 
The second call writes the final map locations when all blocks have been written.""" if self._map is None: self._map = OrderedDict((('stata_data', 0), ('map', self._file.tell()), ('variable_types', 0), ('varnames', 0), ('sortlist', 0), ('formats', 0), ('value_label_names', 0), ('variable_labels', 0), ('characteristics', 0), ('data', 0), ('strls', 0), ('value_labels', 0), ('stata_data_close', 0), ('end-of-file', 0))) # Move to start of map self._file.seek(self._map['map']) bio = BytesIO() for val in self._map.values(): bio.write(struct.pack(self._byteorder + 'Q', val)) bio.seek(0) self._file.write(self._tag(bio.read(), 'map')) def _write_variable_types(self): self._update_map('variable_types') bio = BytesIO() for typ in self.typlist: bio.write(struct.pack(self._byteorder + 'H', typ)) bio.seek(0) self._file.write(self._tag(bio.read(), 'variable_types')) def _write_varnames(self): self._update_map('varnames') bio = BytesIO() for name in self.varlist: name = self._null_terminate(name, True) name = _pad_bytes_new(name[:32], 33) bio.write(name) bio.seek(0) self._file.write(self._tag(bio.read(), 'varnames')) def _write_sortlist(self): self._update_map('sortlist') self._file.write(self._tag(b'\x00\00' * (self.nvar + 1), 'sortlist')) def _write_formats(self): self._update_map('formats') bio = BytesIO() for fmt in self.fmtlist: bio.write(_pad_bytes_new(fmt, 49)) bio.seek(0) self._file.write(self._tag(bio.read(), 'formats')) def _write_value_label_names(self): self._update_map('value_label_names') bio = BytesIO() for i in range(self.nvar): # Use variable name when categorical name = '' # default name if self._is_col_cat[i]: name = self.varlist[i] name = self._null_terminate(name, True) name = _pad_bytes_new(name[:32], 33) bio.write(name) bio.seek(0) self._file.write(self._tag(bio.read(), 'value_label_names')) def _write_variable_labels(self): # Missing labels are 80 blank characters plus null termination self._update_map('variable_labels') bio = BytesIO() blank = _pad_bytes_new('', 81) if self._variable_labels is None: for _ in range(self.nvar): bio.write(blank) bio.seek(0) self._file.write(self._tag(bio.read(), 'variable_labels')) return for col in self.data: if col in self._variable_labels: label = self._variable_labels[col] if len(label) > 80: raise ValueError('Variable labels must be 80 characters ' 'or fewer') is_latin1 = all(ord(c) < 256 for c in label) if not is_latin1: raise ValueError('Variable labels must contain only ' 'characters that can be encoded in ' 'Latin-1') bio.write(_pad_bytes_new(label, 81)) else: bio.write(blank) bio.seek(0) self._file.write(self._tag(bio.read(), 'variable_labels')) def _write_characteristics(self): self._update_map('characteristics') self._file.write(self._tag(b'', 'characteristics')) def _write_data(self): self._update_map('data') data = self.data self._file.write(b'<data>') self._file.write(data.tobytes()) self._file.write(b'</data>') def _write_strls(self): self._update_map('strls') strls = b'' if self._strl_blob is not None: strls = self._strl_blob self._file.write(self._tag(strls, 'strls')) def _write_expansion_fields(self): """No-op in dta 117+""" pass def _write_value_labels(self): self._update_map('value_labels') bio = BytesIO() for vl in self._value_labels: lab = vl.generate_value_label(self._byteorder, self._encoding) lab = self._tag(lab, 'lbl') bio.write(lab) bio.seek(0) self._file.write(self._tag(bio.read(), 'value_labels')) def _write_file_close_tag(self): self._update_map('stata_data_close') self._file.write(_bytes('</stata_dta>', 'utf-8')) 
self._update_map('end-of-file') def _update_strl_names(self): """Update column names for conversion to strl if they might have been changed to comply with Stata naming rules""" # Update convert_strl if names changed for orig, new in self._converted_names.items(): if orig in self._convert_strl: idx = self._convert_strl.index(orig) self._convert_strl[idx] = new def _convert_strls(self, data): """Convert columns to StrLs if either very large or in the convert_strl variable""" convert_cols = [] for i, col in enumerate(data): if self.typlist[i] == 32768 or col in self._convert_strl: convert_cols.append(col) if convert_cols: ssw = StataStrLWriter(data, convert_cols) tab, new_data = ssw.generate_table() data = new_data self._strl_blob = ssw.generate_blob(tab) return data def _set_formats_and_types(self, data, dtypes): self.typlist = [] self.fmtlist = [] for col, dtype in dtypes.iteritems(): force_strl = col in self._convert_strl fmt = _dtype_to_default_stata_fmt(dtype, data[col], dta_version=117, force_strl=force_strl) self.fmtlist.append(fmt) self.typlist.append(_dtype_to_stata_type_117(dtype, data[col], force_strl))
bsd-3-clause
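The StataWriter117 docstring in the row above shows long strings and `convert_strl` in separate examples; the following is a minimal illustrative sketch (the DataFrame, file name and label text are invented, not taken from the pandas sources) that combines a StrL column with a variable label, using only keyword arguments that appear in the constructor signature shown above.

# Illustrative sketch only -- data, file name and labels are invented.
import pandas as pd
from pandas.io.stata import StataWriter117

df = pd.DataFrame({
    "id": [1, 2, 3],
    "note": ["short", "a much longer free-text note " * 100, ""],
})

# Store 'note' as a Stata StrL and attach a variable label; convert_strl,
# variable_labels and write_index are all parameters of StataWriter117 above.
writer = StataWriter117(
    "./example_with_strl.dta",
    df,
    convert_strl=["note"],
    variable_labels={"note": "Free-text note"},
    write_index=False,
)
writer.write_file()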
cybernet14/scikit-learn
sklearn/utils/setup.py
296
2884
import os from os.path import join from sklearn._build_utils import get_blas_info def configuration(parent_package='', top_path=None): import numpy from numpy.distutils.misc_util import Configuration config = Configuration('utils', parent_package, top_path) config.add_subpackage('sparsetools') cblas_libs, blas_info = get_blas_info() cblas_compile_args = blas_info.pop('extra_compile_args', []) cblas_includes = [join('..', 'src', 'cblas'), numpy.get_include(), blas_info.pop('include_dirs', [])] libraries = [] if os.name == 'posix': libraries.append('m') cblas_libs.append('m') config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'], libraries=libraries) config.add_extension('arrayfuncs', sources=['arrayfuncs.c'], depends=[join('src', 'cholesky_delete.h')], libraries=cblas_libs, include_dirs=cblas_includes, extra_compile_args=cblas_compile_args, **blas_info ) config.add_extension( 'murmurhash', sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')], include_dirs=['src']) config.add_extension('lgamma', sources=['lgamma.c', join('src', 'gamma.c')], include_dirs=['src'], libraries=libraries) config.add_extension('graph_shortest_path', sources=['graph_shortest_path.c'], include_dirs=[numpy.get_include()]) config.add_extension('fast_dict', sources=['fast_dict.cpp'], language="c++", include_dirs=[numpy.get_include()], libraries=libraries) config.add_extension('seq_dataset', sources=['seq_dataset.c'], include_dirs=[numpy.get_include()]) config.add_extension('weight_vector', sources=['weight_vector.c'], include_dirs=cblas_includes, libraries=cblas_libs, **blas_info) config.add_extension("_random", sources=["_random.c"], include_dirs=[numpy.get_include()], libraries=libraries) config.add_extension("_logistic_sigmoid", sources=["_logistic_sigmoid.c"], include_dirs=[numpy.get_include()], libraries=libraries) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
bsd-3-clause
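As a hedged illustration of the pattern used by `configuration()` in the row above (the module name and source file below are invented and are not part of scikit-learn; the snippet would sit inside `configuration()` before `return config`), one more CBLAS-linked extension could be registered like this:

    # Hypothetical extension following the 'arrayfuncs'/'weight_vector' pattern:
    # reuse the detected CBLAS libraries, include dirs and compile flags.
    config.add_extension('my_blas_kernel',
                         sources=['my_blas_kernel.c'],
                         include_dirs=cblas_includes,
                         libraries=cblas_libs,
                         extra_compile_args=cblas_compile_args,
                         **blas_info)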
damaggu/SAMRI
samri/report/registration.py
1
3055
import hashlib
import multiprocessing as mp

import pandas as pd
from os import path

from joblib import Parallel, delayed
from nipype.interfaces import ants, fsl

def measure_sim(path_template, substitutions, reference,
	metric="MI",
	radius_or_number_of_bins = 8,
	sampling_strategy = "None",
	sampling_percentage=0.3,
	mask="",
	):
	"""Return a similarity metric score for two 3d images"""

	image_path = path_template.format(**substitutions)
	image_path = path.abspath(path.expanduser(image_path))

	#some BIDS identifier combinations may not exist:
	if not path.isfile(image_path):
		return {}
	file_data = {}
	file_data["path"] = image_path
	file_data["session"] = substitutions["session"]
	file_data["subject"] = substitutions["subject"]
	file_data["acquisition"] = substitutions["acquisition"]

	if "/func/" in path_template or "/dwi/" in path_template:
		image_name = path.basename(file_data["path"])
		merged_image_name = "merged_"+image_name
		merged_image_path = path.join("/tmp",merged_image_name)
		if not path.isfile(merged_image_path):
			temporal_mean = fsl.MeanImage()
			temporal_mean.inputs.in_file = image_path
			temporal_mean.inputs.out_file = merged_image_path
			temporal_mean_res = temporal_mean.run()
			image_path = temporal_mean_res.outputs.out_file
		else:
			image_path = merged_image_path

	sim = ants.MeasureImageSimilarity()
	sim.inputs.dimension = 3
	sim.inputs.metric = metric
	sim.inputs.fixed_image = reference
	sim.inputs.moving_image = image_path
	sim.inputs.metric_weight = 1.0
	sim.inputs.radius_or_number_of_bins = radius_or_number_of_bins
	sim.inputs.sampling_strategy = sampling_strategy
	sim.inputs.sampling_percentage = sampling_percentage
	if mask:
		sim.inputs.fixed_image_mask = mask
	#sim.inputs.moving_image_mask = 'mask.nii.gz'
	sim_res = sim.run()
	file_data["similarity"] = sim_res.outputs.similarity

	return file_data

def get_scores(file_template, substitutions, reference,
	metric="MI",
	radius_or_number_of_bins = 8,
	sampling_strategy = "None",
	sampling_percentage=0.3,
	save_as="",
	mask="",
	):
	"""Create a `pandas.DataFrame` (optionally savable as `.csv`), containing the
	similarity scores and BIDS identifier fields for images from a BIDS directory.
	"""

	reference = path.abspath(path.expanduser(reference))

	n_jobs = mp.cpu_count()-2
	similarity_data = Parallel(n_jobs=n_jobs, verbose=0, backend="threading")(map(delayed(measure_sim),
		[file_template]*len(substitutions),
		substitutions,
		[reference]*len(substitutions),
		[metric]*len(substitutions),
		[radius_or_number_of_bins]*len(substitutions),
		[sampling_strategy]*len(substitutions),
		[sampling_percentage]*len(substitutions),
		[mask]*len(substitutions),
		))
	df = pd.DataFrame.from_dict(similarity_data)
	df.dropna(axis=0, how='any', inplace=True) #some rows will be empty

	if save_as:
		save_as = path.abspath(path.expanduser(save_as))
		if save_as.lower().endswith('.csv'):
			df.to_csv(save_as)
		else:
			raise ValueError("Please specify an output path ending in any one of "+",".join((".csv",))+".")
	return df
gpl-3.0
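A hedged usage sketch for `get_scores` from the row above (all paths, subject/session identifiers and the reference image below are invented): each substitution dict must carry the `subject`, `session` and `acquisition` keys that `measure_sim` reads, and `save_as` must end in `.csv` as the function enforces.

# Illustrative only -- paths and BIDS identifiers are made up.
from samri.report.registration import get_scores

substitutions = [
    {"subject": sub, "session": ses, "acquisition": "EPI"}
    for sub in ("4001", "4002")
    for ses in ("ofM", "ofMaF")
]

scores = get_scores(
    # str.format() template whose placeholders match the substitution keys.
    "~/ni_data/preprocessed/sub-{subject}/ses-{session}/func/"
    "sub-{subject}_ses-{session}_acq-{acquisition}_bold.nii.gz",
    substitutions,
    reference="~/templates/template_200micron.nii",
    metric="MI",
    save_as="registration_scores.csv",  # must end in .csv
)
print(scores[["subject", "session", "similarity"]])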
gfyoung/pandas
pandas/tests/plotting/test_datetimelike.py
2
55641
""" Test cases for time series specific (freq conversion, etc) """ from datetime import date, datetime, time, timedelta import pickle import sys import numpy as np import pytest from pandas._libs.tslibs import BaseOffset, to_offset import pandas.util._test_decorators as td from pandas import DataFrame, Index, NaT, Series, isna, to_datetime import pandas._testing as tm from pandas.core.indexes.datetimes import DatetimeIndex, bdate_range, date_range from pandas.core.indexes.period import Period, PeriodIndex, period_range from pandas.core.indexes.timedeltas import timedelta_range from pandas.tests.plotting.common import TestPlotBase from pandas.tseries.offsets import WeekOfMonth pytestmark = pytest.mark.slow @td.skip_if_no_mpl class TestTSPlot(TestPlotBase): def setup_method(self, method): TestPlotBase.setup_method(self, method) self.freq = ["S", "T", "H", "D", "W", "M", "Q", "A"] idx = [period_range("12/31/1999", freq=x, periods=100) for x in self.freq] self.period_ser = [Series(np.random.randn(len(x)), x) for x in idx] self.period_df = [ DataFrame(np.random.randn(len(x), 3), index=x, columns=["A", "B", "C"]) for x in idx ] freq = ["S", "T", "H", "D", "W", "M", "Q-DEC", "A", "1B30Min"] idx = [date_range("12/31/1999", freq=x, periods=100) for x in freq] self.datetime_ser = [Series(np.random.randn(len(x)), x) for x in idx] self.datetime_df = [ DataFrame(np.random.randn(len(x), 3), index=x, columns=["A", "B", "C"]) for x in idx ] def teardown_method(self, method): tm.close() def test_ts_plot_with_tz(self, tz_aware_fixture): # GH2877, GH17173, GH31205, GH31580 tz = tz_aware_fixture index = date_range("1/1/2011", periods=2, freq="H", tz=tz) ts = Series([188.5, 328.25], index=index) with tm.assert_produces_warning(None): _check_plot_works(ts.plot) ax = ts.plot() xdata = list(ax.get_lines())[0].get_xdata() # Check first and last points' labels are correct assert (xdata[0].hour, xdata[0].minute) == (0, 0) assert (xdata[-1].hour, xdata[-1].minute) == (1, 0) def test_fontsize_set_correctly(self): # For issue #8765 df = DataFrame(np.random.randn(10, 9), index=range(10)) fig, ax = self.plt.subplots() df.plot(fontsize=2, ax=ax) for label in ax.get_xticklabels() + ax.get_yticklabels(): assert label.get_fontsize() == 2 def test_frame_inferred(self): # inferred freq idx = date_range("1/1/1987", freq="MS", periods=100) idx = DatetimeIndex(idx.values, freq=None) df = DataFrame(np.random.randn(len(idx), 3), index=idx) _check_plot_works(df.plot) # axes freq idx = idx[0:40].union(idx[45:99]) df2 = DataFrame(np.random.randn(len(idx), 3), index=idx) _check_plot_works(df2.plot) # N > 1 idx = date_range("2008-1-1 00:15:00", freq="15T", periods=10) idx = DatetimeIndex(idx.values, freq=None) df = DataFrame(np.random.randn(len(idx), 3), index=idx) _check_plot_works(df.plot) def test_is_error_nozeroindex(self): # GH11858 i = np.array([1, 2, 3]) a = DataFrame(i, index=i) _check_plot_works(a.plot, xerr=a) _check_plot_works(a.plot, yerr=a) def test_nonnumeric_exclude(self): idx = date_range("1/1/1987", freq="A", periods=3) df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]}, idx) fig, ax = self.plt.subplots() df.plot(ax=ax) # it works assert len(ax.get_lines()) == 1 # B was plotted self.plt.close(fig) msg = "no numeric data to plot" with pytest.raises(TypeError, match=msg): df["A"].plot() def test_tsplot(self): _, ax = self.plt.subplots() ts = tm.makeTimeSeries() for s in self.period_ser: _check_plot_works(s.plot, ax=ax) for s in self.datetime_ser: _check_plot_works(s.plot, ax=ax) _, ax = self.plt.subplots() 
ts.plot(style="k", ax=ax) color = (0.0, 0.0, 0.0, 1) assert color == ax.get_lines()[0].get_color() def test_both_style_and_color(self): ts = tm.makeTimeSeries() msg = ( "Cannot pass 'style' string with a color symbol and 'color' " "keyword argument. Please use one or the other or pass 'style' " "without a color symbol" ) with pytest.raises(ValueError, match=msg): ts.plot(style="b-", color="#000099") s = ts.reset_index(drop=True) with pytest.raises(ValueError, match=msg): s.plot(style="b-", color="#000099") def test_high_freq(self): freaks = ["ms", "us"] for freq in freaks: _, ax = self.plt.subplots() rng = date_range("1/1/2012", periods=100, freq=freq) ser = Series(np.random.randn(len(rng)), rng) _check_plot_works(ser.plot, ax=ax) def test_get_datevalue(self): from pandas.plotting._matplotlib.converter import get_datevalue assert get_datevalue(None, "D") is None assert get_datevalue(1987, "A") == 1987 assert get_datevalue(Period(1987, "A"), "M") == Period("1987-12", "M").ordinal assert get_datevalue("1/1/1987", "D") == Period("1987-1-1", "D").ordinal def test_ts_plot_format_coord(self): def check_format_of_first_point(ax, expected_string): first_line = ax.get_lines()[0] first_x = first_line.get_xdata()[0].ordinal first_y = first_line.get_ydata()[0] try: assert expected_string == ax.format_coord(first_x, first_y) except (ValueError): pytest.skip( "skipping test because issue forming test comparison GH7664" ) annual = Series(1, index=date_range("2014-01-01", periods=3, freq="A-DEC")) _, ax = self.plt.subplots() annual.plot(ax=ax) check_format_of_first_point(ax, "t = 2014 y = 1.000000") # note this is added to the annual plot already in existence, and # changes its freq field daily = Series(1, index=date_range("2014-01-01", periods=3, freq="D")) daily.plot(ax=ax) check_format_of_first_point(ax, "t = 2014-01-01 y = 1.000000") tm.close() def test_line_plot_period_series(self): for s in self.period_ser: _check_plot_works(s.plot, s.index.freq) @pytest.mark.parametrize( "frqncy", ["1S", "3S", "5T", "7H", "4D", "8W", "11M", "3A"] ) def test_line_plot_period_mlt_series(self, frqncy): # test period index line plot for series with multiples (`mlt`) of the # frequency (`frqncy`) rule code. tests resolution of issue #14763 idx = period_range("12/31/1999", freq=frqncy, periods=100) s = Series(np.random.randn(len(idx)), idx) _check_plot_works(s.plot, s.index.freq.rule_code) def test_line_plot_datetime_series(self): for s in self.datetime_ser: _check_plot_works(s.plot, s.index.freq.rule_code) def test_line_plot_period_frame(self): for df in self.period_df: _check_plot_works(df.plot, df.index.freq) @pytest.mark.parametrize( "frqncy", ["1S", "3S", "5T", "7H", "4D", "8W", "11M", "3A"] ) def test_line_plot_period_mlt_frame(self, frqncy): # test period index line plot for DataFrames with multiples (`mlt`) # of the frequency (`frqncy`) rule code. 
tests resolution of issue # #14763 idx = period_range("12/31/1999", freq=frqncy, periods=100) df = DataFrame(np.random.randn(len(idx), 3), index=idx, columns=["A", "B", "C"]) freq = df.index.asfreq(df.index.freq.rule_code).freq _check_plot_works(df.plot, freq) def test_line_plot_datetime_frame(self): for df in self.datetime_df: freq = df.index.to_period(df.index.freq.rule_code).freq _check_plot_works(df.plot, freq) def test_line_plot_inferred_freq(self): for ser in self.datetime_ser: ser = Series(ser.values, Index(np.asarray(ser.index))) _check_plot_works(ser.plot, ser.index.inferred_freq) ser = ser[[0, 3, 5, 6]] _check_plot_works(ser.plot) def test_fake_inferred_business(self): _, ax = self.plt.subplots() rng = date_range("2001-1-1", "2001-1-10") ts = Series(range(len(rng)), index=rng) ts = ts[:3].append(ts[5:]) ts.plot(ax=ax) assert not hasattr(ax, "freq") def test_plot_offset_freq(self): ser = tm.makeTimeSeries() _check_plot_works(ser.plot) dr = date_range(ser.index[0], freq="BQS", periods=10) ser = Series(np.random.randn(len(dr)), index=dr) _check_plot_works(ser.plot) def test_plot_multiple_inferred_freq(self): dr = Index([datetime(2000, 1, 1), datetime(2000, 1, 6), datetime(2000, 1, 11)]) ser = Series(np.random.randn(len(dr)), index=dr) _check_plot_works(ser.plot) def test_uhf(self): import pandas.plotting._matplotlib.converter as conv idx = date_range("2012-6-22 21:59:51.960928", freq="L", periods=500) df = DataFrame(np.random.randn(len(idx), 2), index=idx) _, ax = self.plt.subplots() df.plot(ax=ax) axis = ax.get_xaxis() tlocs = axis.get_ticklocs() tlabels = axis.get_ticklabels() for loc, label in zip(tlocs, tlabels): xp = conv._from_ordinal(loc).strftime("%H:%M:%S.%f") rs = str(label.get_text()) if len(rs): assert xp == rs def test_irreg_hf(self): idx = date_range("2012-6-22 21:59:51", freq="S", periods=100) df = DataFrame(np.random.randn(len(idx), 2), index=idx) irreg = df.iloc[[0, 1, 3, 4]] _, ax = self.plt.subplots() irreg.plot(ax=ax) diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff() sec = 1.0 / 24 / 60 / 60 assert (np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all() _, ax = self.plt.subplots() df2 = df.copy() df2.index = df.index.astype(object) df2.plot(ax=ax) diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff() assert (np.fabs(diffs[1:] - sec) < 1e-8).all() def test_irregular_datetime64_repr_bug(self): ser = tm.makeTimeSeries() ser = ser[[0, 1, 2, 7]] _, ax = self.plt.subplots() ret = ser.plot(ax=ax) assert ret is not None for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index): assert rs == xp def test_business_freq(self): bts = tm.makePeriodSeries() _, ax = self.plt.subplots() bts.plot(ax=ax) assert ax.get_lines()[0].get_xydata()[0, 0] == bts.index[0].ordinal idx = ax.get_lines()[0].get_xdata() assert PeriodIndex(data=idx).freqstr == "B" def test_business_freq_convert(self): bts = tm.makeTimeSeries(300).asfreq("BM") ts = bts.to_period("M") _, ax = self.plt.subplots() bts.plot(ax=ax) assert ax.get_lines()[0].get_xydata()[0, 0] == ts.index[0].ordinal idx = ax.get_lines()[0].get_xdata() assert PeriodIndex(data=idx).freqstr == "M" def test_freq_with_no_period_alias(self): # GH34487 freq = WeekOfMonth() bts = tm.makeTimeSeries(5).asfreq(freq) _, ax = self.plt.subplots() bts.plot(ax=ax) idx = ax.get_lines()[0].get_xdata() msg = "freq not specified and cannot be inferred" with pytest.raises(ValueError, match=msg): PeriodIndex(data=idx) def test_nonzero_base(self): # GH2571 idx = date_range("2012-12-20", periods=24, freq="H") + timedelta(minutes=30) df = 
DataFrame(np.arange(24), index=idx) _, ax = self.plt.subplots() df.plot(ax=ax) rs = ax.get_lines()[0].get_xdata() assert not Index(rs).is_normalized def test_dataframe(self): bts = DataFrame({"a": tm.makeTimeSeries()}) _, ax = self.plt.subplots() bts.plot(ax=ax) idx = ax.get_lines()[0].get_xdata() tm.assert_index_equal(bts.index.to_period(), PeriodIndex(idx)) def test_axis_limits(self): def _test(ax): xlim = ax.get_xlim() ax.set_xlim(xlim[0] - 5, xlim[1] + 10) result = ax.get_xlim() assert result[0] == xlim[0] - 5 assert result[1] == xlim[1] + 10 # string expected = (Period("1/1/2000", ax.freq), Period("4/1/2000", ax.freq)) ax.set_xlim("1/1/2000", "4/1/2000") result = ax.get_xlim() assert int(result[0]) == expected[0].ordinal assert int(result[1]) == expected[1].ordinal # datetime expected = (Period("1/1/2000", ax.freq), Period("4/1/2000", ax.freq)) ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1)) result = ax.get_xlim() assert int(result[0]) == expected[0].ordinal assert int(result[1]) == expected[1].ordinal fig = ax.get_figure() self.plt.close(fig) ser = tm.makeTimeSeries() _, ax = self.plt.subplots() ser.plot(ax=ax) _test(ax) _, ax = self.plt.subplots() df = DataFrame({"a": ser, "b": ser + 1}) df.plot(ax=ax) _test(ax) df = DataFrame({"a": ser, "b": ser + 1}) axes = df.plot(subplots=True) for ax in axes: _test(ax) def test_get_finder(self): import pandas.plotting._matplotlib.converter as conv assert conv.get_finder(to_offset("B")) == conv._daily_finder assert conv.get_finder(to_offset("D")) == conv._daily_finder assert conv.get_finder(to_offset("M")) == conv._monthly_finder assert conv.get_finder(to_offset("Q")) == conv._quarterly_finder assert conv.get_finder(to_offset("A")) == conv._annual_finder assert conv.get_finder(to_offset("W")) == conv._daily_finder def test_finder_daily(self): day_lst = [10, 40, 252, 400, 950, 2750, 10000] xpl1 = xpl2 = [Period("1999-1-1", freq="B").ordinal] * len(day_lst) rs1 = [] rs2 = [] for i, n in enumerate(day_lst): rng = bdate_range("1999-1-1", periods=n) ser = Series(np.random.randn(len(rng)), rng) _, ax = self.plt.subplots() ser.plot(ax=ax) xaxis = ax.get_xaxis() rs1.append(xaxis.get_majorticklocs()[0]) vmin, vmax = ax.get_xlim() ax.set_xlim(vmin + 0.9, vmax) rs2.append(xaxis.get_majorticklocs()[0]) self.plt.close(ax.get_figure()) assert rs1 == xpl1 assert rs2 == xpl2 def test_finder_quarterly(self): yrs = [3.5, 11] xpl1 = xpl2 = [Period("1988Q1").ordinal] * len(yrs) rs1 = [] rs2 = [] for i, n in enumerate(yrs): rng = period_range("1987Q2", periods=int(n * 4), freq="Q") ser = Series(np.random.randn(len(rng)), rng) _, ax = self.plt.subplots() ser.plot(ax=ax) xaxis = ax.get_xaxis() rs1.append(xaxis.get_majorticklocs()[0]) (vmin, vmax) = ax.get_xlim() ax.set_xlim(vmin + 0.9, vmax) rs2.append(xaxis.get_majorticklocs()[0]) self.plt.close(ax.get_figure()) assert rs1 == xpl1 assert rs2 == xpl2 def test_finder_monthly(self): yrs = [1.15, 2.5, 4, 11] xpl1 = xpl2 = [Period("Jan 1988").ordinal] * len(yrs) rs1 = [] rs2 = [] for i, n in enumerate(yrs): rng = period_range("1987Q2", periods=int(n * 12), freq="M") ser = Series(np.random.randn(len(rng)), rng) _, ax = self.plt.subplots() ser.plot(ax=ax) xaxis = ax.get_xaxis() rs1.append(xaxis.get_majorticklocs()[0]) vmin, vmax = ax.get_xlim() ax.set_xlim(vmin + 0.9, vmax) rs2.append(xaxis.get_majorticklocs()[0]) self.plt.close(ax.get_figure()) assert rs1 == xpl1 assert rs2 == xpl2 def test_finder_monthly_long(self): rng = period_range("1988Q1", periods=24 * 12, freq="M") ser = Series(np.random.randn(len(rng)), 
rng) _, ax = self.plt.subplots() ser.plot(ax=ax) xaxis = ax.get_xaxis() rs = xaxis.get_majorticklocs()[0] xp = Period("1989Q1", "M").ordinal assert rs == xp def test_finder_annual(self): xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170] xp = [Period(x, freq="A").ordinal for x in xp] rs = [] for i, nyears in enumerate([5, 10, 19, 49, 99, 199, 599, 1001]): rng = period_range("1987", periods=nyears, freq="A") ser = Series(np.random.randn(len(rng)), rng) _, ax = self.plt.subplots() ser.plot(ax=ax) xaxis = ax.get_xaxis() rs.append(xaxis.get_majorticklocs()[0]) self.plt.close(ax.get_figure()) assert rs == xp def test_finder_minutely(self): nminutes = 50 * 24 * 60 rng = date_range("1/1/1999", freq="Min", periods=nminutes) ser = Series(np.random.randn(len(rng)), rng) _, ax = self.plt.subplots() ser.plot(ax=ax) xaxis = ax.get_xaxis() rs = xaxis.get_majorticklocs()[0] xp = Period("1/1/1999", freq="Min").ordinal assert rs == xp def test_finder_hourly(self): nhours = 23 rng = date_range("1/1/1999", freq="H", periods=nhours) ser = Series(np.random.randn(len(rng)), rng) _, ax = self.plt.subplots() ser.plot(ax=ax) xaxis = ax.get_xaxis() rs = xaxis.get_majorticklocs()[0] xp = Period("1/1/1999", freq="H").ordinal assert rs == xp def test_gaps(self): ts = tm.makeTimeSeries() ts[5:25] = np.nan _, ax = self.plt.subplots() ts.plot(ax=ax) lines = ax.get_lines() assert len(lines) == 1 line = lines[0] data = line.get_xydata() if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3: data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) assert isinstance(data, np.ma.core.MaskedArray) mask = data.mask assert mask[5:25, 1].all() self.plt.close(ax.get_figure()) # irregular ts = tm.makeTimeSeries() ts = ts[[0, 1, 2, 5, 7, 9, 12, 15, 20]] ts[2:5] = np.nan _, ax = self.plt.subplots() ax = ts.plot(ax=ax) lines = ax.get_lines() assert len(lines) == 1 line = lines[0] data = line.get_xydata() if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3: data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) assert isinstance(data, np.ma.core.MaskedArray) mask = data.mask assert mask[2:5, 1].all() self.plt.close(ax.get_figure()) # non-ts idx = [0, 1, 2, 5, 7, 9, 12, 15, 20] ser = Series(np.random.randn(len(idx)), idx) ser[2:5] = np.nan _, ax = self.plt.subplots() ser.plot(ax=ax) lines = ax.get_lines() assert len(lines) == 1 line = lines[0] data = line.get_xydata() if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3: data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) assert isinstance(data, np.ma.core.MaskedArray) mask = data.mask assert mask[2:5, 1].all() def test_gap_upsample(self): low = tm.makeTimeSeries() low[5:25] = np.nan _, ax = self.plt.subplots() low.plot(ax=ax) idxh = date_range(low.index[0], low.index[-1], freq="12h") s = Series(np.random.randn(len(idxh)), idxh) s.plot(secondary_y=True) lines = ax.get_lines() assert len(lines) == 1 assert len(ax.right_ax.get_lines()) == 1 line = lines[0] data = line.get_xydata() if self.mpl_ge_3_0_0 or not self.mpl_ge_2_2_3: data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) assert isinstance(data, np.ma.core.MaskedArray) mask = data.mask assert mask[5:25, 1].all() def test_secondary_y(self): ser = Series(np.random.randn(10)) ser2 = Series(np.random.randn(10)) fig, _ = self.plt.subplots() ax = ser.plot(secondary_y=True) assert hasattr(ax, "left_ax") assert not hasattr(ax, "right_ax") axes = fig.get_axes() line = ax.get_lines()[0] xp = Series(line.get_ydata(), line.get_xdata()) tm.assert_series_equal(ser, xp) assert ax.get_yaxis().get_ticks_position() 
== "right" assert not axes[0].get_yaxis().get_visible() self.plt.close(fig) _, ax2 = self.plt.subplots() ser2.plot(ax=ax2) assert ax2.get_yaxis().get_ticks_position() == self.default_tick_position self.plt.close(ax2.get_figure()) ax = ser2.plot() ax2 = ser.plot(secondary_y=True) assert ax.get_yaxis().get_visible() assert not hasattr(ax, "left_ax") assert hasattr(ax, "right_ax") assert hasattr(ax2, "left_ax") assert not hasattr(ax2, "right_ax") def test_secondary_y_ts(self): idx = date_range("1/1/2000", periods=10) ser = Series(np.random.randn(10), idx) ser2 = Series(np.random.randn(10), idx) fig, _ = self.plt.subplots() ax = ser.plot(secondary_y=True) assert hasattr(ax, "left_ax") assert not hasattr(ax, "right_ax") axes = fig.get_axes() line = ax.get_lines()[0] xp = Series(line.get_ydata(), line.get_xdata()).to_timestamp() tm.assert_series_equal(ser, xp) assert ax.get_yaxis().get_ticks_position() == "right" assert not axes[0].get_yaxis().get_visible() self.plt.close(fig) _, ax2 = self.plt.subplots() ser2.plot(ax=ax2) assert ax2.get_yaxis().get_ticks_position() == self.default_tick_position self.plt.close(ax2.get_figure()) ax = ser2.plot() ax2 = ser.plot(secondary_y=True) assert ax.get_yaxis().get_visible() @td.skip_if_no_scipy def test_secondary_kde(self): ser = Series(np.random.randn(10)) fig, ax = self.plt.subplots() ax = ser.plot(secondary_y=True, kind="density", ax=ax) assert hasattr(ax, "left_ax") assert not hasattr(ax, "right_ax") axes = fig.get_axes() assert axes[1].get_yaxis().get_ticks_position() == "right" def test_secondary_bar(self): ser = Series(np.random.randn(10)) fig, ax = self.plt.subplots() ser.plot(secondary_y=True, kind="bar", ax=ax) axes = fig.get_axes() assert axes[1].get_yaxis().get_ticks_position() == "right" def test_secondary_frame(self): df = DataFrame(np.random.randn(5, 3), columns=["a", "b", "c"]) axes = df.plot(secondary_y=["a", "c"], subplots=True) assert axes[0].get_yaxis().get_ticks_position() == "right" assert axes[1].get_yaxis().get_ticks_position() == self.default_tick_position assert axes[2].get_yaxis().get_ticks_position() == "right" def test_secondary_bar_frame(self): df = DataFrame(np.random.randn(5, 3), columns=["a", "b", "c"]) axes = df.plot(kind="bar", secondary_y=["a", "c"], subplots=True) assert axes[0].get_yaxis().get_ticks_position() == "right" assert axes[1].get_yaxis().get_ticks_position() == self.default_tick_position assert axes[2].get_yaxis().get_ticks_position() == "right" def test_mixed_freq_regular_first(self): # TODO s1 = tm.makeTimeSeries() s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]] # it works! 
_, ax = self.plt.subplots() s1.plot(ax=ax) ax2 = s2.plot(style="g", ax=ax) lines = ax2.get_lines() idx1 = PeriodIndex(lines[0].get_xdata()) idx2 = PeriodIndex(lines[1].get_xdata()) tm.assert_index_equal(idx1, s1.index.to_period("B")) tm.assert_index_equal(idx2, s2.index.to_period("B")) left, right = ax2.get_xlim() pidx = s1.index.to_period() assert left <= pidx[0].ordinal assert right >= pidx[-1].ordinal def test_mixed_freq_irregular_first(self): s1 = tm.makeTimeSeries() s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]] _, ax = self.plt.subplots() s2.plot(style="g", ax=ax) s1.plot(ax=ax) assert not hasattr(ax, "freq") lines = ax.get_lines() x1 = lines[0].get_xdata() tm.assert_numpy_array_equal(x1, s2.index.astype(object).values) x2 = lines[1].get_xdata() tm.assert_numpy_array_equal(x2, s1.index.astype(object).values) def test_mixed_freq_regular_first_df(self): # GH 9852 s1 = tm.makeTimeSeries().to_frame() s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :] _, ax = self.plt.subplots() s1.plot(ax=ax) ax2 = s2.plot(style="g", ax=ax) lines = ax2.get_lines() idx1 = PeriodIndex(lines[0].get_xdata()) idx2 = PeriodIndex(lines[1].get_xdata()) assert idx1.equals(s1.index.to_period("B")) assert idx2.equals(s2.index.to_period("B")) left, right = ax2.get_xlim() pidx = s1.index.to_period() assert left <= pidx[0].ordinal assert right >= pidx[-1].ordinal def test_mixed_freq_irregular_first_df(self): # GH 9852 s1 = tm.makeTimeSeries().to_frame() s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :] _, ax = self.plt.subplots() s2.plot(style="g", ax=ax) s1.plot(ax=ax) assert not hasattr(ax, "freq") lines = ax.get_lines() x1 = lines[0].get_xdata() tm.assert_numpy_array_equal(x1, s2.index.astype(object).values) x2 = lines[1].get_xdata() tm.assert_numpy_array_equal(x2, s1.index.astype(object).values) def test_mixed_freq_hf_first(self): idxh = date_range("1/1/1999", periods=365, freq="D") idxl = date_range("1/1/1999", periods=12, freq="M") high = Series(np.random.randn(len(idxh)), idxh) low = Series(np.random.randn(len(idxl)), idxl) _, ax = self.plt.subplots() high.plot(ax=ax) low.plot(ax=ax) for line in ax.get_lines(): assert PeriodIndex(data=line.get_xdata()).freq == "D" def test_mixed_freq_alignment(self): ts_ind = date_range("2012-01-01 13:00", "2012-01-02", freq="H") ts_data = np.random.randn(12) ts = Series(ts_data, index=ts_ind) ts2 = ts.asfreq("T").interpolate() _, ax = self.plt.subplots() ax = ts.plot(ax=ax) ts2.plot(style="r", ax=ax) assert ax.lines[0].get_xdata()[0] == ax.lines[1].get_xdata()[0] def test_mixed_freq_lf_first(self): idxh = date_range("1/1/1999", periods=365, freq="D") idxl = date_range("1/1/1999", periods=12, freq="M") high = Series(np.random.randn(len(idxh)), idxh) low = Series(np.random.randn(len(idxl)), idxl) _, ax = self.plt.subplots() low.plot(legend=True, ax=ax) high.plot(legend=True, ax=ax) for line in ax.get_lines(): assert PeriodIndex(data=line.get_xdata()).freq == "D" leg = ax.get_legend() assert len(leg.texts) == 2 self.plt.close(ax.get_figure()) idxh = date_range("1/1/1999", periods=240, freq="T") idxl = date_range("1/1/1999", periods=4, freq="H") high = Series(np.random.randn(len(idxh)), idxh) low = Series(np.random.randn(len(idxl)), idxl) _, ax = self.plt.subplots() low.plot(ax=ax) high.plot(ax=ax) for line in ax.get_lines(): assert PeriodIndex(data=line.get_xdata()).freq == "T" def test_mixed_freq_irreg_period(self): ts = tm.makeTimeSeries() irreg = ts[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]] rng = period_range("1/3/2000", periods=30, freq="B") ps = 
Series(np.random.randn(len(rng)), rng) _, ax = self.plt.subplots() irreg.plot(ax=ax) ps.plot(ax=ax) def test_mixed_freq_shared_ax(self): # GH13341, using sharex=True idx1 = date_range("2015-01-01", periods=3, freq="M") idx2 = idx1[:1].union(idx1[2:]) s1 = Series(range(len(idx1)), idx1) s2 = Series(range(len(idx2)), idx2) fig, (ax1, ax2) = self.plt.subplots(nrows=2, sharex=True) s1.plot(ax=ax1) s2.plot(ax=ax2) assert ax1.freq == "M" assert ax2.freq == "M" assert ax1.lines[0].get_xydata()[0, 0] == ax2.lines[0].get_xydata()[0, 0] # using twinx fig, ax1 = self.plt.subplots() ax2 = ax1.twinx() s1.plot(ax=ax1) s2.plot(ax=ax2) assert ax1.lines[0].get_xydata()[0, 0] == ax2.lines[0].get_xydata()[0, 0] # TODO (GH14330, GH14322) # plotting the irregular first does not yet work # fig, ax1 = plt.subplots() # ax2 = ax1.twinx() # s2.plot(ax=ax1) # s1.plot(ax=ax2) # assert (ax1.lines[0].get_xydata()[0, 0] == # ax2.lines[0].get_xydata()[0, 0]) def test_nat_handling(self): _, ax = self.plt.subplots() dti = DatetimeIndex(["2015-01-01", NaT, "2015-01-03"]) s = Series(range(len(dti)), dti) s.plot(ax=ax) xdata = ax.get_lines()[0].get_xdata() # plot x data is bounded by index values assert s.index.min() <= Series(xdata).min() assert Series(xdata).max() <= s.index.max() def test_to_weekly_resampling(self): idxh = date_range("1/1/1999", periods=52, freq="W") idxl = date_range("1/1/1999", periods=12, freq="M") high = Series(np.random.randn(len(idxh)), idxh) low = Series(np.random.randn(len(idxl)), idxl) _, ax = self.plt.subplots() high.plot(ax=ax) low.plot(ax=ax) for line in ax.get_lines(): assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq def test_from_weekly_resampling(self): idxh = date_range("1/1/1999", periods=52, freq="W") idxl = date_range("1/1/1999", periods=12, freq="M") high = Series(np.random.randn(len(idxh)), idxh) low = Series(np.random.randn(len(idxl)), idxl) _, ax = self.plt.subplots() low.plot(ax=ax) high.plot(ax=ax) expected_h = idxh.to_period().asi8.astype(np.float64) expected_l = np.array( [1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549, 1553, 1558, 1562], dtype=np.float64, ) for line in ax.get_lines(): assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq xdata = line.get_xdata(orig=False) if len(xdata) == 12: # idxl lines tm.assert_numpy_array_equal(xdata, expected_l) else: tm.assert_numpy_array_equal(xdata, expected_h) tm.close() def test_from_resampling_area_line_mixed(self): idxh = date_range("1/1/1999", periods=52, freq="W") idxl = date_range("1/1/1999", periods=12, freq="M") high = DataFrame(np.random.rand(len(idxh), 3), index=idxh, columns=[0, 1, 2]) low = DataFrame(np.random.rand(len(idxl), 3), index=idxl, columns=[0, 1, 2]) # low to high for kind1, kind2 in [("line", "area"), ("area", "line")]: _, ax = self.plt.subplots() low.plot(kind=kind1, stacked=True, ax=ax) high.plot(kind=kind2, stacked=True, ax=ax) # check low dataframe result expected_x = np.array( [ 1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549, 1553, 1558, 1562, ], dtype=np.float64, ) expected_y = np.zeros(len(expected_x), dtype=np.float64) for i in range(3): line = ax.lines[i] assert PeriodIndex(line.get_xdata()).freq == idxh.freq tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x) # check stacked values are correct expected_y += low[i].values tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y) # check high dataframe result expected_x = idxh.to_period().asi8.astype(np.float64) expected_y = np.zeros(len(expected_x), dtype=np.float64) for i in range(3): line = 
ax.lines[3 + i] assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x) expected_y += high[i].values tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y) # high to low for kind1, kind2 in [("line", "area"), ("area", "line")]: _, ax = self.plt.subplots() high.plot(kind=kind1, stacked=True, ax=ax) low.plot(kind=kind2, stacked=True, ax=ax) # check high dataframe result expected_x = idxh.to_period().asi8.astype(np.float64) expected_y = np.zeros(len(expected_x), dtype=np.float64) for i in range(3): line = ax.lines[i] assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x) expected_y += high[i].values tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y) # check low dataframe result expected_x = np.array( [ 1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549, 1553, 1558, 1562, ], dtype=np.float64, ) expected_y = np.zeros(len(expected_x), dtype=np.float64) for i in range(3): lines = ax.lines[3 + i] assert PeriodIndex(data=lines.get_xdata()).freq == idxh.freq tm.assert_numpy_array_equal(lines.get_xdata(orig=False), expected_x) expected_y += low[i].values tm.assert_numpy_array_equal(lines.get_ydata(orig=False), expected_y) def test_mixed_freq_second_millisecond(self): # GH 7772, GH 7760 idxh = date_range("2014-07-01 09:00", freq="S", periods=50) idxl = date_range("2014-07-01 09:00", freq="100L", periods=500) high = Series(np.random.randn(len(idxh)), idxh) low = Series(np.random.randn(len(idxl)), idxl) # high to low _, ax = self.plt.subplots() high.plot(ax=ax) low.plot(ax=ax) assert len(ax.get_lines()) == 2 for line in ax.get_lines(): assert PeriodIndex(data=line.get_xdata()).freq == "L" tm.close() # low to high _, ax = self.plt.subplots() low.plot(ax=ax) high.plot(ax=ax) assert len(ax.get_lines()) == 2 for line in ax.get_lines(): assert PeriodIndex(data=line.get_xdata()).freq == "L" def test_irreg_dtypes(self): # date idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)] df = DataFrame(np.random.randn(len(idx), 3), Index(idx, dtype=object)) _check_plot_works(df.plot) # np.datetime64 idx = date_range("1/1/2000", periods=10) idx = idx[[0, 2, 5, 9]].astype(object) df = DataFrame(np.random.randn(len(idx), 3), idx) _, ax = self.plt.subplots() _check_plot_works(df.plot, ax=ax) def test_time(self): t = datetime(1, 1, 1, 3, 30, 0) deltas = np.random.randint(1, 20, 3).cumsum() ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas]) df = DataFrame( {"a": np.random.randn(len(ts)), "b": np.random.randn(len(ts))}, index=ts ) fig, ax = self.plt.subplots() df.plot(ax=ax) # verify tick labels ticks = ax.get_xticks() labels = ax.get_xticklabels() for t, l in zip(ticks, labels): m, s = divmod(int(t), 60) h, m = divmod(m, 60) rs = l.get_text() if len(rs) > 0: if s != 0: xp = time(h, m, s).strftime("%H:%M:%S") else: xp = time(h, m, s).strftime("%H:%M") assert xp == rs def test_time_change_xlim(self): t = datetime(1, 1, 1, 3, 30, 0) deltas = np.random.randint(1, 20, 3).cumsum() ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas]) df = DataFrame( {"a": np.random.randn(len(ts)), "b": np.random.randn(len(ts))}, index=ts ) fig, ax = self.plt.subplots() df.plot(ax=ax) # verify tick labels ticks = ax.get_xticks() labels = ax.get_xticklabels() for t, l in zip(ticks, labels): m, s = divmod(int(t), 60) h, m = divmod(m, 60) rs = l.get_text() if len(rs) > 0: if s != 0: xp = time(h, m, s).strftime("%H:%M:%S") else: xp 
= time(h, m, s).strftime("%H:%M") assert xp == rs # change xlim ax.set_xlim("1:30", "5:00") # check tick labels again ticks = ax.get_xticks() labels = ax.get_xticklabels() for t, l in zip(ticks, labels): m, s = divmod(int(t), 60) h, m = divmod(m, 60) rs = l.get_text() if len(rs) > 0: if s != 0: xp = time(h, m, s).strftime("%H:%M:%S") else: xp = time(h, m, s).strftime("%H:%M") assert xp == rs def test_time_musec(self): t = datetime(1, 1, 1, 3, 30, 0) deltas = np.random.randint(1, 20, 3).cumsum() ts = np.array([(t + timedelta(microseconds=int(x))).time() for x in deltas]) df = DataFrame( {"a": np.random.randn(len(ts)), "b": np.random.randn(len(ts))}, index=ts ) fig, ax = self.plt.subplots() ax = df.plot(ax=ax) # verify tick labels ticks = ax.get_xticks() labels = ax.get_xticklabels() for t, l in zip(ticks, labels): m, s = divmod(int(t), 60) us = round((t - int(t)) * 1e6) h, m = divmod(m, 60) rs = l.get_text() if len(rs) > 0: if (us % 1000) != 0: xp = time(h, m, s, us).strftime("%H:%M:%S.%f") elif (us // 1000) != 0: xp = time(h, m, s, us).strftime("%H:%M:%S.%f")[:-3] elif s != 0: xp = time(h, m, s, us).strftime("%H:%M:%S") else: xp = time(h, m, s, us).strftime("%H:%M") assert xp == rs def test_secondary_upsample(self): idxh = date_range("1/1/1999", periods=365, freq="D") idxl = date_range("1/1/1999", periods=12, freq="M") high = Series(np.random.randn(len(idxh)), idxh) low = Series(np.random.randn(len(idxl)), idxl) _, ax = self.plt.subplots() low.plot(ax=ax) ax = high.plot(secondary_y=True, ax=ax) for line in ax.get_lines(): assert PeriodIndex(line.get_xdata()).freq == "D" assert hasattr(ax, "left_ax") assert not hasattr(ax, "right_ax") for line in ax.left_ax.get_lines(): assert PeriodIndex(line.get_xdata()).freq == "D" def test_secondary_legend(self): fig = self.plt.figure() ax = fig.add_subplot(211) # ts df = tm.makeTimeDataFrame() df.plot(secondary_y=["A", "B"], ax=ax) leg = ax.get_legend() assert len(leg.get_lines()) == 4 assert leg.get_texts()[0].get_text() == "A (right)" assert leg.get_texts()[1].get_text() == "B (right)" assert leg.get_texts()[2].get_text() == "C" assert leg.get_texts()[3].get_text() == "D" assert ax.right_ax.get_legend() is None colors = set() for line in leg.get_lines(): colors.add(line.get_color()) # TODO: color cycle problems assert len(colors) == 4 self.plt.close(fig) fig = self.plt.figure() ax = fig.add_subplot(211) df.plot(secondary_y=["A", "C"], mark_right=False, ax=ax) leg = ax.get_legend() assert len(leg.get_lines()) == 4 assert leg.get_texts()[0].get_text() == "A" assert leg.get_texts()[1].get_text() == "B" assert leg.get_texts()[2].get_text() == "C" assert leg.get_texts()[3].get_text() == "D" self.plt.close(fig) fig, ax = self.plt.subplots() df.plot(kind="bar", secondary_y=["A"], ax=ax) leg = ax.get_legend() assert leg.get_texts()[0].get_text() == "A (right)" assert leg.get_texts()[1].get_text() == "B" self.plt.close(fig) fig, ax = self.plt.subplots() df.plot(kind="bar", secondary_y=["A"], mark_right=False, ax=ax) leg = ax.get_legend() assert leg.get_texts()[0].get_text() == "A" assert leg.get_texts()[1].get_text() == "B" self.plt.close(fig) fig = self.plt.figure() ax = fig.add_subplot(211) df = tm.makeTimeDataFrame() ax = df.plot(secondary_y=["C", "D"], ax=ax) leg = ax.get_legend() assert len(leg.get_lines()) == 4 assert ax.right_ax.get_legend() is None colors = set() for line in leg.get_lines(): colors.add(line.get_color()) # TODO: color cycle problems assert len(colors) == 4 self.plt.close(fig) # non-ts df = tm.makeDataFrame() fig = self.plt.figure() ax 
= fig.add_subplot(211) ax = df.plot(secondary_y=["A", "B"], ax=ax) leg = ax.get_legend() assert len(leg.get_lines()) == 4 assert ax.right_ax.get_legend() is None colors = set() for line in leg.get_lines(): colors.add(line.get_color()) # TODO: color cycle problems assert len(colors) == 4 self.plt.close() fig = self.plt.figure() ax = fig.add_subplot(211) ax = df.plot(secondary_y=["C", "D"], ax=ax) leg = ax.get_legend() assert len(leg.get_lines()) == 4 assert ax.right_ax.get_legend() is None colors = set() for line in leg.get_lines(): colors.add(line.get_color()) # TODO: color cycle problems assert len(colors) == 4 def test_format_date_axis(self): rng = date_range("1/1/2012", periods=12, freq="M") df = DataFrame(np.random.randn(len(rng), 3), rng) _, ax = self.plt.subplots() ax = df.plot(ax=ax) xaxis = ax.get_xaxis() for line in xaxis.get_ticklabels(): if len(line.get_text()) > 0: assert line.get_rotation() == 30 def test_ax_plot(self): x = date_range(start="2012-01-02", periods=10, freq="D") y = list(range(len(x))) _, ax = self.plt.subplots() lines = ax.plot(x, y, label="Y") tm.assert_index_equal(DatetimeIndex(lines[0].get_xdata()), x) def test_mpl_nopandas(self): dates = [date(2008, 12, 31), date(2009, 1, 31)] values1 = np.arange(10.0, 11.0, 0.5) values2 = np.arange(11.0, 12.0, 0.5) kw = {"fmt": "-", "lw": 4} _, ax = self.plt.subplots() ax.plot_date([x.toordinal() for x in dates], values1, **kw) ax.plot_date([x.toordinal() for x in dates], values2, **kw) line1, line2 = ax.get_lines() exp = np.array([x.toordinal() for x in dates], dtype=np.float64) tm.assert_numpy_array_equal(line1.get_xydata()[:, 0], exp) exp = np.array([x.toordinal() for x in dates], dtype=np.float64) tm.assert_numpy_array_equal(line2.get_xydata()[:, 0], exp) def test_irregular_ts_shared_ax_xlim(self): # GH 2960 from pandas.plotting._matplotlib.converter import DatetimeConverter ts = tm.makeTimeSeries()[:20] ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]] # plot the left section of the irregular series, then the right section _, ax = self.plt.subplots() ts_irregular[:5].plot(ax=ax) ts_irregular[5:].plot(ax=ax) # check that axis limits are correct left, right = ax.get_xlim() assert left <= DatetimeConverter.convert(ts_irregular.index.min(), "", ax) assert right >= DatetimeConverter.convert(ts_irregular.index.max(), "", ax) def test_secondary_y_non_ts_xlim(self): # GH 3490 - non-timeseries with secondary y index_1 = [1, 2, 3, 4] index_2 = [5, 6, 7, 8] s1 = Series(1, index=index_1) s2 = Series(2, index=index_2) _, ax = self.plt.subplots() s1.plot(ax=ax) left_before, right_before = ax.get_xlim() s2.plot(secondary_y=True, ax=ax) left_after, right_after = ax.get_xlim() assert left_before >= left_after assert right_before < right_after def test_secondary_y_regular_ts_xlim(self): # GH 3490 - regular-timeseries with secondary y index_1 = date_range(start="2000-01-01", periods=4, freq="D") index_2 = date_range(start="2000-01-05", periods=4, freq="D") s1 = Series(1, index=index_1) s2 = Series(2, index=index_2) _, ax = self.plt.subplots() s1.plot(ax=ax) left_before, right_before = ax.get_xlim() s2.plot(secondary_y=True, ax=ax) left_after, right_after = ax.get_xlim() assert left_before >= left_after assert right_before < right_after def test_secondary_y_mixed_freq_ts_xlim(self): # GH 3490 - mixed frequency timeseries with secondary y rng = date_range("2000-01-01", periods=10000, freq="min") ts = Series(1, index=rng) _, ax = self.plt.subplots() ts.plot(ax=ax) left_before, right_before = ax.get_xlim() 
ts.resample("D").mean().plot(secondary_y=True, ax=ax) left_after, right_after = ax.get_xlim() # a downsample should not have changed either limit assert left_before == left_after assert right_before == right_after def test_secondary_y_irregular_ts_xlim(self): # GH 3490 - irregular-timeseries with secondary y from pandas.plotting._matplotlib.converter import DatetimeConverter ts = tm.makeTimeSeries()[:20] ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]] _, ax = self.plt.subplots() ts_irregular[:5].plot(ax=ax) # plot higher-x values on secondary axis ts_irregular[5:].plot(secondary_y=True, ax=ax) # ensure secondary limits aren't overwritten by plot on primary ts_irregular[:5].plot(ax=ax) left, right = ax.get_xlim() assert left <= DatetimeConverter.convert(ts_irregular.index.min(), "", ax) assert right >= DatetimeConverter.convert(ts_irregular.index.max(), "", ax) def test_plot_outofbounds_datetime(self): # 2579 - checking this does not raise values = [date(1677, 1, 1), date(1677, 1, 2)] _, ax = self.plt.subplots() ax.plot(values) values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)] ax.plot(values) def test_format_timedelta_ticks_narrow(self): expected_labels = [f"00:00:00.0000000{i:0>2d}" for i in np.arange(10)] rng = timedelta_range("0", periods=10, freq="ns") df = DataFrame(np.random.randn(len(rng), 3), rng) fig, ax = self.plt.subplots() df.plot(fontsize=2, ax=ax) self.plt.draw() labels = ax.get_xticklabels() result_labels = [x.get_text() for x in labels] assert len(result_labels) == len(expected_labels) assert result_labels == expected_labels def test_format_timedelta_ticks_wide(self): expected_labels = [ "00:00:00", "1 days 03:46:40", "2 days 07:33:20", "3 days 11:20:00", "4 days 15:06:40", "5 days 18:53:20", "6 days 22:40:00", "8 days 02:26:40", "9 days 06:13:20", ] rng = timedelta_range("0", periods=10, freq="1 d") df = DataFrame(np.random.randn(len(rng), 3), rng) fig, ax = self.plt.subplots() ax = df.plot(fontsize=2, ax=ax) self.plt.draw() labels = ax.get_xticklabels() result_labels = [x.get_text() for x in labels] assert len(result_labels) == len(expected_labels) assert result_labels == expected_labels def test_timedelta_plot(self): # test issue #8711 s = Series(range(5), timedelta_range("1day", periods=5)) _, ax = self.plt.subplots() _check_plot_works(s.plot, ax=ax) # test long period index = timedelta_range("1 day 2 hr 30 min 10 s", periods=10, freq="1 d") s = Series(np.random.randn(len(index)), index) _, ax = self.plt.subplots() _check_plot_works(s.plot, ax=ax) # test short period index = timedelta_range("1 day 2 hr 30 min 10 s", periods=10, freq="1 ns") s = Series(np.random.randn(len(index)), index) _, ax = self.plt.subplots() _check_plot_works(s.plot, ax=ax) def test_hist(self): # https://github.com/matplotlib/matplotlib/issues/8459 rng = date_range("1/1/2011", periods=10, freq="H") x = rng w1 = np.arange(0, 1, 0.1) w2 = np.arange(0, 1, 0.1)[::-1] _, ax = self.plt.subplots() ax.hist([x, x], weights=[w1, w2]) def test_overlapping_datetime(self): # GB 6608 s1 = Series( [1, 2, 3], index=[ datetime(1995, 12, 31), datetime(2000, 12, 31), datetime(2005, 12, 31), ], ) s2 = Series( [1, 2, 3], index=[ datetime(1997, 12, 31), datetime(2003, 12, 31), datetime(2008, 12, 31), ], ) # plot first series, then add the second series to those axes, # then try adding the first series again _, ax = self.plt.subplots() s1.plot(ax=ax) s2.plot(ax=ax) s1.plot(ax=ax) @pytest.mark.xfail(reason="GH9053 matplotlib does not use ax.xaxis.converter") def 
test_add_matplotlib_datetime64(self): # GH9053 - ensure that a plot with PeriodConverter still understands # datetime64 data. This still fails because matplotlib overrides the # ax.xaxis.converter with a DatetimeConverter s = Series(np.random.randn(10), index=date_range("1970-01-02", periods=10)) ax = s.plot() with tm.assert_produces_warning(DeprecationWarning): # multi-dimensional indexing ax.plot(s.index, s.values, color="g") l1, l2 = ax.lines tm.assert_numpy_array_equal(l1.get_xydata(), l2.get_xydata()) def test_matplotlib_scatter_datetime64(self): # https://github.com/matplotlib/matplotlib/issues/11391 df = DataFrame(np.random.RandomState(0).rand(10, 2), columns=["x", "y"]) df["time"] = date_range("2018-01-01", periods=10, freq="D") fig, ax = self.plt.subplots() ax.scatter(x="time", y="y", data=df) self.plt.draw() label = ax.get_xticklabels()[0] if self.mpl_ge_3_2_0: expected = "2018-01-01" elif self.mpl_ge_3_0_0: expected = "2017-12-08" else: expected = "2017-12-12" assert label.get_text() == expected def test_check_xticks_rot(self): # https://github.com/pandas-dev/pandas/issues/29460 # regular time series x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-03"]) df = DataFrame({"x": x, "y": [1, 2, 3]}) axes = df.plot(x="x", y="y") self._check_ticks_props(axes, xrot=0) # irregular time series x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-04"]) df = DataFrame({"x": x, "y": [1, 2, 3]}) axes = df.plot(x="x", y="y") self._check_ticks_props(axes, xrot=30) # use timeseries index or not axes = df.set_index("x").plot(y="y", use_index=True) self._check_ticks_props(axes, xrot=30) axes = df.set_index("x").plot(y="y", use_index=False) self._check_ticks_props(axes, xrot=0) # separate subplots axes = df.plot(x="x", y="y", subplots=True, sharex=True) self._check_ticks_props(axes, xrot=30) axes = df.plot(x="x", y="y", subplots=True, sharex=False) self._check_ticks_props(axes, xrot=0) def _check_plot_works(f, freq=None, series=None, *args, **kwargs): import matplotlib.pyplot as plt fig = plt.gcf() try: plt.clf() ax = fig.add_subplot(211) orig_ax = kwargs.pop("ax", plt.gca()) orig_axfreq = getattr(orig_ax, "freq", None) ret = f(*args, **kwargs) assert ret is not None # do something more intelligent ax = kwargs.pop("ax", plt.gca()) if series is not None: dfreq = series.index.freq if isinstance(dfreq, BaseOffset): dfreq = dfreq.rule_code if orig_axfreq is None: assert ax.freq == dfreq if freq is not None and orig_axfreq is None: assert ax.freq == freq ax = fig.add_subplot(212) kwargs["ax"] = ax ret = f(*args, **kwargs) assert ret is not None # TODO: do something more intelligent with tm.ensure_clean(return_filelike=True) as path: plt.savefig(path) # GH18439 # this is supported only in Python 3 pickle since # pickle in Python2 doesn't support instancemethod pickling # TODO(statsmodels 0.10.0): Remove the statsmodels check # https://github.com/pandas-dev/pandas/issues/24088 # https://github.com/statsmodels/statsmodels/issues/4772 if "statsmodels" not in sys.modules: with tm.ensure_clean(return_filelike=True) as path: pickle.dump(fig, path) finally: plt.close(fig)
bsd-3-clause
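A minimal usage sketch of the secondary_y behaviour the pandas plotting tests above exercise (this is not part of the pandas test suite; it only assumes pandas and matplotlib are installed, and the series names are placeholders):

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

idx = pd.date_range("2000-01-01", periods=10, freq="D")
primary = pd.Series(np.random.randn(10), index=idx)
secondary = pd.Series(np.random.randn(10).cumsum(), index=idx)

fig, ax = plt.subplots()
primary.plot(ax=ax)                      # drawn against the left y-axis
secondary.plot(secondary_y=True, ax=ax)  # drawn against the right y-axis, as asserted in test_secondary_y
plt.show()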
ShujiaHuang/AsmVar
src/AsmvarAlterAlign/ASV_AlterAlignMain.py
2
8698
""" =============================================================================== Use pysam model to calculate the AS from bam file =============================================================================== Author : Shujia Huang Date : 2014-03-25 14:29:08 """ import sys import re import optparse import os import string import pysam import matplotlib.pyplot as plt import numpy as np import AlterAlign as ATA def IsSNP(refbase, alleles): isSnp = True for ale in alleles: if len(ale) > 1 or len(refbase) > 1: isSnp = False return isSnp def main(opt): vcfInfile = opt.vcfInfile bamInfile = opt.bamInfile faInfile = opt.refInfile sampleID = opt.sample refId = opt.refChrId outPrefix = opt.outPrefix mapq = opt.mapq newID = opt.exc fa = ATA.LoadFaSeq(faInfile, refId) print >> sys.stderr, '# [INFO] Fa Loading finish ***' if bamInfile[-4:] == '.bam': samInHandle = pysam.Samfile(bamInfile, 'rb') else : samInHandle = pysam.Samfile(bamInfile, 'r') #samOutHandle = pysam.Samfile(outPrefix + '.bam', 'wb', template=samInHandle) vcfOutHandle = open(outPrefix + '.vcf', 'w') print >> sys.stderr, '# [INFO] Now Scaning the VCF and doing alternate align ... ...' if vcfInfile[-3:] == '.gz': if refId == "ALL": I = os.popen('gzip -dc %s' % vcfInfile) else: I = os.popen('/home/siyang/Bin/software_pip/tabix-0.2.6/tabix -h %s %s' % (vcfInfile, refId)) else : I = open(vcfInfile) frist = True while 1: lines = I.readlines(100000) if not lines : break; for line in lines : line = line.strip('\n') col = line.split() if re.search(r'^##', line): if frist and re.search(r'^##FORMAT=', line): frist = False vcfOutHandle.write('##FORMAT=<ID=AA,Number=4,Type=Integer,Description="Information of Alternate Align. Format: Ref_perfect,Alt_Perfect,Both_Perfect,Both_Imperfect">\n') vcfOutHandle.write('%s\n' % line) continue elif re.search(r'^#CHROM', line): if len(col) < 10 : print >> sys.stderr, '# [ERROR] The input vcf file (%s) does not contain the "Sample" fields!\n' % vcfInfile sys.exit(1) sam2col = {sam:i+9 for i, sam in enumerate(col[9:])} if sampleID not in sam2col: print >> sys.stderr, '# [ERROR] The input sample id (%s) is not match in Vcf file(%s)\n' % (sampleID, vcfInfile) if len(newID) > 0: vcfOutHandle.write('%s\t%s\n' % ('\t'.join(col[:9]), newID)) else: vcfOutHandle.write('%s\t%s\n' % ('\t'.join(col[:9]), sampleID)) continue if refId != 'ALL' and refId != col[0]: continue if col[4] == '.': continue # ignore REFCALL or INTERGAP idx = sam2col[sampleID] fi = col[idx].split(':') gt = fi[0].split('/') if '|' in fi[0]: gt = fi[0].split('|') gtIdx = 1 # Default is the first ALT Sequence if len(gt) == 2 and gt[1] != '.': gtIdx = string.atoi(gt[1]) col[4] = col[4].split(',')[gtIdx-1] # Match to the identity sample isAltAlign = False zr,za,zc,zi = 0,0,0,0 if not IsSNP(col[3], [col[4]]): # Not SNP, INTERGAP ... isAltAlign = True zr,za,zc,zi = ATA.Align(samInHandle, fa, col[0], string.atoi(col[1]), col[2], col[3], col[4][1:], #col[4][0] is reference mapq) # Ignore the position which is meanless if not isAltAlign and col[idx] == './.': continue fm = {t:i for i, t in enumerate(col[8].split(':'))} # Get Format if col[idx] != './.' 
and len(fi) != len(fm): raise ValueError('[ERROR] The format of "FORMAT"' + 'fields is not match sample ' + '%r in %r' % (col[idx], fm)) for type in ['VS', 'VT']: if type not in fm: raise ValueError('[ERROR] The format of VCF file is ' + 'not right which you input, it did ' + 'not contian %s field in FORMAT') format = {} for k, i in fm.items(): if k != 'GT' and col[idx] != './.': format[k] = fi[i] # Use first sample which is not './.' to set VT and VS if col[idx] == './.' # This is the same idea with what we do above for 'gtIdx = 1' if col[idx] == './.': if isAltAlign: isam = [sam for sam in col[9:] if sam != './.' and not re.search(r'^0/0:', sam)] if len(isam) == 0: # This may happen if appear duplication position and pick the # REFCALL instand of Variant call when CombineVar with GATK isam = [sam for sam in col[9:] if sam != './.'][0].split(':') else: isam = isam[0].split(':') format['VT'] = isam[fm['VT']] format['VS'] = isam[fm['VS']] if isAltAlign: format['AA'] = ','.join(str(a) for a in [zr,za,zc,zi]) gt = fi[fm['GT']].split('/') if '|' in fi[fm['GT']]: gt = fi[fm['GT']].split('|') for i, g in enumerate(gt): if g != '.' and string.atoi(g) > 1: gt[i] = '1' if '|' in fi[fm['GT']]: fi[fm['GT']] = '|'.join(gt) else: fi[fm['GT']] = '/'.join(gt) col[8] = 'GT:' + ':'.join(sorted(format.keys())) # Still keep the origin genotype col[idx] = fi[fm['GT']] + ':' + ':'.join([format[k] for k in sorted(format.keys())]) vcfOutHandle.write('%s\t%s\n' % ('\t'.join(col[:9]), col[idx])) I.close() samInHandle.close() vcfOutHandle.close() print >> sys.stderr, '# [INFO] Closing the two Ouput files :\n -- %s' % (outPrefix + '.vcf') ######################################################################## ######################################################################## if __name__ == '__main__' : usage = "\nUsage : %prog [option] [-v vcfInfile] > Output" optp = optparse.OptionParser(usage=usage) optp.add_option("-v", "--vcf", dest="vcfInfile", metavar="VCF", help="Variants. VCF format.", default=[] ) optp.add_option("-b", "--bam", dest="bamInfile", metavar="BAM", help="Bam Alignment file. ", default=[] ) optp.add_option("-c", "--chr", dest="refChrId" , metavar="CHR", help="The chr ID of Re." , default='ALL') optp.add_option("-r", "--ref", dest="refInfile", metavar="REF", help="Reference fa format. ", default=[] ) optp.add_option("-s", "--smp", dest="sample" , metavar="SMP", help="Sample ID." , default=[] ) optp.add_option("-o", "--out", dest="outPrefix", metavar="OUT", help="The prefix of output. [out]" , default = 'out') optp.add_option("-q", "--qul", dest="mapq" , metavar="QUL", help="Threshold of Mapping Quality. [20]", default = '20' ) optp.add_option("-e", "--exc", dest="exc" , metavar="EXC", help="Change Sample ID(-s) to be -e in output", default=[]) opt, _ = optp.parse_args() if len(opt.vcfInfile) == 0: optp.error("Required [-v vcfInfile]\n") if len(opt.bamInfile) == 0: optp.error("Required [-b bamInfile]\n") if len(opt.refInfile) == 0: optp.error("Required [-r reference] Fa format\n") if len(opt.sample ) == 0: optp.error("Required [-s sample ID]\n") opt.mapq = string.atoi(opt.mapq) print >> sys.stderr, '#[INFO] Paraeters: python', sys.argv[0], '\n\t-v', opt.vcfInfile, \ '\n\t-b', opt.bamInfile, '\n\t-r', opt.refInfile, '\n\t-s', opt.sample, '\n\t-o', opt.outPrefix, \ '\n\t-q', opt.mapq , '\n\t-c', opt.refChrId if len(opt.exc) > 0: print >> sys.stderr, '\t-e', opt.exc, '\n' else : print >> sys.stderr, '\n' main(opt) print >> sys.stderr, '*********************** ALL DONE ***********************\n'
mit
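The script above walks a VCF and, for each non-SNP variant, re-examines the supporting reads in a BAM file through pysam. A sketch of that read-access pattern, written against the current pysam API (pysam.AlignmentFile replaces the older pysam.Samfile used in the script); the BAM path and region are placeholders and an index (.bai) is assumed to exist:

import pysam

with pysam.AlignmentFile("sample.bam", "rb") as bam:
    for read in bam.fetch("chr1", 10000, 10100):
        # same mapping-quality threshold idea as the script's -q option (default 20)
        if read.mapping_quality < 20:
            continue
        print(read.query_name, read.reference_start, read.cigarstring)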
vibhorag/scikit-learn
examples/mixture/plot_gmm_classifier.py
250
3918
""" ================== GMM classification ================== Demonstration of Gaussian mixture models for classification. See :ref:`gmm` for more information on the estimator. Plots predicted labels on both training and held out test data using a variety of GMM classifiers on the iris dataset. Compares GMMs with spherical, diagonal, full, and tied covariance matrices in increasing order of performance. Although one would expect full covariance to perform best in general, it is prone to overfitting on small datasets and does not generalize well to held out test data. On the plots, train data is shown as dots, while test data is shown as crosses. The iris dataset is four-dimensional. Only the first two dimensions are shown here, and thus some points are separated in other dimensions. """ print(__doc__) # Author: Ron Weiss <ronweiss@gmail.com>, Gael Varoquaux # License: BSD 3 clause # $Id$ import matplotlib.pyplot as plt import matplotlib as mpl import numpy as np from sklearn import datasets from sklearn.cross_validation import StratifiedKFold from sklearn.externals.six.moves import xrange from sklearn.mixture import GMM def make_ellipses(gmm, ax): for n, color in enumerate('rgb'): v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2]) u = w[0] / np.linalg.norm(w[0]) angle = np.arctan2(u[1], u[0]) angle = 180 * angle / np.pi # convert to degrees v *= 9 ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1], 180 + angle, color=color) ell.set_clip_box(ax.bbox) ell.set_alpha(0.5) ax.add_artist(ell) iris = datasets.load_iris() # Break up the dataset into non-overlapping training (75%) and testing # (25%) sets. skf = StratifiedKFold(iris.target, n_folds=4) # Only take the first fold. train_index, test_index = next(iter(skf)) X_train = iris.data[train_index] y_train = iris.target[train_index] X_test = iris.data[test_index] y_test = iris.target[test_index] n_classes = len(np.unique(y_train)) # Try GMMs using different types of covariances. classifiers = dict((covar_type, GMM(n_components=n_classes, covariance_type=covar_type, init_params='wc', n_iter=20)) for covar_type in ['spherical', 'diag', 'tied', 'full']) n_classifiers = len(classifiers) plt.figure(figsize=(3 * n_classifiers / 2, 6)) plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05, left=.01, right=.99) for index, (name, classifier) in enumerate(classifiers.items()): # Since we have class labels for the training data, we can # initialize the GMM parameters in a supervised manner. classifier.means_ = np.array([X_train[y_train == i].mean(axis=0) for i in xrange(n_classes)]) # Train the other parameters using the EM algorithm. classifier.fit(X_train) h = plt.subplot(2, n_classifiers / 2, index + 1) make_ellipses(classifier, h) for n, color in enumerate('rgb'): data = iris.data[iris.target == n] plt.scatter(data[:, 0], data[:, 1], 0.8, color=color, label=iris.target_names[n]) # Plot the test data with crosses for n, color in enumerate('rgb'): data = X_test[y_test == n] plt.plot(data[:, 0], data[:, 1], 'x', color=color) y_train_pred = classifier.predict(X_train) train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100 plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy, transform=h.transAxes) y_test_pred = classifier.predict(X_test) test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100 plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy, transform=h.transAxes) plt.xticks(()) plt.yticks(()) plt.title(name) plt.legend(loc='lower right', prop=dict(size=12)) plt.show()
bsd-3-clause
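The example above relies on APIs that no longer exist in scikit-learn (sklearn.mixture.GMM and sklearn.cross_validation were removed). A sketch of the same supervised-initialisation idea with the current GaussianMixture API; it assumes, as the original does, that initialising one component mean per class keeps component i aligned with class i:

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.mixture import GaussianMixture

iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.25, stratify=iris.target, random_state=0)

n_classes = len(np.unique(y_train))
gmm = GaussianMixture(
    n_components=n_classes,
    covariance_type="full",
    # supervised initialisation: one component mean per class, as in the example above
    means_init=np.array([X_train[y_train == i].mean(axis=0) for i in range(n_classes)]),
    max_iter=20,
    random_state=0,
)
gmm.fit(X_train)
# component index is treated as the predicted class label, which only works
# because of the per-class initialisation above
print("test accuracy: %.1f%%" % (100 * np.mean(gmm.predict(X_test) == y_test)))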
nachitoys/distributionalSemanticStabilityThesis
mkl.py
2
6182
from modshogun import * from numpy import * from sklearn.metrics import r2_score from scipy.stats import randint from scipy.stats import randint as sp_randint from scipy.stats import expon from mkl_regressor import * from time import localtime, strftime if __name__ == "__main__": import Gnuplot, Gnuplot.funcutils from sklearn.grid_search import RandomizedSearchCV as RS from argparse import ArgumentParser as ap parser = ap(description='This script trains/applies a SVR over any input dataset of numerical representations. The main aim is to determine a set of learning parameters') parser.add_argument("-x", help="Input file name (train vectors)", metavar="input_file", default=None) parser.add_argument("-y", help="""Regression labels file. Do not specify this argument if you want to uniauely predict over any test set. In this case, you must to specify the SVR model to be loaded as the parameter of the option -o.""", metavar="regrLabs_file", default = None) parser.add_argument("-X", help="Input file name (TEST vectors)", metavar="test_file", default = None) parser.add_argument("-Y", help="Test labels file.", metavar="testLabs_file", default = None) parser.add_argument("-n", help="Number of tests to be performed.", metavar="tests_amount", default=1) parser.add_argument("-o", help="""The operation the input data was derived from. Options: {'conc', 'convss', 'sub'}. In the case you want to give a precalculated center for width randomization (the median width), specify the number. e.g. '-o 123.654'. A filename can be specified, which is the file where a pretrained MKL model, e.g. '-o filename.model'""", metavar="median", default=0.01) #parser.add_argument("-u", help="Especify C regulazation parameter. For a list '-u C:a_b', for a value '-u C:a'.", metavar="fixed_params", default = None) #parser.add_argument("-K", help="Kernel type custom specification. Uniquely valid if -u is not none. Options: gaussian, linear, sigmoid.", metavar="kernel", default = None) #parser.add_argument("-s", help="Toggle if you will process sparse input format.", action="store_true", default = False) parser.add_argument("--estimate", help="Toggle if you will predict the training.", action="store_true", default = False) parser.add_argument("--predict", help="Toggle if you will predict just after estimating (This is assumed if you provide a model file instead of a medianwidth: option '-m'.).", action="store_true", default = False) parser.add_argument("-k", help="k-fold cross validation for the randomized search.", metavar="k-fold_cv", default=None) parser.add_argument("-p", help="Minimum number of basis kernels.", metavar="min_amount", default=2) parser.add_argument("-P", help="Maximum number of basis kernels.", metavar="max_amount", default=10) args = parser.parse_args() #name_components = shatter_file_name() model_file = None # "/almac/ignacio/data/mkl_models/mkl_0.model" out_file = "mkl_outs/mkl_idx_corpus_source_repr_dims_op_other.out" if args.X: # Test set. labels_t = loadtxt(args.Y) #loadtxt("/almac/ignacio/data/sts_all/pairs-NO_2013/STS.gs.FNWN.txt") if args.Y: data_t = loadtxt(args.X) #loadtxt("/almac/ignacio/data/sts_all/pairs-NO_2013/vectors_H10/pairs_eng-NO-test-2e6-nonempty_FNWN_d2v_H10_sub_m5w8.mtx") if args.x != None: assert args.y # If training data given, supply corresponding labels. 
labels = loadtxt(args.y) #loadtxt("/almac/ignacio/data/sts_all/pairs-NO_2013/STS.gs.OnWN.txt") data = loadtxt(args.x) #loadtxt("/almac/ignacio/data/sts_all/pairs-NO_2013/vectors_H10/pairs_eng-NO-test-2e6-nonempty_OnWN_d2v_H10_sub_m5w8.mtx") k = int(args.k) N = int(args.n) min_p = int(args.p) max_p = int(args.P) median_w = float(args.o) # median_width = None, width_scale = 20.0, min_size=2, max_size = 10, kernel_size = None sys.stderr.write("\n>> [%s] Training session begins...\n" % (strftime("%Y-%m-%d %H:%M:%S", localtime()))) params = {'svm_c': expon(scale=100, loc=0.001), 'mkl_c': expon(scale=100, loc=0.001), 'degree': sp_randint(0, 24), #'widths': expon_vector(loc = m, min_size = 2, max_size = 10) 'width_scale': [0.5, 1.0, 2.0, 2.5, 3.0, 3.5, 4.0], 'median_width': expon(scale=1, loc=median_w), 'kernel_size': [2, 3, 4, 5, 6, 7, 8] } param_grid = [] for i in xrange(N): param_grid.append(params) i = 0 for params in param_grid: mkl = mkl_regressor() rs = RS(mkl, param_distributions = params, n_iter = 20, n_jobs = 24, cv = k, scoring="mean_squared_error")#"r2") rs.fit(data, labels) rs.best_estimator_.save('/almac/ignacio/data/mkl_models/mkl_%d.model' % i) if args.estimate: # If user wants to save estimates test_predict(data = data, machine = rs.best_estimator_, labels = labels, out_file = out_file) if args.predict: # If user wants to predict and save just after training. assert not args.X is None # If test data is provided #preds = rs.best_estimator_.predict(data_t) if args.Y: # Get performance if test labels are provided test_predict(data = data_t, machine = rs.best_estimator_, labels = labels_t, out_file = out_file + ".pred") else: # Only predictions test_predict(data = data_t, machine = rs.best_estimator_, out_file = out_file + ".pred") sys.stderr.write("\n:>> [%s] Finished!!\n" % (strftime("%Y-%m-%d %H:%M:%S", localtime()))) else: idx = 0 test_predict(data = data_t, machine = "mkl_regerssion", file="/almac/ignacio/data/mkl_models/mkl_%d.asc" % idx, labels = labels_t, out_file = out_file) sys.stderr.write("\n:>> [%s] Finished!!\n" % (strftime("%Y-%m-%d %H:%M:%S", localtime())))
gpl-2.0
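The script above tunes a shogun-based MKL regressor by drawing hyper-parameters from exponential and integer distributions with RandomizedSearchCV. A sketch of the same search pattern with scikit-learn's SVR standing in for the custom mkl_regressor (the data here is synthetic, and note that the "mean_squared_error" scoring string used in the script is spelled "neg_mean_squared_error" in current scikit-learn):

import numpy as np
from scipy.stats import expon
from sklearn.model_selection import RandomizedSearchCV
from sklearn.svm import SVR

# synthetic regression data used only to make the sketch runnable
rng = np.random.RandomState(0)
X = rng.randn(200, 10)
y = 2.0 * X[:, 0] + 0.1 * rng.randn(200)

# exponential distributions mirror the expon(scale=..., loc=...) choices in the script
param_distributions = {
    "C": expon(scale=100, loc=0.001),
    "gamma": expon(scale=1.0),
}
search = RandomizedSearchCV(SVR(kernel="rbf"), param_distributions,
                            n_iter=20, cv=5,
                            scoring="neg_mean_squared_error", random_state=0)
search.fit(X, y)
print(search.best_params_, search.best_score_)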
lukauskas/dgw
dgw/data/containers.py
2
20085
from logging import debug import pandas as pd import numpy as np from dgw.data.parsers.pois import map_to_bins from dgw.dtw import no_nans_len class AlignmentsDataIndexer(object): """ A wrapper around `_NDFrameIndexer` that would return `AlignmentsData` objects instead of `pd.Panel` objects """ _ndframe_indexer = None _alignments_data = None def __init__(self, ndframe_indexer, alignments_data): self._ndframe_indexer = ndframe_indexer self._alignments_data = alignments_data def __getitem__(self, key): result = self._ndframe_indexer.__getitem__(key) if isinstance(result, pd.Panel): data = AlignmentsData(result, self._alignments_data.resolution) data.points_of_interest = self._alignments_data.points_of_interest return data else: return result def __setitem__(self, key, value): self._ndframe_indexer.__setitem__(self, key, value) class RegionsIndexer(object): """ A wrapper around `_NDFrameIndexer` that would return `Regions` objects instead of `pd.DataFrame` objects """ _ndframe_indexer = None regions = None def __init__(self, ndframe_indexer, regions): self._ndframe_indexer = ndframe_indexer self.regions = regions def __getitem__(self, key): result = self._ndframe_indexer.__getitem__(key) if isinstance(result, pd.DataFrame): data = self.regions.__class__(result) return data else: return result def __setitem__(self, key, value): self._ndframe_indexer.__setitem__(self, key, value) class AlignmentsData(object): _data = None _poi = None _scale = None _resolution = None def __init__(self, panel, resolution, poi=None, scale='raw'): """ Initialises `AlignmentsData` with a `panel` provided. The panel is assumed to have data sets on the minor axis See `dgw.data.parsers.read_bam` for how to generate this data. :param panel: `pd.Panel` object `AlignmentsData` will be initialised on, or `pd.DataFrame` that will be converted to Panel :param resolution: resolution of data :param poi: points of interest :param scale: the scale of data :return: """ if isinstance(panel, pd.DataFrame): # Create a panel from the DataFrame by giving it a generic name and making sure it is on the minor axis self._data = pd.Panel(set(['Dataset 1'])).transpose(1, 2, 0) elif isinstance(panel, pd.Panel): self._data = panel else: raise Exception('Invalid type of data provided for AlignmentsData: {0!r}, expected pd.Panel or pd.Dataframe' .format(type(panel))) self._scale = scale self.points_of_interest = poi self._resolution = resolution def reset_poi(self): self._poi = {} @property def data(self): return self._data @property def points_of_interest(self): return self._poi @property def resolution(self): return self._resolution @points_of_interest.setter def points_of_interest(self, value): if value is None: self._poi = {} else: self._poi = dict([(ix, value) for ix, value in value.iteritems() if ix in self.items]) def add_points_of_interest(self, binned_points_of_interest_regions, name): for ix, value in binned_points_of_interest_regions.iteritems(): if ix not in self.items: continue try: self._poi[ix][name] = value except KeyError: self._poi[ix] = {name: value} def drop_no_pois(self): common_index = self.items & self.points_of_interest return self.ix[common_index] #-- Functions that simulate pd.Panel behaviour ------------------------------------------------------------------- def mean(self, axis='items', skipna=True): # Override axis parameter in the pd.Panel mean function return self._data.mean(axis=axis, skipna=skipna) @property def values(self): return self.data.values @property def number_of_datasets(self): return 
len(self.dataset_axis) @property def number_of_items(self): return len(self.items) @property def number_of_columns(self): return len(self.major_axis) @property def lengths(self): a = [] for _, row in self.data.iteritems(): a.append(no_nans_len(row.values)) return pd.Series(a, index=self.items) @property def dataset_axis(self): return self.data.minor_axis def dataset_xs(self, *args, **kwargs): return self.data.minor_xs(*args, **kwargs) @property def items(self): return self.data.items @property def ix(self): return AlignmentsDataIndexer(self.data.ix, self) def __getitem__(self, item): return self.data.__getitem__(item) def head(self, n=5): return self.ix[:n] @property def major_axis(self): return self.data.major_axis #-- Additional transformations not visible in default pd.Panel ---------------------------------------- def __len__(self): return self.number_of_items def to_log_scale(self): if self._scale == 'log': return self new_data = (self.data + 2).apply(np.log) # Adding +2 so we have no zeros in log output ad = AlignmentsData(new_data, self.resolution, scale='log') ad.points_of_interest = self.points_of_interest return ad def normalise_bin_heights(self): data = {} for ix, data_row in self.data.iteritems(): data[ix] = data_row / data_row.max() data = pd.Panel(data) return self.__class__(data, self.resolution, poi=self.points_of_interest) def plot_heatmap(self, *args, **kwargs): """ Plots heatmap of the data stored in the panel. :param args: args to be passed into `visualisation.heatmap.plot` :param kwargs: kwargs to be passed into `visualisation.heatmap.plot` :return: """ import visualisation.heatmap return visualisation.heatmap.plot(self, *args, **kwargs) def __repr__(self): return '{0} containing\n{1!r}'.format(self.__class__, self.data) def __array__(self, *args, **kwargs): return self.data.__array__(*args, **kwargs) class Regions(object): REQUIRED_COLUMNS = frozenset(['chromosome', 'start', 'end']) _data = None def __init__(self, data, *args, **kwargs): if isinstance(data, Regions): self._data = data.data else: data = pd.DataFrame(data, *args, **kwargs) # Verify that all required columns are in the DF for column in self.REQUIRED_COLUMNS: if column not in data.columns: raise ValueError('No such column {0!r} in provided DataFrame'.format(column)) self._data = data self._data = self._data.drop_duplicates() # TODO: somehow join the indices of the dropped data @property def data(self): return self._data def __repr__(self): return '{0} containing \n{1!r}'.format(self.__class__, self.data) # --- Initialisation ---------------------------------------------------------------------------------------------- @classmethod def from_bed(cls, bed_file): from dgw.data.parsers import read_bed return cls(read_bed(bed_file)) def to_bed(self, bed_file, **track_kwargs): from dgw.data.parsers import write_bed return write_bed(self, bed_file, **track_kwargs) # --- Functions that provide direct access to the DataFrame behind all this ---------------------------------------- def __getitem__(self, item): result = self.data[item] if isinstance(result, pd.DataFrame): try: # Try returining it as the same class return self.__class__(result) except ValueError: # If not valid, return as DataFrame return result return result def has_strand_data(self): return 'strand' in self.columns def infer_strand_from_whether_the_region_was_reversed_or_not(self, reversion_status_dictionary): """ Infers the strand of a region by checking whether the region was reversed or not. 
:param reversion_status_dictionary: Dictionary of boolean true/false values, true indicated the region was reversed by DTW :return: """ data = self.data.copy() strand_dict = {} for key, reversed in reversion_status_dictionary.iteritems(): if key not in data.index: continue if reversed is None: strand_dict[key] = None elif reversed: strand_dict[key] = '-' else: strand_dict[key] = '+' strand_series = pd.Series(strand_dict) data['strand'] = strand_series return self.__class__(data) def iterrows(self): return self.data.iterrows() def head(self, *args, **kwargs): return self.__class__(self.data.head(*args, **kwargs)) @property def index(self): return self.data.index def join(self, *args, **kwargs): new_data = self.data.join(*args, **kwargs) return self.__class__(new_data) def append(self, *args, **kwargs): new_data = self.data.append(*args, **kwargs) return self.__class__(new_data) @property def ix(self): return RegionsIndexer(self.data.ix, self) @property def columns(self): return self.data.columns def __getattr__(self, name): """ Emulate the behaviour in `pd.DataFrame` to return one of the columns as an attribute. :param name: :return: """ # Partially stolen from pandas implementation. if name in self.data.columns: return self.data[name] raise AttributeError("{0!r} has no attribute {1!r}".format(type(self).__name__, name)) def __len__(self): return self.data.__len__() # --- Functions special to Regions --------------------------------------------------------------------------------- @property def lengths(self): """ Returns a `pd.Series` containing the lengths of all the regions contained :rtype: `pd.Series` """ series = self['end'] - self['start'] series.name = 'length' return series def contained_within(self, other_region): """ Returns all regions that are contained within other region. Returns only those regions that are fully contained within the query region. :param other_region: The region that regions will be checked to be inside :type other_region: `pd.Series` :return: """ return self[(self.chromosome == other_region['chromosome']) & (self.start >= other_region['start']) & (self.end <= other_region['end'])] def as_printable_list_of_pois(self): printable_list = "" for ix, row in self.iterrows(): if printable_list: printable_list += ',' printable_list += ','.join(map(str, range(row['start'], row['end']))) return printable_list def regions_not_in_dataset(self, dataset): """ Returns all regions that are not in the dataset provided. 
:param dataset: :type dataset: AlignmentsData :return: """ missing_indices = self.index[~self.index.isin(dataset.items)] return Regions(self.data.ix[missing_indices]) def as_bins_of(self, other_regions, resolution=1, ignore_non_overlaps=False, account_for_strand_information=False): """ Returns the current regions as bins of some other set of regions provided :param other_regions: regions to match self to :param resolution: resolution at which to do so :param ignore_non_overlaps: if set to false, the parser will raise a ValueError if self does not overlap with other regions :param account_for_strand_information: if set to true, the parser will account for the antisense regions and return bins from the end, rather than front :return: """ other_regions = other_regions.clip_to_resolution(resolution) bins = {} for ix, data in self.iterrows(): current_start = data['start'] current_end = data['end'] current_chromosome = data['chromosome'] try: other = other_regions.ix[ix] except KeyError: continue other_chromosome = other['chromosome'] if current_chromosome != other_chromosome: if ignore_non_overlaps: continue else: raise ValueError('Points of interest do not overlap with regions of interest. Failed ix:{0!r}'.format(ix)) bins[ix] = map_to_bins(range(current_start, current_end), other, resolution=resolution, ignore_non_overlaps=ignore_non_overlaps, account_for_strand_information=account_for_strand_information) return bins def clip_to_resolution(self, resolution): """ Adjusts the region boundaries to fit the resolution by extending the regions boundaries as to length of the regions is divisible from resolution. Returns a new `Regions` object rather than clipping this one in place Please note that the new `Regions` object this function returns will not include any other fields but the ones listed in `Regions.REQUIRED_COLUMNS`. :param resolution: the resolution to clip :type resolution: int :return: a new `Regions` object :rtype: Regions """ resolution = int(resolution) if resolution == 1: return self elif resolution <= 0: raise ValueError('Resolution should be > 0 ({0!r} given)'.format(resolution)) lengths = self.lengths new_regions_data = [] for ix, row in self.iterrows(): length = lengths.ix[ix] remainder = length % resolution if remainder == 0: offset_needed = 0 else: offset_needed = resolution - remainder add_left = offset_needed / 2 add_right = offset_needed / 2 + offset_needed % 2 row['start'] -= add_left if row['start'] < 0: # Check if we accidentally went sub zero add_right += -row['start'] row['start'] = 0 row['end'] += add_right new_regions_data.append(row) df = pd.DataFrame(new_regions_data, index=self.index) return Regions(df) def bins_to_intervals(self, resolution): """ Returns genomic location intervals in the form of (start, end) for each bin of the region at resolution provided, for each region :param resolution: :return: """ regions = self.clip_to_resolution(resolution) intervals = {} for ix, row in regions.iterrows(): start = row['start'] end = row['end'] intervals[ix] = [(i, i+resolution) for i in np.arange(start, end, resolution, dtype=int)] return intervals class Genes(Regions): def transcription_start_sites(self): """ Returns transcription start site locations for the current set of Genes. Pays attention to the strand of the gene and returns the start of the gene for positive strand genes and the end of the gene for the negative strand genes. 
:rtype: `pd.Series` """ start_sites = self[self.strand=='+']['start'].append(self[self.strand=='-']['end']) start_sites = start_sites.ix[self.index] # reindex the data using original order return start_sites def regions_around_transcription_start_sites(self, window_width): """ Returns a `Regions` object corresponding to the locations from -window_width to +window_width around the transcription start sites for these genes :param window_width: the width of the window :type window_width: int :return: regions around tss :rtype: `Regions` """ tss_locations = self.transcription_start_sites() # Add window around these starts = tss_locations - window_width starts[starts < 0] = 0 ends = tss_locations + window_width regions_df = pd.DataFrame({'chromosome' : self.chromosome, 'start' : starts, 'end' : ends, 'strand' : self.strand}, index=self.index) return Regions(regions_df) def regions_around_splicing_site(self, splicing_site, window_width): """ Return a `Regions` object with windows around first splicing site :param splicing_site: number of the splicing site to get (0 based - "0" for first splicing site) :param window_width: :return: """ exon = self.get_exon_regions(splicing_site) exon_genes = self.ix[exon.index] ss = exon[exon_genes.strand == '+']['end'].append(exon[exon_genes.strand == '-']['start']) ss = ss.ix[self.index] starts = ss - window_width ends = ss + window_width regions_df = pd.DataFrame({'chromosome': exon_genes['chromosome'], 'start': starts, 'end': ends, 'strand' : exon_genes.strand}, index=exon_genes.index) return Regions(regions_df) def get_exon_regions(self, exon_number, account_for_antisense=True): """ Returns exon regions for particular exon_number provided (0-based) :param exon_number: exon number - 0 based. I.e. use 0 to get regions of first exon :param account_for_antisense: if set to true, it will automatically account for antisense genes and return correct exon :return: """ if 'exonStarts' not in self.columns or 'exonEnds' not in self.columns: raise Exception('No exon data available, sorry') sub_df = self[['chromosome', 'exonStarts', 'exonEnds', 'strand']] new_df = [] for ix, row in sub_df.iterrows(): exon_starts = row['exonStarts'].strip(',').split(',') exon_ends = row['exonEnds'].strip(',').split(',') chromosome = row['chromosome'] strand = row['strand'] if account_for_antisense and strand == '-': exon_i = len(exon_starts) - 1 - exon_number else: exon_i = exon_number try: start = int(exon_starts[exon_i]) end = int(exon_ends[exon_i]) except IndexError: start = np.nan end = np.nan new_df.append({'chromosome': chromosome, 'start': start, 'end': end, 'strand': strand}) new_df = pd.DataFrame(new_df, index=self.index).dropna() return Regions(new_df) @classmethod def from_encode_known_genes(cls, encode_known_genes_filename): from dgw.data.parsers import read_encode_known_genes return read_encode_known_genes(encode_known_genes_filename) @classmethod def from_gtf(cls, gtf_filename): from dgw.data.parsers import read_gtf return read_gtf(gtf_filename)
gpl-3.0
wlamond/scikit-learn
examples/svm/plot_iris.py
65
3742
""" ================================================== Plot different SVM classifiers in the iris dataset ================================================== Comparison of different linear SVM classifiers on a 2D projection of the iris dataset. We only consider the first 2 features of this dataset: - Sepal length - Sepal width This example shows how to plot the decision surface for four SVM classifiers with different kernels. The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly different decision boundaries. This can be a consequence of the following differences: - ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the regular hinge loss. - ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass reduction while ``SVC`` uses the One-vs-One multiclass reduction. Both linear models have linear decision boundaries (intersecting hyperplanes) while the non-linear kernel models (polynomial or Gaussian RBF) have more flexible non-linear decision boundaries with shapes that depend on the kind of kernel and its parameters. .. NOTE:: while plotting the decision function of classifiers for toy 2D datasets can help get an intuitive understanding of their respective expressive power, be aware that those intuitions don't always generalize to more realistic high-dimensional problems. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets def make_meshgrid(x, y, h=.02): """Create a mesh of points to plot in Parameters ---------- x: data to base x-axis meshgrid on y: data to base y-axis meshgrid on h: stepsize for meshgrid, optional Returns ------- xx, yy : ndarray """ x_min, x_max = x.min() - 1, x.max() + 1 y_min, y_max = y.min() - 1, y.max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) return xx, yy def plot_contours(ax, clf, xx, yy, **params): """Plot the decision boundaries for a classifier. Parameters ---------- ax: matplotlib axes object clf: a classifier xx: meshgrid ndarray yy: meshgrid ndarray params: dictionary of params to pass to contourf, optional """ Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) out = ax.contourf(xx, yy, Z, **params) return out # import some data to play with iris = datasets.load_iris() # Take the first two features. We could avoid this by using a two-dim dataset X = iris.data[:, :2] y = iris.target # we create an instance of SVM and fit out data. We do not scale our # data since we want to plot the support vectors C = 1.0 # SVM regularization parameter models = (svm.SVC(kernel='linear', C=C), svm.LinearSVC(C=C), svm.SVC(kernel='rbf', gamma=0.7, C=C), svm.SVC(kernel='poly', degree=3, C=C)) models = (clf.fit(X, y) for clf in models) # title for the plots titles = ('SVC with linear kernel', 'LinearSVC (linear kernel)', 'SVC with RBF kernel', 'SVC with polynomial (degree 3) kernel') # Set-up 2x2 grid for plotting. fig, sub = plt.subplots(2, 2) plt.subplots_adjust(wspace=0.4, hspace=0.4) X0, X1 = X[:, 0], X[:, 1] xx, yy = make_meshgrid(X0, X1) for clf, title, ax in zip(models, titles, sub.flatten()): plot_contours(ax, clf, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8) ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k') ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xlabel('Sepal length') ax.set_ylabel('Sepal width') ax.set_xticks(()) ax.set_yticks(()) ax.set_title(title) plt.show()
bsd-3-clause
louispotok/pandas
pandas/io/api.py
14
1146
""" Data IO api """ # flake8: noqa from pandas.io.parsers import read_csv, read_table, read_fwf from pandas.io.clipboards import read_clipboard from pandas.io.excel import ExcelFile, ExcelWriter, read_excel from pandas.io.pytables import HDFStore, get_store, read_hdf from pandas.io.json import read_json from pandas.io.html import read_html from pandas.io.sql import read_sql, read_sql_table, read_sql_query from pandas.io.sas import read_sas from pandas.io.feather_format import read_feather from pandas.io.parquet import read_parquet from pandas.io.stata import read_stata from pandas.io.pickle import read_pickle, to_pickle from pandas.io.packers import read_msgpack, to_msgpack from pandas.io.gbq import read_gbq # deprecation, xref #13790 def Term(*args, **kwargs): import warnings warnings.warn("pd.Term is deprecated as it is not " "applicable to user code. Instead use in-line " "string expressions in the where clause when " "searching in HDFStore", FutureWarning, stacklevel=2) from pandas.io.pytables import Term return Term(*args, **kwargs)
bsd-3-clause
channsoden/hannsoden-bioinformatics
WholeGenomePhylogeny/WGP2_multiple_alignment.py
1
5866
#!/usr/bin/env python # Standard modules import os, sys import subprocess as sp from multiprocessing import Pool # Nonstandard modules from Bio import SeqIO import pandas as pd # My modules import fasta_tools from processing_tools import mapPool from SLURM_tools import submit from SLURM_tools import job_wait import WGP_config as cfg def multiple_alignment(args, fastas): basedir = os.getcwd() alignment = basedir+'/2_alignment/'+args.output+'.fasta' os.chdir('2_alignment') if not os.path.isfile(alignment) or args.force: if args.force: unaligned_fastas = fastas else: unaligned_fastas = [fasta for fasta in fastas if not os.path.isfile(trim_name(fasta))] if unaligned_fastas: chunk_size = int(len(unaligned_fastas) / 4) + 1 chunks = [unaligned_fastas[i:i+chunk_size] for i in [n * chunk_size for n in range(4)]] # Run this script with list of fastas as args jobs = [(submit_alignment_batch, ['{} {} {}'.format(sys.executable, __file__, ' '.join(chunk))]) for chunk in chunks] IDs = mapPool(4, jobs) outfiles = ['mafft_'+str(ID)+'.out' for ID in IDs] errfiles = ['mafft_'+str(ID)+'.err' for ID in IDs] else: outfiles = [] errfiles = [] aligned = [align_name(fasta) for fasta in fastas] # Intermediate files from the alignment process. aligned_trimmed = [trim_name(fasta) for fasta in fastas] # The output files from the aligment process. concatenate_fasta(aligned_trimmed, alignment) cleanup(logs=outfiles+errfiles, trash=fastas+aligned+aligned_trimmed) os.chdir(basedir) return alignment def submit_alignment_batch(job): ID = submit(job, partition = cfg.LARGEpartition, account = cfg.LARGEaccount, qos = cfg.LARGEqos, time = '36:0:0', job_name = 'mafft', cpus_per_task = cfg.LARGEcpus, mem_per_cpu = cfg.LARGEmem, modules = cfg.modules) job_wait(ID) return ID def align_trim(fasta): aligned = align_name(fasta) trimmed = trim_name(fasta) command = '{} --globalpair --maxiterate 1000 --jtt 10 --nuc --inputorder {} 1> {}' command = command.format(cfg.mafft, fasta, aligned) mafft = sp.Popen(command, shell=True) mafft.wait() trim_gap_ends(aligned, trimmed) def align_name(fasta): return os.path.splitext(fasta)[0] + '_aligned.fasta' def trim_name(fasta): return os.path.splitext(fasta)[0] + '_aligned-trimmed.fasta' def trim_gap_ends(infasta, outfasta): infh = open(infasta, 'r') outfh = open(outfasta, 'w') records = list(SeqIO.parse(infh, 'fasta')) boolArray = pd.DataFrame([list(rec.seq) for rec in records]) != '-' start = find_start(boolArray) end = find_end(boolArray) for record in records: record.seq = record.seq[start:end] SeqIO.write(records, outfh, 'fasta') infh.close() outfh.close() def find_start(boolArray): upper = len(boolArray.columns) lower = 0 while upper - lower > 1: i = int((upper + lower) / 2) if has_started(boolArray, i): upper = i else: lower = i return lower def find_end(boolArray): upper = len(boolArray.columns) lower = 0 while upper - lower > 1: i = int((upper + lower) / 2) if has_ended(boolArray, i): upper = i else: lower = i return upper def has_started(boolArray, pos): return boolArray.iloc[:,:pos].any(axis=1).all() def has_ended(boolArray, pos): return not boolArray.iloc[:,pos:].any(axis=1).all() def concatenate_fasta(faAlignments, outfile): """Concatenates a list of fasta-formated alignment files.""" for fa in faAlignments: try: seqs = fasta_tools.fasta_to_dict(fa) break except fasta_tools.ParserError: pass try: concatenated = {name.split()[-1].split(':')[0]: [] for name in seqs} except NameError: raise fasta_tools.ParserError('All alignments are empty or malformed.') bad_files = 0 for fa in 
faAlignments: try: seqs = fasta_tools.fasta_to_dict(fa) [concatenated[name.split()[-1].split(':')[0]].append(seq) for name, seq in list(seqs.items())] except fasta_tools.ParserError: bad_files += 1 if bad_files: sys.stderr.write( 'Skipping {} empty or malformed alignment files\n'.format(bad_files) ) # Abbreviate thxe sequence names and join all the sequences into a single long string. concatenated = {shorten_name(name): ''.join(seqs) for name, seqs in list(concatenated.items())} fh = open(outfile, 'w') for name, seq in list(concatenated.items()): fh.write('>'+name+'\n') [fh.write(seq[i:i+80]+'\n') for i in range(0, len(seq), 80)] fh.close() return outfile def shorten_name(sequence_name): return '_'.join(sequence_name.strip().split('/')[-1].split('.')[0].split('_')[:2]) def RAxML_valid(seqlist): ustates = set() for seq in seqlist: if len(seq) < 1: return False seq = seq.upper() ustates = ustates.union(set(seq)) if len(ustates) >= 4: return True return False def cleanup(logs=[], trash=[]): try: os.mkdir('logs') except OSError: pass for log in logs: os.rename(log, 'logs/'+log) for f in trash: try: os.remove(f) except OSError: pass if __name__ == '__main__': fastas = sys.argv[1:] calls = [(align_trim, [fasta]) for fasta in fastas] nones = mapPool(20, calls, daemonic=True, chunksize=50)
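``trim_gap_ends`` above drops alignment columns before the first position at which every sequence has started and after the last position at which every sequence is still present, using binary searches (``find_start``/``find_end``) over a boolean gap matrix. The toy, self-contained sketch below reproduces the same column-trimming idea on a made-up alignment; the sequences are illustrative only and the linear scan stands in for the binary search.

# Toy illustration of the column trimming performed by trim_gap_ends above.
import pandas as pd

aligned = ["--ACGTAC--",
           "-TACGTACG-",
           "GTACGTAC--"]
boolArray = pd.DataFrame([list(s) for s in aligned]) != '-'
ncols = len(aligned[0])

# has_started(i): every row has at least one non-gap character in columns [0, i)
started = [boolArray.iloc[:, :i].any(axis=1).all() for i in range(ncols + 1)]
# has_ended(i): at least one row is all gaps from column i onwards
ended = [not boolArray.iloc[:, i:].any(axis=1).all() for i in range(ncols + 1)]

start = started.index(True) - 1      # mirrors find_start(), which returns `lower`
end = ended.index(True)              # mirrors find_end(), which returns `upper`
print(start, end)                    # 2 8 for this toy alignment
print([s[start:end] for s in aligned])   # ['ACGTAC', 'ACGTAC', 'ACGTAC']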
gpl-3.0
jmmease/pandas
pandas/core/util/hashing.py
6
10548
""" data hash pandas / numpy objects """ import itertools import numpy as np from pandas._libs import hashing, tslib from pandas.core.dtypes.generic import ( ABCMultiIndex, ABCIndexClass, ABCSeries, ABCDataFrame) from pandas.core.dtypes.common import ( is_categorical_dtype, is_list_like) from pandas.core.dtypes.missing import isna from pandas.core.dtypes.cast import infer_dtype_from_scalar # 16 byte long hashing key _default_hash_key = '0123456789123456' def _combine_hash_arrays(arrays, num_items): """ Parameters ---------- arrays : generator num_items : int Should be the same as CPython's tupleobject.c """ try: first = next(arrays) except StopIteration: return np.array([], dtype=np.uint64) arrays = itertools.chain([first], arrays) mult = np.uint64(1000003) out = np.zeros_like(first) + np.uint64(0x345678) for i, a in enumerate(arrays): inverse_i = num_items - i out ^= a out *= mult mult += np.uint64(82520 + inverse_i + inverse_i) assert i + 1 == num_items, 'Fed in wrong num_items' out += np.uint64(97531) return out def hash_pandas_object(obj, index=True, encoding='utf8', hash_key=None, categorize=True): """ Return a data hash of the Index/Series/DataFrame .. versionadded:: 0.19.2 Parameters ---------- index : boolean, default True include the index in the hash (if Series/DataFrame) encoding : string, default 'utf8' encoding for data & key when strings hash_key : string key to encode, default to _default_hash_key categorize : bool, default True Whether to first categorize object arrays before hashing. This is more efficient when the array contains duplicate values. .. versionadded:: 0.20.0 Returns ------- Series of uint64, same length as the object """ from pandas import Series if hash_key is None: hash_key = _default_hash_key if isinstance(obj, ABCMultiIndex): return Series(hash_tuples(obj, encoding, hash_key), dtype='uint64', copy=False) if isinstance(obj, ABCIndexClass): h = hash_array(obj.values, encoding, hash_key, categorize).astype('uint64', copy=False) h = Series(h, index=obj, dtype='uint64', copy=False) elif isinstance(obj, ABCSeries): h = hash_array(obj.values, encoding, hash_key, categorize).astype('uint64', copy=False) if index: index_iter = (hash_pandas_object(obj.index, index=False, encoding=encoding, hash_key=hash_key, categorize=categorize).values for _ in [None]) arrays = itertools.chain([h], index_iter) h = _combine_hash_arrays(arrays, 2) h = Series(h, index=obj.index, dtype='uint64', copy=False) elif isinstance(obj, ABCDataFrame): hashes = (hash_array(series.values) for _, series in obj.iteritems()) num_items = len(obj.columns) if index: index_hash_generator = (hash_pandas_object(obj.index, index=False, encoding=encoding, hash_key=hash_key, categorize=categorize).values # noqa for _ in [None]) num_items += 1 hashes = itertools.chain(hashes, index_hash_generator) h = _combine_hash_arrays(hashes, num_items) h = Series(h, index=obj.index, dtype='uint64', copy=False) else: raise TypeError("Unexpected type for hashing %s" % type(obj)) return h def hash_tuples(vals, encoding='utf8', hash_key=None): """ Hash an MultiIndex / list-of-tuples efficiently .. 
versionadded:: 0.20.0 Parameters ---------- vals : MultiIndex, list-of-tuples, or single tuple encoding : string, default 'utf8' hash_key : string key to encode, default to _default_hash_key Returns ------- ndarray of hashed values array """ is_tuple = False if isinstance(vals, tuple): vals = [vals] is_tuple = True elif not is_list_like(vals): raise TypeError("must be convertible to a list-of-tuples") from pandas import Categorical, MultiIndex if not isinstance(vals, ABCMultiIndex): vals = MultiIndex.from_tuples(vals) # create a list-of-Categoricals vals = [Categorical(vals.labels[level], vals.levels[level], ordered=False, fastpath=True) for level in range(vals.nlevels)] # hash the list-of-ndarrays hashes = (_hash_categorical(cat, encoding=encoding, hash_key=hash_key) for cat in vals) h = _combine_hash_arrays(hashes, len(vals)) if is_tuple: h = h[0] return h def hash_tuple(val, encoding='utf8', hash_key=None): """ Hash a single tuple efficiently Parameters ---------- val : single tuple encoding : string, default 'utf8' hash_key : string key to encode, default to _default_hash_key Returns ------- hash """ hashes = (_hash_scalar(v, encoding=encoding, hash_key=hash_key) for v in val) h = _combine_hash_arrays(hashes, len(val))[0] return h def _hash_categorical(c, encoding, hash_key): """ Hash a Categorical by hashing its categories, and then mapping the codes to the hashes Parameters ---------- c : Categorical encoding : string, default 'utf8' hash_key : string key to encode, default to _default_hash_key Returns ------- ndarray of hashed values array, same size as len(c) """ hashed = hash_array(c.categories.values, encoding, hash_key, categorize=False) # we have uint64, as we don't directly support missing values # we don't want to use take_nd which will coerce to float # instead, directly construt the result with a # max(np.uint64) as the missing value indicator # # TODO: GH 15362 mask = c.isna() if len(hashed): result = hashed.take(c.codes) else: result = np.zeros(len(mask), dtype='uint64') if mask.any(): result[mask] = np.iinfo(np.uint64).max return result def hash_array(vals, encoding='utf8', hash_key=None, categorize=True): """ Given a 1d array, return an array of deterministic integers. .. versionadded:: 0.19.2 Parameters ---------- vals : ndarray, Categorical encoding : string, default 'utf8' encoding for data & key when strings hash_key : string key to encode, default to _default_hash_key categorize : bool, default True Whether to first categorize object arrays before hashing. This is more efficient when the array contains duplicate values. .. versionadded:: 0.20.0 Returns ------- 1d uint64 numpy array of hash values, same length as the vals """ if not hasattr(vals, 'dtype'): raise TypeError("must pass a ndarray-like") dtype = vals.dtype if hash_key is None: hash_key = _default_hash_key # For categoricals, we hash the categories, then remap the codes to the # hash values. (This check is above the complex check so that we don't ask # numpy if categorical is a subdtype of complex, as it will choke). if is_categorical_dtype(dtype): return _hash_categorical(vals, encoding, hash_key) # we'll be working with everything as 64-bit values, so handle this # 128-bit value early elif np.issubdtype(dtype, np.complex128): return hash_array(vals.real) + 23 * hash_array(vals.imag) # First, turn whatever array this is into unsigned 64-bit ints, if we can # manage it. 
elif isinstance(dtype, np.bool): vals = vals.astype('u8') elif issubclass(dtype.type, (np.datetime64, np.timedelta64)): vals = vals.view('i8').astype('u8', copy=False) elif issubclass(dtype.type, np.number) and dtype.itemsize <= 8: vals = vals.view('u{}'.format(vals.dtype.itemsize)).astype('u8') else: # With repeated values, its MUCH faster to categorize object dtypes, # then hash and rename categories. We allow skipping the categorization # when the values are known/likely to be unique. if categorize: from pandas import factorize, Categorical, Index codes, categories = factorize(vals, sort=False) cat = Categorical(codes, Index(categories), ordered=False, fastpath=True) return _hash_categorical(cat, encoding, hash_key) try: vals = hashing.hash_object_array(vals, hash_key, encoding) except TypeError: # we have mixed types vals = hashing.hash_object_array(vals.astype(str).astype(object), hash_key, encoding) # Then, redistribute these 64-bit ints within the space of 64-bit ints vals ^= vals >> 30 vals *= np.uint64(0xbf58476d1ce4e5b9) vals ^= vals >> 27 vals *= np.uint64(0x94d049bb133111eb) vals ^= vals >> 31 return vals def _hash_scalar(val, encoding='utf8', hash_key=None): """ Hash scalar value Returns ------- 1d uint64 numpy array of hash value, of length 1 """ if isna(val): # this is to be consistent with the _hash_categorical implementation return np.array([np.iinfo(np.uint64).max], dtype='u8') if getattr(val, 'tzinfo', None) is not None: # for tz-aware datetimes, we need the underlying naive UTC value and # not the tz aware object or pd extension type (as # infer_dtype_from_scalar would do) if not isinstance(val, tslib.Timestamp): val = tslib.Timestamp(val) val = val.tz_convert(None) dtype, val = infer_dtype_from_scalar(val) vals = np.array([val], dtype=dtype) return hash_array(vals, hash_key=hash_key, encoding=encoding, categorize=False)
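The functions above produce one deterministic uint64 hash per row and, by default, mix the index into the result. A short hedged usage example through the public entry point (``hash_pandas_object`` has been exposed under ``pandas.util`` since 0.20; the exact module path is the only assumption here):

# Usage sketch for the row-hashing utilities documented above.
import numpy as np
import pandas as pd
from pandas.util import hash_pandas_object

s = pd.Series(['a', 'b', 'c'])

h1 = hash_pandas_object(s)          # uint64 hash per row, index included by default
h2 = hash_pandas_object(s)
print(h1.equals(h2))                # True: hashing is deterministic

shifted = pd.Series(['a', 'b', 'c'], index=[10, 11, 12])
print(np.array_equal(hash_pandas_object(shifted, index=False).values,
                     hash_pandas_object(s, index=False).values))   # True: index ignored
print(np.array_equal(hash_pandas_object(shifted).values, h1.values))  # False: index participates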
bsd-3-clause
rahlk/CSC579__Computer_Performance_Modeling
simulation/proj1/tasks/task5.py
1
2063
from __future__ import division
from __future__ import print_function

import os
import sys
import functools

# Update path
root = os.path.join(os.getcwd().split('proj1')[0], 'proj1')
if root not in sys.path:
    sys.path.append(root)

import numpy as np
import pandas as pd
import multiprocessing
from pdb import set_trace

from Simulator import simulate
from Utils.PlotsUtils import line, line2
from Utils.RandomUtil import Random
from Utils.MisclUtils import TimeUtil

rand = Random()
timer = TimeUtil()

# Set seed
rand.set_seed(seed_val=12458)


def customer_loss_rate(customers):
    # Fraction of arriving customers that were marked as serviced.
    served = np.sum([customer.serviced for customer in customers])
    total = len(customers)
    return served / total


def plot_runtime(x=None, y=None):
    line(x, y, x_label=r"$\rho$", y_label=r"Run Times",
         the_title=r"$\mathrm{Run\ Times\ in\ }\mu\mathrm{s\ vs.\ }\rho$")


def plot_runtime_vs_avg(x, y, y_1):
    line2(x, y, x, y_1, label_1="Actual Runtimes", label_2="Expected value of $\rho$",
          x_label=r"$\rho$", y_label=r"Run Times",
          the_title=r"$\mathrm{Run\ Times\ in\ }\mu\mathrm{s\ vs.\ }\rho$")


def task_5():
    # Time the simulator for each utilisation rho and save (rho, seconds) to CSV.
    rho_list = np.arange(0.05, 1, 0.1)
    C = 1e5
    elapsed = []
    for rho in rho_list:
        start_time = timer.current_time()
        serviced = simulate(l=rho, server_lim=40, max_serviced=C, L=1, verbose=False)
        end_time = timer.current_time()
        elapsed.append(end_time - start_time)

    data = pd.DataFrame([[a, b] for a, b in zip(rho_list, elapsed)],
                        columns=["Rho", "Seconds"])
    data.to_csv(os.path.abspath(os.path.join(root, "tasks/task5.csv")))


def task5_plot():
    data = pd.read_csv(os.path.abspath("tasks/task5.csv"))
    plot_runtime(data["Rho"], data["Seconds"])
    set_trace()


def compare_plot():
    rho_list = np.arange(0.05, 1, 0.1)
    average_rho = [np.mean([rand.exponential(lam=p) for _ in xrange(10000)]) for p in rho_list]
    data = pd.read_csv(os.path.abspath("tasks/task5.csv"))
    plot_runtime(data["Rho"], average_rho)


if __name__ == "__main__":
    task_5()
    task5_plot()
    compare_plot()
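``task_5`` above is a measure-and-save loop: for each utilisation rho it times one simulation run and writes the (rho, seconds) pairs to CSV. The project's ``Simulator`` and ``Utils`` modules are not available here, so the following self-contained sketch substitutes a dummy workload whose cost grows with rho; only the timing structure is the point, and all names below are illustrative.

# Self-contained sketch of the timing loop, with a stand-in for simulate().
import time
import numpy as np
import pandas as pd

def fake_simulate(rho, n=200000):
    # Hypothetical workload standing in for the real simulator; cost scales with rho.
    return np.sort(np.random.exponential(scale=rho, size=int(n * rho))).sum()

rho_list = np.arange(0.05, 1, 0.1)
elapsed = []
for rho in rho_list:
    start = time.time()
    fake_simulate(rho)
    elapsed.append(time.time() - start)

data = pd.DataFrame({"Rho": rho_list, "Seconds": elapsed})
data.to_csv("task5_demo.csv", index=False)
print(data)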
mit
maropu/spark
python/pyspark/ml/feature.py
15
212774
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from pyspark import since, keyword_only, SparkContext from pyspark.ml.linalg import _convert_to_vector from pyspark.ml.param.shared import HasThreshold, HasThresholds, HasInputCol, HasOutputCol, \ HasInputCols, HasOutputCols, HasHandleInvalid, HasRelativeError, HasFeaturesCol, HasLabelCol, \ HasSeed, HasNumFeatures, HasStepSize, HasMaxIter, TypeConverters, Param, Params from pyspark.ml.util import JavaMLReadable, JavaMLWritable from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaTransformer, _jvm from pyspark.ml.common import inherit_doc __all__ = ['Binarizer', 'BucketedRandomProjectionLSH', 'BucketedRandomProjectionLSHModel', 'Bucketizer', 'ChiSqSelector', 'ChiSqSelectorModel', 'CountVectorizer', 'CountVectorizerModel', 'DCT', 'ElementwiseProduct', 'FeatureHasher', 'HashingTF', 'IDF', 'IDFModel', 'Imputer', 'ImputerModel', 'IndexToString', 'Interaction', 'MaxAbsScaler', 'MaxAbsScalerModel', 'MinHashLSH', 'MinHashLSHModel', 'MinMaxScaler', 'MinMaxScalerModel', 'NGram', 'Normalizer', 'OneHotEncoder', 'OneHotEncoderModel', 'PCA', 'PCAModel', 'PolynomialExpansion', 'QuantileDiscretizer', 'RobustScaler', 'RobustScalerModel', 'RegexTokenizer', 'RFormula', 'RFormulaModel', 'SQLTransformer', 'StandardScaler', 'StandardScalerModel', 'StopWordsRemover', 'StringIndexer', 'StringIndexerModel', 'Tokenizer', 'UnivariateFeatureSelector', 'UnivariateFeatureSelectorModel', 'VarianceThresholdSelector', 'VarianceThresholdSelectorModel', 'VectorAssembler', 'VectorIndexer', 'VectorIndexerModel', 'VectorSizeHint', 'VectorSlicer', 'Word2Vec', 'Word2VecModel'] @inherit_doc class Binarizer(JavaTransformer, HasThreshold, HasThresholds, HasInputCol, HasOutputCol, HasInputCols, HasOutputCols, JavaMLReadable, JavaMLWritable): """ Binarize a column of continuous features given a threshold. Since 3.0.0, :py:class:`Binarize` can map multiple columns at once by setting the :py:attr:`inputCols` parameter. Note that when both the :py:attr:`inputCol` and :py:attr:`inputCols` parameters are set, an Exception will be thrown. The :py:attr:`threshold` parameter is used for single column usage, and :py:attr:`thresholds` is for multiple columns. .. versionadded:: 1.4.0 Examples -------- >>> df = spark.createDataFrame([(0.5,)], ["values"]) >>> binarizer = Binarizer(threshold=1.0, inputCol="values", outputCol="features") >>> binarizer.setThreshold(1.0) Binarizer... >>> binarizer.setInputCol("values") Binarizer... >>> binarizer.setOutputCol("features") Binarizer... 
>>> binarizer.transform(df).head().features 0.0 >>> binarizer.setParams(outputCol="freqs").transform(df).head().freqs 0.0 >>> params = {binarizer.threshold: -0.5, binarizer.outputCol: "vector"} >>> binarizer.transform(df, params).head().vector 1.0 >>> binarizerPath = temp_path + "/binarizer" >>> binarizer.save(binarizerPath) >>> loadedBinarizer = Binarizer.load(binarizerPath) >>> loadedBinarizer.getThreshold() == binarizer.getThreshold() True >>> loadedBinarizer.transform(df).take(1) == binarizer.transform(df).take(1) True >>> df2 = spark.createDataFrame([(0.5, 0.3)], ["values1", "values2"]) >>> binarizer2 = Binarizer(thresholds=[0.0, 1.0]) >>> binarizer2.setInputCols(["values1", "values2"]).setOutputCols(["output1", "output2"]) Binarizer... >>> binarizer2.transform(df2).show() +-------+-------+-------+-------+ |values1|values2|output1|output2| +-------+-------+-------+-------+ | 0.5| 0.3| 1.0| 0.0| +-------+-------+-------+-------+ ... """ threshold = Param(Params._dummy(), "threshold", "Param for threshold used to binarize continuous features. " + "The features greater than the threshold will be binarized to 1.0. " + "The features equal to or less than the threshold will be binarized to 0.0", typeConverter=TypeConverters.toFloat) thresholds = Param(Params._dummy(), "thresholds", "Param for array of threshold used to binarize continuous features. " + "This is for multiple columns input. If transforming multiple columns " + "and thresholds is not set, but threshold is set, then threshold will " + "be applied across all columns.", typeConverter=TypeConverters.toListFloat) @keyword_only def __init__(self, *, threshold=0.0, inputCol=None, outputCol=None, thresholds=None, inputCols=None, outputCols=None): """ __init__(self, \\*, threshold=0.0, inputCol=None, outputCol=None, thresholds=None, \ inputCols=None, outputCols=None) """ super(Binarizer, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Binarizer", self.uid) self._setDefault(threshold=0.0) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, *, threshold=0.0, inputCol=None, outputCol=None, thresholds=None, inputCols=None, outputCols=None): """ setParams(self, \\*, threshold=0.0, inputCol=None, outputCol=None, thresholds=None, \ inputCols=None, outputCols=None) Sets params for this Binarizer. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.4.0") def setThreshold(self, value): """ Sets the value of :py:attr:`threshold`. """ return self._set(threshold=value) @since("3.0.0") def setThresholds(self, value): """ Sets the value of :py:attr:`thresholds`. """ return self._set(thresholds=value) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) @since("3.0.0") def setInputCols(self, value): """ Sets the value of :py:attr:`inputCols`. """ return self._set(inputCols=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @since("3.0.0") def setOutputCols(self, value): """ Sets the value of :py:attr:`outputCols`. """ return self._set(outputCols=value) class _LSHParams(HasInputCol, HasOutputCol): """ Mixin for Locality Sensitive Hashing (LSH) algorithm parameters. 
""" numHashTables = Param(Params._dummy(), "numHashTables", "number of hash tables, where " + "increasing number of hash tables lowers the false negative rate, " + "and decreasing it improves the running performance.", typeConverter=TypeConverters.toInt) def __init__(self, *args): super(_LSHParams, self).__init__(*args) self._setDefault(numHashTables=1) def getNumHashTables(self): """ Gets the value of numHashTables or its default value. """ return self.getOrDefault(self.numHashTables) class _LSH(JavaEstimator, _LSHParams, JavaMLReadable, JavaMLWritable): """ Mixin for Locality Sensitive Hashing (LSH). """ def setNumHashTables(self, value): """ Sets the value of :py:attr:`numHashTables`. """ return self._set(numHashTables=value) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) class _LSHModel(JavaModel, _LSHParams): """ Mixin for Locality Sensitive Hashing (LSH) models. """ def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) def approxNearestNeighbors(self, dataset, key, numNearestNeighbors, distCol="distCol"): """ Given a large dataset and an item, approximately find at most k items which have the closest distance to the item. If the :py:attr:`outputCol` is missing, the method will transform the data; if the :py:attr:`outputCol` exists, it will use that. This allows caching of the transformed data when necessary. Notes ----- This method is experimental and will likely change behavior in the next release. Parameters ---------- dataset : :py:class:`pyspark.sql.DataFrame` The dataset to search for nearest neighbors of the key. key : :py:class:`pyspark.ml.linalg.Vector` Feature vector representing the item to search for. numNearestNeighbors : int The maximum number of nearest neighbors. distCol : str Output column for storing the distance between each result row and the key. Use "distCol" as default value if it's not specified. Returns ------- :py:class:`pyspark.sql.DataFrame` A dataset containing at most k items closest to the key. A column "distCol" is added to show the distance between each row and the key. """ return self._call_java("approxNearestNeighbors", dataset, key, numNearestNeighbors, distCol) def approxSimilarityJoin(self, datasetA, datasetB, threshold, distCol="distCol"): """ Join two datasets to approximately find all pairs of rows whose distance are smaller than the threshold. If the :py:attr:`outputCol` is missing, the method will transform the data; if the :py:attr:`outputCol` exists, it will use that. This allows caching of the transformed data when necessary. Parameters ---------- datasetA : :py:class:`pyspark.sql.DataFrame` One of the datasets to join. datasetB : :py:class:`pyspark.sql.DataFrame` Another dataset to join. threshold : float The threshold for the distance of row pairs. distCol : str, optional Output column for storing the distance between each pair of rows. Use "distCol" as default value if it's not specified. Returns ------- :py:class:`pyspark.sql.DataFrame` A joined dataset containing pairs of rows. The original rows are in columns "datasetA" and "datasetB", and a column "distCol" is added to show the distance between each pair. 
""" threshold = TypeConverters.toFloat(threshold) return self._call_java("approxSimilarityJoin", datasetA, datasetB, threshold, distCol) class _BucketedRandomProjectionLSHParams(): """ Params for :py:class:`BucketedRandomProjectionLSH` and :py:class:`BucketedRandomProjectionLSHModel`. .. versionadded:: 3.0.0 """ bucketLength = Param(Params._dummy(), "bucketLength", "the length of each hash bucket, " + "a larger bucket lowers the false negative rate.", typeConverter=TypeConverters.toFloat) @since("2.2.0") def getBucketLength(self): """ Gets the value of bucketLength or its default value. """ return self.getOrDefault(self.bucketLength) @inherit_doc class BucketedRandomProjectionLSH(_LSH, _BucketedRandomProjectionLSHParams, HasSeed, JavaMLReadable, JavaMLWritable): """ LSH class for Euclidean distance metrics. The input is dense or sparse vectors, each of which represents a point in the Euclidean distance space. The output will be vectors of configurable dimension. Hash values in the same dimension are calculated by the same hash function. .. versionadded:: 2.2.0 Notes ----- - `Stable Distributions in Wikipedia article on Locality-sensitive hashing \ <https://en.wikipedia.org/wiki/Locality-sensitive_hashing#Stable_distributions>`_ - `Hashing for Similarity Search: A Survey <https://arxiv.org/abs/1408.2927>`_ Examples -------- >>> from pyspark.ml.linalg import Vectors >>> from pyspark.sql.functions import col >>> data = [(0, Vectors.dense([-1.0, -1.0 ]),), ... (1, Vectors.dense([-1.0, 1.0 ]),), ... (2, Vectors.dense([1.0, -1.0 ]),), ... (3, Vectors.dense([1.0, 1.0]),)] >>> df = spark.createDataFrame(data, ["id", "features"]) >>> brp = BucketedRandomProjectionLSH() >>> brp.setInputCol("features") BucketedRandomProjectionLSH... >>> brp.setOutputCol("hashes") BucketedRandomProjectionLSH... >>> brp.setSeed(12345) BucketedRandomProjectionLSH... >>> brp.setBucketLength(1.0) BucketedRandomProjectionLSH... >>> model = brp.fit(df) >>> model.getBucketLength() 1.0 >>> model.setOutputCol("hashes") BucketedRandomProjectionLSHModel... >>> model.transform(df).head() Row(id=0, features=DenseVector([-1.0, -1.0]), hashes=[DenseVector([-1.0])]) >>> data2 = [(4, Vectors.dense([2.0, 2.0 ]),), ... (5, Vectors.dense([2.0, 3.0 ]),), ... (6, Vectors.dense([3.0, 2.0 ]),), ... (7, Vectors.dense([3.0, 3.0]),)] >>> df2 = spark.createDataFrame(data2, ["id", "features"]) >>> model.approxNearestNeighbors(df2, Vectors.dense([1.0, 2.0]), 1).collect() [Row(id=4, features=DenseVector([2.0, 2.0]), hashes=[DenseVector([1.0])], distCol=1.0)] >>> model.approxSimilarityJoin(df, df2, 3.0, distCol="EuclideanDistance").select( ... col("datasetA.id").alias("idA"), ... col("datasetB.id").alias("idB"), ... col("EuclideanDistance")).show() +---+---+-----------------+ |idA|idB|EuclideanDistance| +---+---+-----------------+ | 3| 6| 2.23606797749979| +---+---+-----------------+ ... >>> model.approxSimilarityJoin(df, df2, 3, distCol="EuclideanDistance").select( ... col("datasetA.id").alias("idA"), ... col("datasetB.id").alias("idB"), ... col("EuclideanDistance")).show() +---+---+-----------------+ |idA|idB|EuclideanDistance| +---+---+-----------------+ | 3| 6| 2.23606797749979| +---+---+-----------------+ ... 
>>> brpPath = temp_path + "/brp" >>> brp.save(brpPath) >>> brp2 = BucketedRandomProjectionLSH.load(brpPath) >>> brp2.getBucketLength() == brp.getBucketLength() True >>> modelPath = temp_path + "/brp-model" >>> model.save(modelPath) >>> model2 = BucketedRandomProjectionLSHModel.load(modelPath) >>> model.transform(df).head().hashes == model2.transform(df).head().hashes True """ @keyword_only def __init__(self, *, inputCol=None, outputCol=None, seed=None, numHashTables=1, bucketLength=None): """ __init__(self, \\*, inputCol=None, outputCol=None, seed=None, numHashTables=1, \ bucketLength=None) """ super(BucketedRandomProjectionLSH, self).__init__() self._java_obj = \ self._new_java_obj("org.apache.spark.ml.feature.BucketedRandomProjectionLSH", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("2.2.0") def setParams(self, *, inputCol=None, outputCol=None, seed=None, numHashTables=1, bucketLength=None): """ setParams(self, \\*, inputCol=None, outputCol=None, seed=None, numHashTables=1, \ bucketLength=None) Sets params for this BucketedRandomProjectionLSH. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("2.2.0") def setBucketLength(self, value): """ Sets the value of :py:attr:`bucketLength`. """ return self._set(bucketLength=value) def setSeed(self, value): """ Sets the value of :py:attr:`seed`. """ return self._set(seed=value) def _create_model(self, java_model): return BucketedRandomProjectionLSHModel(java_model) class BucketedRandomProjectionLSHModel(_LSHModel, _BucketedRandomProjectionLSHParams, JavaMLReadable, JavaMLWritable): r""" Model fitted by :py:class:`BucketedRandomProjectionLSH`, where multiple random vectors are stored. The vectors are normalized to be unit vectors and each vector is used in a hash function: :math:`h_i(x) = floor(r_i \cdot x / bucketLength)` where :math:`r_i` is the i-th random unit vector. The number of buckets will be `(max L2 norm of input vectors) / bucketLength`. .. versionadded:: 2.2.0 """ @inherit_doc class Bucketizer(JavaTransformer, HasInputCol, HasOutputCol, HasInputCols, HasOutputCols, HasHandleInvalid, JavaMLReadable, JavaMLWritable): """ Maps a column of continuous features to a column of feature buckets. Since 3.0.0, :py:class:`Bucketizer` can map multiple columns at once by setting the :py:attr:`inputCols` parameter. Note that when both the :py:attr:`inputCol` and :py:attr:`inputCols` parameters are set, an Exception will be thrown. The :py:attr:`splits` parameter is only used for single column usage, and :py:attr:`splitsArray` is for multiple columns. .. versionadded:: 1.4.0 Examples -------- >>> values = [(0.1, 0.0), (0.4, 1.0), (1.2, 1.3), (1.5, float("nan")), ... (float("nan"), 1.0), (float("nan"), 0.0)] >>> df = spark.createDataFrame(values, ["values1", "values2"]) >>> bucketizer = Bucketizer() >>> bucketizer.setSplits([-float("inf"), 0.5, 1.4, float("inf")]) Bucketizer... >>> bucketizer.setInputCol("values1") Bucketizer... >>> bucketizer.setOutputCol("buckets") Bucketizer... >>> bucketed = bucketizer.setHandleInvalid("keep").transform(df).collect() >>> bucketed = bucketizer.setHandleInvalid("keep").transform(df.select("values1")) >>> bucketed.show(truncate=False) +-------+-------+ |values1|buckets| +-------+-------+ |0.1 |0.0 | |0.4 |0.0 | |1.2 |1.0 | |1.5 |2.0 | |NaN |3.0 | |NaN |3.0 | +-------+-------+ ... 
>>> bucketizer.setParams(outputCol="b").transform(df).head().b 0.0 >>> bucketizerPath = temp_path + "/bucketizer" >>> bucketizer.save(bucketizerPath) >>> loadedBucketizer = Bucketizer.load(bucketizerPath) >>> loadedBucketizer.getSplits() == bucketizer.getSplits() True >>> loadedBucketizer.transform(df).take(1) == bucketizer.transform(df).take(1) True >>> bucketed = bucketizer.setHandleInvalid("skip").transform(df).collect() >>> len(bucketed) 4 >>> bucketizer2 = Bucketizer(splitsArray= ... [[-float("inf"), 0.5, 1.4, float("inf")], [-float("inf"), 0.5, float("inf")]], ... inputCols=["values1", "values2"], outputCols=["buckets1", "buckets2"]) >>> bucketed2 = bucketizer2.setHandleInvalid("keep").transform(df) >>> bucketed2.show(truncate=False) +-------+-------+--------+--------+ |values1|values2|buckets1|buckets2| +-------+-------+--------+--------+ |0.1 |0.0 |0.0 |0.0 | |0.4 |1.0 |0.0 |1.0 | |1.2 |1.3 |1.0 |1.0 | |1.5 |NaN |2.0 |2.0 | |NaN |1.0 |3.0 |1.0 | |NaN |0.0 |3.0 |0.0 | +-------+-------+--------+--------+ ... """ splits = \ Param(Params._dummy(), "splits", "Split points for mapping continuous features into buckets. With n+1 splits, " + "there are n buckets. A bucket defined by splits x,y holds values in the " + "range [x,y) except the last bucket, which also includes y. The splits " + "should be of length >= 3 and strictly increasing. Values at -inf, inf must be " + "explicitly provided to cover all Double values; otherwise, values outside the " + "splits specified will be treated as errors.", typeConverter=TypeConverters.toListFloat) handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries " "containing NaN values. Values outside the splits will always be treated " "as errors. Options are 'skip' (filter out rows with invalid values), " + "'error' (throw an error), or 'keep' (keep invalid values in a " + "special additional bucket). Note that in the multiple column " + "case, the invalid handling is applied to all columns. That said " + "for 'error' it will throw an error if any invalids are found in " + "any column, for 'skip' it will skip rows with any invalids in " + "any columns, etc.", typeConverter=TypeConverters.toString) splitsArray = Param(Params._dummy(), "splitsArray", "The array of split points for mapping " + "continuous features into buckets for multiple columns. For each input " + "column, with n+1 splits, there are n buckets. A bucket defined by " + "splits x,y holds values in the range [x,y) except the last bucket, " + "which also includes y. The splits should be of length >= 3 and " + "strictly increasing. 
Values at -inf, inf must be explicitly provided " + "to cover all Double values; otherwise, values outside the splits " + "specified will be treated as errors.", typeConverter=TypeConverters.toListListFloat) @keyword_only def __init__(self, *, splits=None, inputCol=None, outputCol=None, handleInvalid="error", splitsArray=None, inputCols=None, outputCols=None): """ __init__(self, \\*, splits=None, inputCol=None, outputCol=None, handleInvalid="error", \ splitsArray=None, inputCols=None, outputCols=None) """ super(Bucketizer, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Bucketizer", self.uid) self._setDefault(handleInvalid="error") kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, *, splits=None, inputCol=None, outputCol=None, handleInvalid="error", splitsArray=None, inputCols=None, outputCols=None): """ setParams(self, \\*, splits=None, inputCol=None, outputCol=None, handleInvalid="error", \ splitsArray=None, inputCols=None, outputCols=None) Sets params for this Bucketizer. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.4.0") def setSplits(self, value): """ Sets the value of :py:attr:`splits`. """ return self._set(splits=value) @since("1.4.0") def getSplits(self): """ Gets the value of threshold or its default value. """ return self.getOrDefault(self.splits) @since("3.0.0") def setSplitsArray(self, value): """ Sets the value of :py:attr:`splitsArray`. """ return self._set(splitsArray=value) @since("3.0.0") def getSplitsArray(self): """ Gets the array of split points or its default value. """ return self.getOrDefault(self.splitsArray) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) @since("3.0.0") def setInputCols(self, value): """ Sets the value of :py:attr:`inputCols`. """ return self._set(inputCols=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @since("3.0.0") def setOutputCols(self, value): """ Sets the value of :py:attr:`outputCols`. """ return self._set(outputCols=value) def setHandleInvalid(self, value): """ Sets the value of :py:attr:`handleInvalid`. """ return self._set(handleInvalid=value) class _CountVectorizerParams(JavaParams, HasInputCol, HasOutputCol): """ Params for :py:class:`CountVectorizer` and :py:class:`CountVectorizerModel`. """ minTF = Param( Params._dummy(), "minTF", "Filter to ignore rare words in" + " a document. For each document, terms with frequency/count less than the given" + " threshold are ignored. If this is an integer >= 1, then this specifies a count (of" + " times the term must appear in the document); if this is a double in [0,1), then this " + "specifies a fraction (out of the document's token count). Note that the parameter is " + "only used in transform of CountVectorizerModel and does not affect fitting. Default 1.0", typeConverter=TypeConverters.toFloat) minDF = Param( Params._dummy(), "minDF", "Specifies the minimum number of" + " different documents a term must appear in to be included in the vocabulary." + " If this is an integer >= 1, this specifies the number of documents the term must" + " appear in; if this is a double in [0,1), then this specifies the fraction of documents." + " Default 1.0", typeConverter=TypeConverters.toFloat) maxDF = Param( Params._dummy(), "maxDF", "Specifies the maximum number of" + " different documents a term could appear in to be included in the vocabulary." 
+ " A term that appears more than the threshold will be ignored. If this is an" + " integer >= 1, this specifies the maximum number of documents the term could appear in;" + " if this is a double in [0,1), then this specifies the maximum" + " fraction of documents the term could appear in." + " Default (2^63) - 1", typeConverter=TypeConverters.toFloat) vocabSize = Param( Params._dummy(), "vocabSize", "max size of the vocabulary. Default 1 << 18.", typeConverter=TypeConverters.toInt) binary = Param( Params._dummy(), "binary", "Binary toggle to control the output vector values." + " If True, all nonzero counts (after minTF filter applied) are set to 1. This is useful" + " for discrete probabilistic models that model binary events rather than integer counts." + " Default False", typeConverter=TypeConverters.toBoolean) def __init__(self, *args): super(_CountVectorizerParams, self).__init__(*args) self._setDefault(minTF=1.0, minDF=1.0, maxDF=2 ** 63 - 1, vocabSize=1 << 18, binary=False) @since("1.6.0") def getMinTF(self): """ Gets the value of minTF or its default value. """ return self.getOrDefault(self.minTF) @since("1.6.0") def getMinDF(self): """ Gets the value of minDF or its default value. """ return self.getOrDefault(self.minDF) @since("2.4.0") def getMaxDF(self): """ Gets the value of maxDF or its default value. """ return self.getOrDefault(self.maxDF) @since("1.6.0") def getVocabSize(self): """ Gets the value of vocabSize or its default value. """ return self.getOrDefault(self.vocabSize) @since("2.0.0") def getBinary(self): """ Gets the value of binary or its default value. """ return self.getOrDefault(self.binary) @inherit_doc class CountVectorizer(JavaEstimator, _CountVectorizerParams, JavaMLReadable, JavaMLWritable): """ Extracts a vocabulary from document collections and generates a :py:attr:`CountVectorizerModel`. .. versionadded:: 1.6.0 Examples -------- >>> df = spark.createDataFrame( ... [(0, ["a", "b", "c"]), (1, ["a", "b", "b", "c", "a"])], ... ["label", "raw"]) >>> cv = CountVectorizer() >>> cv.setInputCol("raw") CountVectorizer... >>> cv.setOutputCol("vectors") CountVectorizer... >>> model = cv.fit(df) >>> model.setInputCol("raw") CountVectorizerModel... >>> model.transform(df).show(truncate=False) +-----+---------------+-------------------------+ |label|raw |vectors | +-----+---------------+-------------------------+ |0 |[a, b, c] |(3,[0,1,2],[1.0,1.0,1.0])| |1 |[a, b, b, c, a]|(3,[0,1,2],[2.0,2.0,1.0])| +-----+---------------+-------------------------+ ... >>> sorted(model.vocabulary) == ['a', 'b', 'c'] True >>> countVectorizerPath = temp_path + "/count-vectorizer" >>> cv.save(countVectorizerPath) >>> loadedCv = CountVectorizer.load(countVectorizerPath) >>> loadedCv.getMinDF() == cv.getMinDF() True >>> loadedCv.getMinTF() == cv.getMinTF() True >>> loadedCv.getVocabSize() == cv.getVocabSize() True >>> modelPath = temp_path + "/count-vectorizer-model" >>> model.save(modelPath) >>> loadedModel = CountVectorizerModel.load(modelPath) >>> loadedModel.vocabulary == model.vocabulary True >>> loadedModel.transform(df).take(1) == model.transform(df).take(1) True >>> fromVocabModel = CountVectorizerModel.from_vocabulary(["a", "b", "c"], ... 
inputCol="raw", outputCol="vectors") >>> fromVocabModel.transform(df).show(truncate=False) +-----+---------------+-------------------------+ |label|raw |vectors | +-----+---------------+-------------------------+ |0 |[a, b, c] |(3,[0,1,2],[1.0,1.0,1.0])| |1 |[a, b, b, c, a]|(3,[0,1,2],[2.0,2.0,1.0])| +-----+---------------+-------------------------+ ... """ @keyword_only def __init__(self, *, minTF=1.0, minDF=1.0, maxDF=2 ** 63 - 1, vocabSize=1 << 18, binary=False, inputCol=None, outputCol=None): """ __init__(self, \\*, minTF=1.0, minDF=1.0, maxDF=2 ** 63 - 1, vocabSize=1 << 18,\ binary=False, inputCol=None,outputCol=None) """ super(CountVectorizer, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.CountVectorizer", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.6.0") def setParams(self, *, minTF=1.0, minDF=1.0, maxDF=2 ** 63 - 1, vocabSize=1 << 18, binary=False, inputCol=None, outputCol=None): """ setParams(self, \\*, minTF=1.0, minDF=1.0, maxDF=2 ** 63 - 1, vocabSize=1 << 18,\ binary=False, inputCol=None, outputCol=None) Set the params for the CountVectorizer """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.6.0") def setMinTF(self, value): """ Sets the value of :py:attr:`minTF`. """ return self._set(minTF=value) @since("1.6.0") def setMinDF(self, value): """ Sets the value of :py:attr:`minDF`. """ return self._set(minDF=value) @since("2.4.0") def setMaxDF(self, value): """ Sets the value of :py:attr:`maxDF`. """ return self._set(maxDF=value) @since("1.6.0") def setVocabSize(self, value): """ Sets the value of :py:attr:`vocabSize`. """ return self._set(vocabSize=value) @since("2.0.0") def setBinary(self, value): """ Sets the value of :py:attr:`binary`. """ return self._set(binary=value) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) def _create_model(self, java_model): return CountVectorizerModel(java_model) @inherit_doc class CountVectorizerModel(JavaModel, _CountVectorizerParams, JavaMLReadable, JavaMLWritable): """ Model fitted by :py:class:`CountVectorizer`. .. versionadded:: 1.6.0 """ @since("3.0.0") def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) @since("3.0.0") def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @classmethod @since("2.4.0") def from_vocabulary(cls, vocabulary, inputCol, outputCol=None, minTF=None, binary=None): """ Construct the model directly from a vocabulary list of strings, requires an active SparkContext. """ sc = SparkContext._active_spark_context java_class = sc._gateway.jvm.java.lang.String jvocab = CountVectorizerModel._new_java_array(vocabulary, java_class) model = CountVectorizerModel._create_from_java_class( "org.apache.spark.ml.feature.CountVectorizerModel", jvocab) model.setInputCol(inputCol) if outputCol is not None: model.setOutputCol(outputCol) if minTF is not None: model.setMinTF(minTF) if binary is not None: model.setBinary(binary) model._set(vocabSize=len(vocabulary)) return model @property @since("1.6.0") def vocabulary(self): """ An array of terms in the vocabulary. """ return self._call_java("vocabulary") @since("2.4.0") def setMinTF(self, value): """ Sets the value of :py:attr:`minTF`. 
""" return self._set(minTF=value) @since("2.4.0") def setBinary(self, value): """ Sets the value of :py:attr:`binary`. """ return self._set(binary=value) @inherit_doc class DCT(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable): """ A feature transformer that takes the 1D discrete cosine transform of a real vector. No zero padding is performed on the input vector. It returns a real vector of the same length representing the DCT. The return vector is scaled such that the transform matrix is unitary (aka scaled DCT-II). .. versionadded:: 1.6.0 Notes ----- `More information on Wikipedia \ <https://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II Wikipedia>`_. Examples -------- >>> from pyspark.ml.linalg import Vectors >>> df1 = spark.createDataFrame([(Vectors.dense([5.0, 8.0, 6.0]),)], ["vec"]) >>> dct = DCT( ) >>> dct.setInverse(False) DCT... >>> dct.setInputCol("vec") DCT... >>> dct.setOutputCol("resultVec") DCT... >>> df2 = dct.transform(df1) >>> df2.head().resultVec DenseVector([10.969..., -0.707..., -2.041...]) >>> df3 = DCT(inverse=True, inputCol="resultVec", outputCol="origVec").transform(df2) >>> df3.head().origVec DenseVector([5.0, 8.0, 6.0]) >>> dctPath = temp_path + "/dct" >>> dct.save(dctPath) >>> loadedDtc = DCT.load(dctPath) >>> loadedDtc.transform(df1).take(1) == dct.transform(df1).take(1) True >>> loadedDtc.getInverse() False """ inverse = Param(Params._dummy(), "inverse", "Set transformer to perform inverse DCT, " + "default False.", typeConverter=TypeConverters.toBoolean) @keyword_only def __init__(self, *, inverse=False, inputCol=None, outputCol=None): """ __init__(self, \\*, inverse=False, inputCol=None, outputCol=None) """ super(DCT, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.DCT", self.uid) self._setDefault(inverse=False) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.6.0") def setParams(self, *, inverse=False, inputCol=None, outputCol=None): """ setParams(self, \\*, inverse=False, inputCol=None, outputCol=None) Sets params for this DCT. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.6.0") def setInverse(self, value): """ Sets the value of :py:attr:`inverse`. """ return self._set(inverse=value) @since("1.6.0") def getInverse(self): """ Gets the value of inverse or its default value. """ return self.getOrDefault(self.inverse) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @inherit_doc class ElementwiseProduct(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable): """ Outputs the Hadamard product (i.e., the element-wise product) of each input vector with a provided "weight" vector. In other words, it scales each column of the dataset by a scalar multiplier. .. versionadded:: 1.5.0 Examples -------- >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame([(Vectors.dense([2.0, 1.0, 3.0]),)], ["values"]) >>> ep = ElementwiseProduct() >>> ep.setScalingVec(Vectors.dense([1.0, 2.0, 3.0])) ElementwiseProduct... >>> ep.setInputCol("values") ElementwiseProduct... >>> ep.setOutputCol("eprod") ElementwiseProduct... 
>>> ep.transform(df).head().eprod DenseVector([2.0, 2.0, 9.0]) >>> ep.setParams(scalingVec=Vectors.dense([2.0, 3.0, 5.0])).transform(df).head().eprod DenseVector([4.0, 3.0, 15.0]) >>> elementwiseProductPath = temp_path + "/elementwise-product" >>> ep.save(elementwiseProductPath) >>> loadedEp = ElementwiseProduct.load(elementwiseProductPath) >>> loadedEp.getScalingVec() == ep.getScalingVec() True >>> loadedEp.transform(df).take(1) == ep.transform(df).take(1) True """ scalingVec = Param(Params._dummy(), "scalingVec", "Vector for hadamard product.", typeConverter=TypeConverters.toVector) @keyword_only def __init__(self, *, scalingVec=None, inputCol=None, outputCol=None): """ __init__(self, \\*, scalingVec=None, inputCol=None, outputCol=None) """ super(ElementwiseProduct, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.ElementwiseProduct", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.5.0") def setParams(self, *, scalingVec=None, inputCol=None, outputCol=None): """ setParams(self, \\*, scalingVec=None, inputCol=None, outputCol=None) Sets params for this ElementwiseProduct. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("2.0.0") def setScalingVec(self, value): """ Sets the value of :py:attr:`scalingVec`. """ return self._set(scalingVec=value) @since("2.0.0") def getScalingVec(self): """ Gets the value of scalingVec or its default value. """ return self.getOrDefault(self.scalingVec) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @inherit_doc class FeatureHasher(JavaTransformer, HasInputCols, HasOutputCol, HasNumFeatures, JavaMLReadable, JavaMLWritable): """ Feature hashing projects a set of categorical or numerical features into a feature vector of specified dimension (typically substantially smaller than that of the original feature space). This is done using the hashing trick (https://en.wikipedia.org/wiki/Feature_hashing) to map features to indices in the feature vector. The FeatureHasher transformer operates on multiple columns. Each column may contain either numeric or categorical features. Behavior and handling of column data types is as follows: * Numeric columns: For numeric features, the hash value of the column name is used to map the feature value to its index in the feature vector. By default, numeric features are not treated as categorical (even when they are integers). To treat them as categorical, specify the relevant columns in `categoricalCols`. * String columns: For categorical features, the hash value of the string "column_name=value" is used to map to the vector index, with an indicator value of `1.0`. Thus, categorical features are "one-hot" encoded (similarly to using :py:class:`OneHotEncoder` with `dropLast=false`). * Boolean columns: Boolean values are treated in the same way as string columns. That is, boolean features are represented as "column_name=true" or "column_name=false", with an indicator value of `1.0`. Null (missing) values are ignored (implicitly zero in the resulting feature vector). Since a simple modulo is used to transform the hash function to a vector index, it is advisable to use a power of two as the `numFeatures` parameter; otherwise the features will not be mapped evenly to the vector indices. .. 
versionadded:: 2.3.0 Examples -------- >>> data = [(2.0, True, "1", "foo"), (3.0, False, "2", "bar")] >>> cols = ["real", "bool", "stringNum", "string"] >>> df = spark.createDataFrame(data, cols) >>> hasher = FeatureHasher() >>> hasher.setInputCols(cols) FeatureHasher... >>> hasher.setOutputCol("features") FeatureHasher... >>> hasher.transform(df).head().features SparseVector(262144, {174475: 2.0, 247670: 1.0, 257907: 1.0, 262126: 1.0}) >>> hasher.setCategoricalCols(["real"]).transform(df).head().features SparseVector(262144, {171257: 1.0, 247670: 1.0, 257907: 1.0, 262126: 1.0}) >>> hasherPath = temp_path + "/hasher" >>> hasher.save(hasherPath) >>> loadedHasher = FeatureHasher.load(hasherPath) >>> loadedHasher.getNumFeatures() == hasher.getNumFeatures() True >>> loadedHasher.transform(df).head().features == hasher.transform(df).head().features True """ categoricalCols = Param(Params._dummy(), "categoricalCols", "numeric columns to treat as categorical", typeConverter=TypeConverters.toListString) @keyword_only def __init__(self, *, numFeatures=1 << 18, inputCols=None, outputCol=None, categoricalCols=None): """ __init__(self, \\*, numFeatures=1 << 18, inputCols=None, outputCol=None, \ categoricalCols=None) """ super(FeatureHasher, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.FeatureHasher", self.uid) self._setDefault(numFeatures=1 << 18) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("2.3.0") def setParams(self, *, numFeatures=1 << 18, inputCols=None, outputCol=None, categoricalCols=None): """ setParams(self, \\*, numFeatures=1 << 18, inputCols=None, outputCol=None, \ categoricalCols=None) Sets params for this FeatureHasher. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("2.3.0") def setCategoricalCols(self, value): """ Sets the value of :py:attr:`categoricalCols`. """ return self._set(categoricalCols=value) @since("2.3.0") def getCategoricalCols(self): """ Gets the value of binary or its default value. """ return self.getOrDefault(self.categoricalCols) def setInputCols(self, value): """ Sets the value of :py:attr:`inputCols`. """ return self._set(inputCols=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) def setNumFeatures(self, value): """ Sets the value of :py:attr:`numFeatures`. """ return self._set(numFeatures=value) @inherit_doc class HashingTF(JavaTransformer, HasInputCol, HasOutputCol, HasNumFeatures, JavaMLReadable, JavaMLWritable): """ Maps a sequence of terms to their term frequencies using the hashing trick. Currently we use Austin Appleby's MurmurHash 3 algorithm (MurmurHash3_x86_32) to calculate the hash code value for the term object. Since a simple modulo is used to transform the hash function to a column index, it is advisable to use a power of two as the numFeatures parameter; otherwise the features will not be mapped evenly to the columns. .. versionadded:: 1.3.0 Examples -------- >>> df = spark.createDataFrame([(["a", "b", "c"],)], ["words"]) >>> hashingTF = HashingTF(inputCol="words", outputCol="features") >>> hashingTF.setNumFeatures(10) HashingTF... 
>>> hashingTF.transform(df).head().features SparseVector(10, {5: 1.0, 7: 1.0, 8: 1.0}) >>> hashingTF.setParams(outputCol="freqs").transform(df).head().freqs SparseVector(10, {5: 1.0, 7: 1.0, 8: 1.0}) >>> params = {hashingTF.numFeatures: 5, hashingTF.outputCol: "vector"} >>> hashingTF.transform(df, params).head().vector SparseVector(5, {0: 1.0, 2: 1.0, 3: 1.0}) >>> hashingTFPath = temp_path + "/hashing-tf" >>> hashingTF.save(hashingTFPath) >>> loadedHashingTF = HashingTF.load(hashingTFPath) >>> loadedHashingTF.getNumFeatures() == hashingTF.getNumFeatures() True >>> loadedHashingTF.transform(df).take(1) == hashingTF.transform(df).take(1) True >>> hashingTF.indexOf("b") 5 """ binary = Param(Params._dummy(), "binary", "If True, all non zero counts are set to 1. " + "This is useful for discrete probabilistic models that model binary events " + "rather than integer counts. Default False.", typeConverter=TypeConverters.toBoolean) @keyword_only def __init__(self, *, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None): """ __init__(self, \\*, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None) """ super(HashingTF, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.HashingTF", self.uid) self._setDefault(numFeatures=1 << 18, binary=False) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.3.0") def setParams(self, *, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None): """ setParams(self, \\*, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None) Sets params for this HashingTF. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("2.0.0") def setBinary(self, value): """ Sets the value of :py:attr:`binary`. """ return self._set(binary=value) @since("2.0.0") def getBinary(self): """ Gets the value of binary or its default value. """ return self.getOrDefault(self.binary) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) def setNumFeatures(self, value): """ Sets the value of :py:attr:`numFeatures`. """ return self._set(numFeatures=value) @since("3.0.0") def indexOf(self, term): """ Returns the index of the input term. """ self._transfer_params_to_java() return self._java_obj.indexOf(term) class _IDFParams(HasInputCol, HasOutputCol): """ Params for :py:class:`IDF` and :py:class:`IDFModel`. .. versionadded:: 3.0.0 """ minDocFreq = Param(Params._dummy(), "minDocFreq", "minimum number of documents in which a term should appear for filtering", typeConverter=TypeConverters.toInt) @since("1.4.0") def getMinDocFreq(self): """ Gets the value of minDocFreq or its default value. """ return self.getOrDefault(self.minDocFreq) def __init__(self, *args): super(_IDFParams, self).__init__(*args) self._setDefault(minDocFreq=0) @inherit_doc class IDF(JavaEstimator, _IDFParams, JavaMLReadable, JavaMLWritable): """ Compute the Inverse Document Frequency (IDF) given a collection of documents. .. versionadded:: 1.4.0 Examples -------- >>> from pyspark.ml.linalg import DenseVector >>> df = spark.createDataFrame([(DenseVector([1.0, 2.0]),), ... (DenseVector([0.0, 1.0]),), (DenseVector([3.0, 0.2]),)], ["tf"]) >>> idf = IDF(minDocFreq=3) >>> idf.setInputCol("tf") IDF... >>> idf.setOutputCol("idf") IDF... >>> model = idf.fit(df) >>> model.setOutputCol("idf") IDFModel... 
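    A rough sketch of the smoothed IDF weighting that Spark MLlib documents,
    idf(t) = log((m + 1) / (df(t) + 1)); illustrative only, and the minDocFreq
    handling shown here is a simplification of the fitted behaviour::

        import math

        def idf_weight(num_docs, doc_freq, min_doc_freq=0):
            if doc_freq < min_doc_freq:
                return 0.0
            return math.log((num_docs + 1) / (doc_freq + 1))

        # With 3 documents and a term appearing in all 3,
        # idf_weight(3, 3) == log(4 / 4) == 0.0, which is why the doctest
        # below shows model.idf == DenseVector([0.0, 0.0]).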
>>> model.getMinDocFreq() 3 >>> model.idf DenseVector([0.0, 0.0]) >>> model.docFreq [0, 3] >>> model.numDocs == df.count() True >>> model.transform(df).head().idf DenseVector([0.0, 0.0]) >>> idf.setParams(outputCol="freqs").fit(df).transform(df).collect()[1].freqs DenseVector([0.0, 0.0]) >>> params = {idf.minDocFreq: 1, idf.outputCol: "vector"} >>> idf.fit(df, params).transform(df).head().vector DenseVector([0.2877, 0.0]) >>> idfPath = temp_path + "/idf" >>> idf.save(idfPath) >>> loadedIdf = IDF.load(idfPath) >>> loadedIdf.getMinDocFreq() == idf.getMinDocFreq() True >>> modelPath = temp_path + "/idf-model" >>> model.save(modelPath) >>> loadedModel = IDFModel.load(modelPath) >>> loadedModel.transform(df).head().idf == model.transform(df).head().idf True """ @keyword_only def __init__(self, *, minDocFreq=0, inputCol=None, outputCol=None): """ __init__(self, \\*, minDocFreq=0, inputCol=None, outputCol=None) """ super(IDF, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.IDF", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, *, minDocFreq=0, inputCol=None, outputCol=None): """ setParams(self, \\*, minDocFreq=0, inputCol=None, outputCol=None) Sets params for this IDF. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.4.0") def setMinDocFreq(self, value): """ Sets the value of :py:attr:`minDocFreq`. """ return self._set(minDocFreq=value) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) def _create_model(self, java_model): return IDFModel(java_model) class IDFModel(JavaModel, _IDFParams, JavaMLReadable, JavaMLWritable): """ Model fitted by :py:class:`IDF`. .. versionadded:: 1.4.0 """ @since("3.0.0") def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) @since("3.0.0") def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @property @since("2.0.0") def idf(self): """ Returns the IDF vector. """ return self._call_java("idf") @property @since("3.0.0") def docFreq(self): """ Returns the document frequency. """ return self._call_java("docFreq") @property @since("3.0.0") def numDocs(self): """ Returns number of documents evaluated to compute idf """ return self._call_java("numDocs") class _ImputerParams(HasInputCol, HasInputCols, HasOutputCol, HasOutputCols, HasRelativeError): """ Params for :py:class:`Imputer` and :py:class:`ImputerModel`. .. versionadded:: 3.0.0 """ strategy = Param(Params._dummy(), "strategy", "strategy for imputation. If mean, then replace missing values using the mean " "value of the feature. If median, then replace missing values using the " "median value of the feature. If mode, then replace missing using the most " "frequent value of the feature.", typeConverter=TypeConverters.toString) missingValue = Param(Params._dummy(), "missingValue", "The placeholder for the missing values. All occurrences of missingValue " "will be imputed.", typeConverter=TypeConverters.toFloat) def __init__(self, *args): super(_ImputerParams, self).__init__(*args) self._setDefault(strategy="mean", missingValue=float("nan"), relativeError=0.001) @since("2.2.0") def getStrategy(self): """ Gets the value of :py:attr:`strategy` or its default value. 
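    As a rough illustration of what strategy="mean" means for a single numeric
    column (a pure-Python sketch, not the distributed implementation)::

        import math

        def surrogate_mean(values, missing_value=float("nan")):
            # Drop the placeholder and NaN entries before averaging.
            kept = [v for v in values
                    if v != missing_value and not math.isnan(v)]
            return sum(kept) / len(kept)

        # surrogate_mean([1.0, 2.0, float("nan"), 4.0, 5.0]) == 3.0, the kind
        # of per-column surrogate that ends up in ImputerModel.surrogateDF.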
""" return self.getOrDefault(self.strategy) @since("2.2.0") def getMissingValue(self): """ Gets the value of :py:attr:`missingValue` or its default value. """ return self.getOrDefault(self.missingValue) @inherit_doc class Imputer(JavaEstimator, _ImputerParams, JavaMLReadable, JavaMLWritable): """ Imputation estimator for completing missing values, using the mean, median or mode of the columns in which the missing values are located. The input columns should be of numeric type. Currently Imputer does not support categorical features and possibly creates incorrect values for a categorical feature. Note that the mean/median/mode value is computed after filtering out missing values. All Null values in the input columns are treated as missing, and so are also imputed. For computing median, :py:meth:`pyspark.sql.DataFrame.approxQuantile` is used with a relative error of `0.001`. .. versionadded:: 2.2.0 Examples -------- >>> df = spark.createDataFrame([(1.0, float("nan")), (2.0, float("nan")), (float("nan"), 3.0), ... (4.0, 4.0), (5.0, 5.0)], ["a", "b"]) >>> imputer = Imputer() >>> imputer.setInputCols(["a", "b"]) Imputer... >>> imputer.setOutputCols(["out_a", "out_b"]) Imputer... >>> imputer.getRelativeError() 0.001 >>> model = imputer.fit(df) >>> model.setInputCols(["a", "b"]) ImputerModel... >>> model.getStrategy() 'mean' >>> model.surrogateDF.show() +---+---+ | a| b| +---+---+ |3.0|4.0| +---+---+ ... >>> model.transform(df).show() +---+---+-----+-----+ | a| b|out_a|out_b| +---+---+-----+-----+ |1.0|NaN| 1.0| 4.0| |2.0|NaN| 2.0| 4.0| |NaN|3.0| 3.0| 3.0| ... >>> imputer.setStrategy("median").setMissingValue(1.0).fit(df).transform(df).show() +---+---+-----+-----+ | a| b|out_a|out_b| +---+---+-----+-----+ |1.0|NaN| 4.0| NaN| ... >>> df1 = spark.createDataFrame([(1.0,), (2.0,), (float("nan"),), (4.0,), (5.0,)], ["a"]) >>> imputer1 = Imputer(inputCol="a", outputCol="out_a") >>> model1 = imputer1.fit(df1) >>> model1.surrogateDF.show() +---+ | a| +---+ |3.0| +---+ ... >>> model1.transform(df1).show() +---+-----+ | a|out_a| +---+-----+ |1.0| 1.0| |2.0| 2.0| |NaN| 3.0| ... >>> imputer1.setStrategy("median").setMissingValue(1.0).fit(df1).transform(df1).show() +---+-----+ | a|out_a| +---+-----+ |1.0| 4.0| ... >>> df2 = spark.createDataFrame([(float("nan"),), (float("nan"),), (3.0,), (4.0,), (5.0,)], ... ["b"]) >>> imputer2 = Imputer(inputCol="b", outputCol="out_b") >>> model2 = imputer2.fit(df2) >>> model2.surrogateDF.show() +---+ | b| +---+ |4.0| +---+ ... >>> model2.transform(df2).show() +---+-----+ | b|out_b| +---+-----+ |NaN| 4.0| |NaN| 4.0| |3.0| 3.0| ... >>> imputer2.setStrategy("median").setMissingValue(1.0).fit(df2).transform(df2).show() +---+-----+ | b|out_b| +---+-----+ |NaN| NaN| ... 
>>> imputerPath = temp_path + "/imputer" >>> imputer.save(imputerPath) >>> loadedImputer = Imputer.load(imputerPath) >>> loadedImputer.getStrategy() == imputer.getStrategy() True >>> loadedImputer.getMissingValue() 1.0 >>> modelPath = temp_path + "/imputer-model" >>> model.save(modelPath) >>> loadedModel = ImputerModel.load(modelPath) >>> loadedModel.transform(df).head().out_a == model.transform(df).head().out_a True """ @keyword_only def __init__(self, *, strategy="mean", missingValue=float("nan"), inputCols=None, outputCols=None, inputCol=None, outputCol=None, relativeError=0.001): """ __init__(self, \\*, strategy="mean", missingValue=float("nan"), inputCols=None, \ outputCols=None, inputCol=None, outputCol=None, relativeError=0.001): """ super(Imputer, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Imputer", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("2.2.0") def setParams(self, *, strategy="mean", missingValue=float("nan"), inputCols=None, outputCols=None, inputCol=None, outputCol=None, relativeError=0.001): """ setParams(self, \\*, strategy="mean", missingValue=float("nan"), inputCols=None, \ outputCols=None, inputCol=None, outputCol=None, relativeError=0.001) Sets params for this Imputer. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("2.2.0") def setStrategy(self, value): """ Sets the value of :py:attr:`strategy`. """ return self._set(strategy=value) @since("2.2.0") def setMissingValue(self, value): """ Sets the value of :py:attr:`missingValue`. """ return self._set(missingValue=value) @since("2.2.0") def setInputCols(self, value): """ Sets the value of :py:attr:`inputCols`. """ return self._set(inputCols=value) @since("2.2.0") def setOutputCols(self, value): """ Sets the value of :py:attr:`outputCols`. """ return self._set(outputCols=value) @since("3.0.0") def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) @since("3.0.0") def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @since("3.0.0") def setRelativeError(self, value): """ Sets the value of :py:attr:`relativeError`. """ return self._set(relativeError=value) def _create_model(self, java_model): return ImputerModel(java_model) class ImputerModel(JavaModel, _ImputerParams, JavaMLReadable, JavaMLWritable): """ Model fitted by :py:class:`Imputer`. .. versionadded:: 2.2.0 """ @since("3.0.0") def setInputCols(self, value): """ Sets the value of :py:attr:`inputCols`. """ return self._set(inputCols=value) @since("3.0.0") def setOutputCols(self, value): """ Sets the value of :py:attr:`outputCols`. """ return self._set(outputCols=value) @since("3.0.0") def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) @since("3.0.0") def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @property @since("2.2.0") def surrogateDF(self): """ Returns a DataFrame containing inputCols and their corresponding surrogates, which are used to replace the missing values in the input DataFrame. """ return self._call_java("surrogateDF") @inherit_doc class Interaction(JavaTransformer, HasInputCols, HasOutputCol, JavaMLReadable, JavaMLWritable): """ Implements the feature interaction transform. This transformer takes in Double and Vector type columns and outputs a flattened vector of their feature interactions. 
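    The paragraph and example below walk through the general case; as a rough
    sketch, the all-numeric version of that cross-product can be written as
    (illustrative only, not Spark's implementation)::

        def interact_numeric(scalar, vector):
            # Cross-product of a numeric column with a vector column.
            return [scalar * v for v in vector]

        # interact_numeric(2.0, [3.0, 4.0]) == [6.0, 8.0], matching the
        # Vector(6, 8) case described below; the nominal case first expands
        # the scalar into a one-hot block before taking the products.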
To handle interaction, we first one-hot encode any nominal features. Then, a vector of the feature cross-products is produced. For example, given the input feature values `Double(2)` and `Vector(3, 4)`, the output would be `Vector(6, 8)` if all input features were numeric. If the first feature was instead nominal with four categories, the output would then be `Vector(0, 0, 0, 0, 3, 4, 0, 0)`. .. versionadded:: 3.0.0 Examples -------- >>> df = spark.createDataFrame([(0.0, 1.0), (2.0, 3.0)], ["a", "b"]) >>> interaction = Interaction() >>> interaction.setInputCols(["a", "b"]) Interaction... >>> interaction.setOutputCol("ab") Interaction... >>> interaction.transform(df).show() +---+---+-----+ | a| b| ab| +---+---+-----+ |0.0|1.0|[0.0]| |2.0|3.0|[6.0]| +---+---+-----+ ... >>> interactionPath = temp_path + "/interaction" >>> interaction.save(interactionPath) >>> loadedInteraction = Interaction.load(interactionPath) >>> loadedInteraction.transform(df).head().ab == interaction.transform(df).head().ab True """ @keyword_only def __init__(self, *, inputCols=None, outputCol=None): """ __init__(self, \\*, inputCols=None, outputCol=None): """ super(Interaction, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Interaction", self.uid) self._setDefault() kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("3.0.0") def setParams(self, *, inputCols=None, outputCol=None): """ setParams(self, \\*, inputCols=None, outputCol=None) Sets params for this Interaction. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("3.0.0") def setInputCols(self, value): """ Sets the value of :py:attr:`inputCols`. """ return self._set(inputCols=value) @since("3.0.0") def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) class _MaxAbsScalerParams(HasInputCol, HasOutputCol): """ Params for :py:class:`MaxAbsScaler` and :py:class:`MaxAbsScalerModel`. .. versionadded:: 3.0.0 """ pass @inherit_doc class MaxAbsScaler(JavaEstimator, _MaxAbsScalerParams, JavaMLReadable, JavaMLWritable): """ Rescale each feature individually to range [-1, 1] by dividing through the largest maximum absolute value in each feature. It does not shift/center the data, and thus does not destroy any sparsity. .. versionadded:: 2.0.0 Examples -------- >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame([(Vectors.dense([1.0]),), (Vectors.dense([2.0]),)], ["a"]) >>> maScaler = MaxAbsScaler(outputCol="scaled") >>> maScaler.setInputCol("a") MaxAbsScaler... >>> model = maScaler.fit(df) >>> model.setOutputCol("scaledOutput") MaxAbsScalerModel... >>> model.transform(df).show() +-----+------------+ | a|scaledOutput| +-----+------------+ |[1.0]| [0.5]| |[2.0]| [1.0]| +-----+------------+ ... 
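    A pure-Python sketch of the same max-abs rescaling for a single feature
    column (illustrative only)::

        def max_abs_scale(column):
            m = max(abs(v) for v in column)
            return [v / m if m != 0.0 else 0.0 for v in column]

        # max_abs_scale([1.0, 2.0]) == [0.5, 1.0], the scaledOutput values
        # shown above; signs and zeros are untouched, so sparsity is preserved.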
>>> scalerPath = temp_path + "/max-abs-scaler" >>> maScaler.save(scalerPath) >>> loadedMAScaler = MaxAbsScaler.load(scalerPath) >>> loadedMAScaler.getInputCol() == maScaler.getInputCol() True >>> loadedMAScaler.getOutputCol() == maScaler.getOutputCol() True >>> modelPath = temp_path + "/max-abs-scaler-model" >>> model.save(modelPath) >>> loadedModel = MaxAbsScalerModel.load(modelPath) >>> loadedModel.maxAbs == model.maxAbs True >>> loadedModel.transform(df).take(1) == model.transform(df).take(1) True """ @keyword_only def __init__(self, *, inputCol=None, outputCol=None): """ __init__(self, \\*, inputCol=None, outputCol=None) """ super(MaxAbsScaler, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.MaxAbsScaler", self.uid) self._setDefault() kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("2.0.0") def setParams(self, *, inputCol=None, outputCol=None): """ setParams(self, \\*, inputCol=None, outputCol=None) Sets params for this MaxAbsScaler. """ kwargs = self._input_kwargs return self._set(**kwargs) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) def _create_model(self, java_model): return MaxAbsScalerModel(java_model) class MaxAbsScalerModel(JavaModel, _MaxAbsScalerParams, JavaMLReadable, JavaMLWritable): """ Model fitted by :py:class:`MaxAbsScaler`. .. versionadded:: 2.0.0 """ @since("3.0.0") def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) @since("3.0.0") def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @property @since("2.0.0") def maxAbs(self): """ Max Abs vector. """ return self._call_java("maxAbs") @inherit_doc class MinHashLSH(_LSH, HasInputCol, HasOutputCol, HasSeed, JavaMLReadable, JavaMLWritable): """ LSH class for Jaccard distance. The input can be dense or sparse vectors, but it is more efficient if it is sparse. For example, `Vectors.sparse(10, [(2, 1.0), (3, 1.0), (5, 1.0)])` means there are 10 elements in the space. This set contains elements 2, 3, and 5. Also, any input vector must have at least 1 non-zero index, and all non-zero values are treated as binary "1" values. .. versionadded:: 2.2.0 Notes ----- See `Wikipedia on MinHash <https://en.wikipedia.org/wiki/MinHash>`_ Examples -------- >>> from pyspark.ml.linalg import Vectors >>> from pyspark.sql.functions import col >>> data = [(0, Vectors.sparse(6, [0, 1, 2], [1.0, 1.0, 1.0]),), ... (1, Vectors.sparse(6, [2, 3, 4], [1.0, 1.0, 1.0]),), ... (2, Vectors.sparse(6, [0, 2, 4], [1.0, 1.0, 1.0]),)] >>> df = spark.createDataFrame(data, ["id", "features"]) >>> mh = MinHashLSH() >>> mh.setInputCol("features") MinHashLSH... >>> mh.setOutputCol("hashes") MinHashLSH... >>> mh.setSeed(12345) MinHashLSH... >>> model = mh.fit(df) >>> model.setInputCol("features") MinHashLSHModel... >>> model.transform(df).head() Row(id=0, features=SparseVector(6, {0: 1.0, 1: 1.0, 2: 1.0}), hashes=[DenseVector([6179668... >>> data2 = [(3, Vectors.sparse(6, [1, 3, 5], [1.0, 1.0, 1.0]),), ... (4, Vectors.sparse(6, [2, 3, 5], [1.0, 1.0, 1.0]),), ... 
(5, Vectors.sparse(6, [1, 2, 4], [1.0, 1.0, 1.0]),)] >>> df2 = spark.createDataFrame(data2, ["id", "features"]) >>> key = Vectors.sparse(6, [1, 2], [1.0, 1.0]) >>> model.approxNearestNeighbors(df2, key, 1).collect() [Row(id=5, features=SparseVector(6, {1: 1.0, 2: 1.0, 4: 1.0}), hashes=[DenseVector([6179668... >>> model.approxSimilarityJoin(df, df2, 0.6, distCol="JaccardDistance").select( ... col("datasetA.id").alias("idA"), ... col("datasetB.id").alias("idB"), ... col("JaccardDistance")).show() +---+---+---------------+ |idA|idB|JaccardDistance| +---+---+---------------+ | 0| 5| 0.5| | 1| 4| 0.5| +---+---+---------------+ ... >>> mhPath = temp_path + "/mh" >>> mh.save(mhPath) >>> mh2 = MinHashLSH.load(mhPath) >>> mh2.getOutputCol() == mh.getOutputCol() True >>> modelPath = temp_path + "/mh-model" >>> model.save(modelPath) >>> model2 = MinHashLSHModel.load(modelPath) >>> model.transform(df).head().hashes == model2.transform(df).head().hashes True """ @keyword_only def __init__(self, *, inputCol=None, outputCol=None, seed=None, numHashTables=1): """ __init__(self, \\*, inputCol=None, outputCol=None, seed=None, numHashTables=1) """ super(MinHashLSH, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.MinHashLSH", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("2.2.0") def setParams(self, *, inputCol=None, outputCol=None, seed=None, numHashTables=1): """ setParams(self, \\*, inputCol=None, outputCol=None, seed=None, numHashTables=1) Sets params for this MinHashLSH. """ kwargs = self._input_kwargs return self._set(**kwargs) def setSeed(self, value): """ Sets the value of :py:attr:`seed`. """ return self._set(seed=value) def _create_model(self, java_model): return MinHashLSHModel(java_model) class MinHashLSHModel(_LSHModel, JavaMLReadable, JavaMLWritable): r""" Model produced by :py:class:`MinHashLSH`, where where multiple hash functions are stored. Each hash function is picked from the following family of hash functions, where :math:`a_i` and :math:`b_i` are randomly chosen integers less than prime: :math:`h_i(x) = ((x \cdot a_i + b_i) \mod prime)` This hash family is approximately min-wise independent according to the reference. .. versionadded:: 2.2.0 Notes ----- See Tom Bohman, Colin Cooper, and Alan Frieze. "Min-wise independent linear permutations." Electronic Journal of Combinatorics 7 (2000): R26. """ class _MinMaxScalerParams(HasInputCol, HasOutputCol): """ Params for :py:class:`MinMaxScaler` and :py:class:`MinMaxScalerModel`. .. versionadded:: 3.0.0 """ min = Param(Params._dummy(), "min", "Lower bound of the output feature range", typeConverter=TypeConverters.toFloat) max = Param(Params._dummy(), "max", "Upper bound of the output feature range", typeConverter=TypeConverters.toFloat) def __init__(self, *args): super(_MinMaxScalerParams, self).__init__(*args) self._setDefault(min=0.0, max=1.0) @since("1.6.0") def getMin(self): """ Gets the value of min or its default value. """ return self.getOrDefault(self.min) @since("1.6.0") def getMax(self): """ Gets the value of max or its default value. """ return self.getOrDefault(self.max) @inherit_doc class MinMaxScaler(JavaEstimator, _MinMaxScalerParams, JavaMLReadable, JavaMLWritable): """ Rescale each feature individually to a common range [min, max] linearly using column summary statistics, which is also known as min-max normalization or Rescaling. 
The rescaled value for feature E is calculated as, Rescaled(e_i) = (e_i - E_min) / (E_max - E_min) * (max - min) + min For the case E_max == E_min, Rescaled(e_i) = 0.5 * (max + min) .. versionadded:: 1.6.0 Notes ----- Since zero values will probably be transformed to non-zero values, output of the transformer will be DenseVector even for sparse input. Examples -------- >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame([(Vectors.dense([0.0]),), (Vectors.dense([2.0]),)], ["a"]) >>> mmScaler = MinMaxScaler(outputCol="scaled") >>> mmScaler.setInputCol("a") MinMaxScaler... >>> model = mmScaler.fit(df) >>> model.setOutputCol("scaledOutput") MinMaxScalerModel... >>> model.originalMin DenseVector([0.0]) >>> model.originalMax DenseVector([2.0]) >>> model.transform(df).show() +-----+------------+ | a|scaledOutput| +-----+------------+ |[0.0]| [0.0]| |[2.0]| [1.0]| +-----+------------+ ... >>> minMaxScalerPath = temp_path + "/min-max-scaler" >>> mmScaler.save(minMaxScalerPath) >>> loadedMMScaler = MinMaxScaler.load(minMaxScalerPath) >>> loadedMMScaler.getMin() == mmScaler.getMin() True >>> loadedMMScaler.getMax() == mmScaler.getMax() True >>> modelPath = temp_path + "/min-max-scaler-model" >>> model.save(modelPath) >>> loadedModel = MinMaxScalerModel.load(modelPath) >>> loadedModel.originalMin == model.originalMin True >>> loadedModel.originalMax == model.originalMax True >>> loadedModel.transform(df).take(1) == model.transform(df).take(1) True """ @keyword_only def __init__(self, *, min=0.0, max=1.0, inputCol=None, outputCol=None): """ __init__(self, \\*, min=0.0, max=1.0, inputCol=None, outputCol=None) """ super(MinMaxScaler, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.MinMaxScaler", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.6.0") def setParams(self, *, min=0.0, max=1.0, inputCol=None, outputCol=None): """ setParams(self, \\*, min=0.0, max=1.0, inputCol=None, outputCol=None) Sets params for this MinMaxScaler. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.6.0") def setMin(self, value): """ Sets the value of :py:attr:`min`. """ return self._set(min=value) @since("1.6.0") def setMax(self, value): """ Sets the value of :py:attr:`max`. """ return self._set(max=value) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) def _create_model(self, java_model): return MinMaxScalerModel(java_model) class MinMaxScalerModel(JavaModel, _MinMaxScalerParams, JavaMLReadable, JavaMLWritable): """ Model fitted by :py:class:`MinMaxScaler`. .. versionadded:: 1.6.0 """ @since("3.0.0") def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) @since("3.0.0") def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @since("3.0.0") def setMin(self, value): """ Sets the value of :py:attr:`min`. """ return self._set(min=value) @since("3.0.0") def setMax(self, value): """ Sets the value of :py:attr:`max`. """ return self._set(max=value) @property @since("2.0.0") def originalMin(self): """ Min value for each original column during fitting. """ return self._call_java("originalMin") @property @since("2.0.0") def originalMax(self): """ Max value for each original column during fitting. 
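    As a rough sketch, the fitted originalMin/originalMax are what feed the
    rescaling formula quoted in the MinMaxScaler docstring above (illustrative
    only, written for one feature column)::

        def min_max_rescale(column, out_min=0.0, out_max=1.0):
            e_min, e_max = min(column), max(column)
            if e_max == e_min:
                return [0.5 * (out_max + out_min)] * len(column)
            scale = (out_max - out_min) / (e_max - e_min)
            return [(v - e_min) * scale + out_min for v in column]

        # min_max_rescale([0.0, 2.0]) == [0.0, 1.0], matching the transform
        # output shown in the MinMaxScaler example above.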
""" return self._call_java("originalMax") @inherit_doc class NGram(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable): """ A feature transformer that converts the input array of strings into an array of n-grams. Null values in the input array are ignored. It returns an array of n-grams where each n-gram is represented by a space-separated string of words. When the input is empty, an empty array is returned. When the input array length is less than n (number of elements per n-gram), no n-grams are returned. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([Row(inputTokens=["a", "b", "c", "d", "e"])]) >>> ngram = NGram(n=2) >>> ngram.setInputCol("inputTokens") NGram... >>> ngram.setOutputCol("nGrams") NGram... >>> ngram.transform(df).head() Row(inputTokens=['a', 'b', 'c', 'd', 'e'], nGrams=['a b', 'b c', 'c d', 'd e']) >>> # Change n-gram length >>> ngram.setParams(n=4).transform(df).head() Row(inputTokens=['a', 'b', 'c', 'd', 'e'], nGrams=['a b c d', 'b c d e']) >>> # Temporarily modify output column. >>> ngram.transform(df, {ngram.outputCol: "output"}).head() Row(inputTokens=['a', 'b', 'c', 'd', 'e'], output=['a b c d', 'b c d e']) >>> ngram.transform(df).head() Row(inputTokens=['a', 'b', 'c', 'd', 'e'], nGrams=['a b c d', 'b c d e']) >>> # Must use keyword arguments to specify params. >>> ngram.setParams("text") Traceback (most recent call last): ... TypeError: Method setParams forces keyword arguments. >>> ngramPath = temp_path + "/ngram" >>> ngram.save(ngramPath) >>> loadedNGram = NGram.load(ngramPath) >>> loadedNGram.getN() == ngram.getN() True >>> loadedNGram.transform(df).take(1) == ngram.transform(df).take(1) True """ n = Param(Params._dummy(), "n", "number of elements per n-gram (>=1)", typeConverter=TypeConverters.toInt) @keyword_only def __init__(self, *, n=2, inputCol=None, outputCol=None): """ __init__(self, \\*, n=2, inputCol=None, outputCol=None) """ super(NGram, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.NGram", self.uid) self._setDefault(n=2) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.5.0") def setParams(self, *, n=2, inputCol=None, outputCol=None): """ setParams(self, \\*, n=2, inputCol=None, outputCol=None) Sets params for this NGram. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.5.0") def setN(self, value): """ Sets the value of :py:attr:`n`. """ return self._set(n=value) @since("1.5.0") def getN(self): """ Gets the value of n or its default value. """ return self.getOrDefault(self.n) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @inherit_doc class Normalizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable): """ Normalize a vector to have unit norm using the given p-norm. .. versionadded:: 1.4.0 Examples -------- >>> from pyspark.ml.linalg import Vectors >>> svec = Vectors.sparse(4, {1: 4.0, 3: 3.0}) >>> df = spark.createDataFrame([(Vectors.dense([3.0, -4.0]), svec)], ["dense", "sparse"]) >>> normalizer = Normalizer(p=2.0) >>> normalizer.setInputCol("dense") Normalizer... >>> normalizer.setOutputCol("features") Normalizer... 
>>> normalizer.transform(df).head().features DenseVector([0.6, -0.8]) >>> normalizer.setParams(inputCol="sparse", outputCol="freqs").transform(df).head().freqs SparseVector(4, {1: 0.8, 3: 0.6}) >>> params = {normalizer.p: 1.0, normalizer.inputCol: "dense", normalizer.outputCol: "vector"} >>> normalizer.transform(df, params).head().vector DenseVector([0.4286, -0.5714]) >>> normalizerPath = temp_path + "/normalizer" >>> normalizer.save(normalizerPath) >>> loadedNormalizer = Normalizer.load(normalizerPath) >>> loadedNormalizer.getP() == normalizer.getP() True >>> loadedNormalizer.transform(df).take(1) == normalizer.transform(df).take(1) True """ p = Param(Params._dummy(), "p", "the p norm value.", typeConverter=TypeConverters.toFloat) @keyword_only def __init__(self, *, p=2.0, inputCol=None, outputCol=None): """ __init__(self, \\*, p=2.0, inputCol=None, outputCol=None) """ super(Normalizer, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Normalizer", self.uid) self._setDefault(p=2.0) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, *, p=2.0, inputCol=None, outputCol=None): """ setParams(self, \\*, p=2.0, inputCol=None, outputCol=None) Sets params for this Normalizer. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.4.0") def setP(self, value): """ Sets the value of :py:attr:`p`. """ return self._set(p=value) @since("1.4.0") def getP(self): """ Gets the value of p or its default value. """ return self.getOrDefault(self.p) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) class _OneHotEncoderParams(HasInputCol, HasInputCols, HasOutputCol, HasOutputCols, HasHandleInvalid): """ Params for :py:class:`OneHotEncoder` and :py:class:`OneHotEncoderModel`. .. versionadded:: 3.0.0 """ handleInvalid = Param(Params._dummy(), "handleInvalid", "How to handle invalid data during " + "transform(). Options are 'keep' (invalid data presented as an extra " + "categorical feature) or error (throw an error). Note that this Param " + "is only used during transform; during fitting, invalid data will " + "result in an error.", typeConverter=TypeConverters.toString) dropLast = Param(Params._dummy(), "dropLast", "whether to drop the last category", typeConverter=TypeConverters.toBoolean) def __init__(self, *args): super(_OneHotEncoderParams, self).__init__(*args) self._setDefault(handleInvalid="error", dropLast=True) @since("2.3.0") def getDropLast(self): """ Gets the value of dropLast or its default value. """ return self.getOrDefault(self.dropLast) @inherit_doc class OneHotEncoder(JavaEstimator, _OneHotEncoderParams, JavaMLReadable, JavaMLWritable): """ A one-hot encoder that maps a column of category indices to a column of binary vectors, with at most a single one-value per row that indicates the input category index. For example with 5 categories, an input value of 2.0 would map to an output vector of `[0.0, 0.0, 1.0, 0.0]`. The last category is not included by default (configurable via :py:attr:`dropLast`), because it makes the vector entries sum up to one, and hence linearly dependent. So an input value of 4.0 maps to `[0.0, 0.0, 0.0, 0.0]`. When :py:attr:`handleInvalid` is configured to 'keep', an extra "category" indicating invalid values is added as last category. 
So when :py:attr:`dropLast` is true, invalid values are encoded as all-zeros vector. .. versionadded:: 2.3.0 Notes ----- This is different from scikit-learn's OneHotEncoder, which keeps all categories. The output vectors are sparse. When encoding multi-column by using :py:attr:`inputCols` and :py:attr:`outputCols` params, input/output cols come in pairs, specified by the order in the arrays, and each pair is treated independently. See Also -------- StringIndexer : for converting categorical values into category indices Examples -------- >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame([(0.0,), (1.0,), (2.0,)], ["input"]) >>> ohe = OneHotEncoder() >>> ohe.setInputCols(["input"]) OneHotEncoder... >>> ohe.setOutputCols(["output"]) OneHotEncoder... >>> model = ohe.fit(df) >>> model.setOutputCols(["output"]) OneHotEncoderModel... >>> model.getHandleInvalid() 'error' >>> model.transform(df).head().output SparseVector(2, {0: 1.0}) >>> single_col_ohe = OneHotEncoder(inputCol="input", outputCol="output") >>> single_col_model = single_col_ohe.fit(df) >>> single_col_model.transform(df).head().output SparseVector(2, {0: 1.0}) >>> ohePath = temp_path + "/ohe" >>> ohe.save(ohePath) >>> loadedOHE = OneHotEncoder.load(ohePath) >>> loadedOHE.getInputCols() == ohe.getInputCols() True >>> modelPath = temp_path + "/ohe-model" >>> model.save(modelPath) >>> loadedModel = OneHotEncoderModel.load(modelPath) >>> loadedModel.categorySizes == model.categorySizes True >>> loadedModel.transform(df).take(1) == model.transform(df).take(1) True """ @keyword_only def __init__(self, *, inputCols=None, outputCols=None, handleInvalid="error", dropLast=True, inputCol=None, outputCol=None): """ __init__(self, \\*, inputCols=None, outputCols=None, handleInvalid="error", dropLast=True, \ inputCol=None, outputCol=None) """ super(OneHotEncoder, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.feature.OneHotEncoder", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("2.3.0") def setParams(self, *, inputCols=None, outputCols=None, handleInvalid="error", dropLast=True, inputCol=None, outputCol=None): """ setParams(self, \\*, inputCols=None, outputCols=None, handleInvalid="error", \ dropLast=True, inputCol=None, outputCol=None) Sets params for this OneHotEncoder. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("2.3.0") def setDropLast(self, value): """ Sets the value of :py:attr:`dropLast`. """ return self._set(dropLast=value) @since("3.0.0") def setInputCols(self, value): """ Sets the value of :py:attr:`inputCols`. """ return self._set(inputCols=value) @since("3.0.0") def setOutputCols(self, value): """ Sets the value of :py:attr:`outputCols`. """ return self._set(outputCols=value) @since("3.0.0") def setHandleInvalid(self, value): """ Sets the value of :py:attr:`handleInvalid`. """ return self._set(handleInvalid=value) @since("3.0.0") def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) @since("3.0.0") def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) def _create_model(self, java_model): return OneHotEncoderModel(java_model) class OneHotEncoderModel(JavaModel, _OneHotEncoderParams, JavaMLReadable, JavaMLWritable): """ Model fitted by :py:class:`OneHotEncoder`. .. versionadded:: 2.3.0 """ @since("3.0.0") def setDropLast(self, value): """ Sets the value of :py:attr:`dropLast`. 
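    A small sketch of what dropLast changes when a single category index is
    encoded (illustrative only, not the Spark implementation)::

        def one_hot(index, num_categories, drop_last=True):
            length = num_categories - 1 if drop_last else num_categories
            vec = [0.0] * length
            if int(index) < length:
                vec[int(index)] = 1.0
            return vec

        # With 5 categories: one_hot(2.0, 5) == [0.0, 0.0, 1.0, 0.0], while
        # one_hot(4.0, 5) is all zeros, the two cases walked through in the
        # OneHotEncoder class docstring above.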
""" return self._set(dropLast=value) @since("3.0.0") def setInputCols(self, value): """ Sets the value of :py:attr:`inputCols`. """ return self._set(inputCols=value) @since("3.0.0") def setOutputCols(self, value): """ Sets the value of :py:attr:`outputCols`. """ return self._set(outputCols=value) @since("3.0.0") def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) @since("3.0.0") def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @since("3.0.0") def setHandleInvalid(self, value): """ Sets the value of :py:attr:`handleInvalid`. """ return self._set(handleInvalid=value) @property @since("2.3.0") def categorySizes(self): """ Original number of categories for each feature being encoded. The array contains one value for each input column, in order. """ return self._call_java("categorySizes") @inherit_doc class PolynomialExpansion(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable): """ Perform feature expansion in a polynomial space. As said in `wikipedia of Polynomial Expansion <http://en.wikipedia.org/wiki/Polynomial_expansion>`_, "In mathematics, an expansion of a product of sums expresses it as a sum of products by using the fact that multiplication distributes over addition". Take a 2-variable feature vector as an example: `(x, y)`, if we want to expand it with degree 2, then we get `(x, x * x, y, x * y, y * y)`. .. versionadded:: 1.4.0 Examples -------- >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame([(Vectors.dense([0.5, 2.0]),)], ["dense"]) >>> px = PolynomialExpansion(degree=2) >>> px.setInputCol("dense") PolynomialExpansion... >>> px.setOutputCol("expanded") PolynomialExpansion... >>> px.transform(df).head().expanded DenseVector([0.5, 0.25, 2.0, 1.0, 4.0]) >>> px.setParams(outputCol="test").transform(df).head().test DenseVector([0.5, 0.25, 2.0, 1.0, 4.0]) >>> polyExpansionPath = temp_path + "/poly-expansion" >>> px.save(polyExpansionPath) >>> loadedPx = PolynomialExpansion.load(polyExpansionPath) >>> loadedPx.getDegree() == px.getDegree() True >>> loadedPx.transform(df).take(1) == px.transform(df).take(1) True """ degree = Param(Params._dummy(), "degree", "the polynomial degree to expand (>= 1)", typeConverter=TypeConverters.toInt) @keyword_only def __init__(self, *, degree=2, inputCol=None, outputCol=None): """ __init__(self, \\*, degree=2, inputCol=None, outputCol=None) """ super(PolynomialExpansion, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.feature.PolynomialExpansion", self.uid) self._setDefault(degree=2) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, *, degree=2, inputCol=None, outputCol=None): """ setParams(self, \\*, degree=2, inputCol=None, outputCol=None) Sets params for this PolynomialExpansion. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.4.0") def setDegree(self, value): """ Sets the value of :py:attr:`degree`. """ return self._set(degree=value) @since("1.4.0") def getDegree(self): """ Gets the value of degree or its default value. """ return self.getOrDefault(self.degree) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. 
""" return self._set(outputCol=value) @inherit_doc class QuantileDiscretizer(JavaEstimator, HasInputCol, HasOutputCol, HasInputCols, HasOutputCols, HasHandleInvalid, HasRelativeError, JavaMLReadable, JavaMLWritable): """ :py:class:`QuantileDiscretizer` takes a column with continuous features and outputs a column with binned categorical features. The number of bins can be set using the :py:attr:`numBuckets` parameter. It is possible that the number of buckets used will be less than this value, for example, if there are too few distinct values of the input to create enough distinct quantiles. Since 3.0.0, :py:class:`QuantileDiscretizer` can map multiple columns at once by setting the :py:attr:`inputCols` parameter. If both of the :py:attr:`inputCol` and :py:attr:`inputCols` parameters are set, an Exception will be thrown. To specify the number of buckets for each column, the :py:attr:`numBucketsArray` parameter can be set, or if the number of buckets should be the same across columns, :py:attr:`numBuckets` can be set as a convenience. .. versionadded:: 2.0.0 Notes ----- NaN handling: Note also that :py:class:`QuantileDiscretizer` will raise an error when it finds NaN values in the dataset, but the user can also choose to either keep or remove NaN values within the dataset by setting :py:attr:`handleInvalid` parameter. If the user chooses to keep NaN values, they will be handled specially and placed into their own bucket, for example, if 4 buckets are used, then non-NaN data will be put into buckets[0-3], but NaNs will be counted in a special bucket[4]. Algorithm: The bin ranges are chosen using an approximate algorithm (see the documentation for :py:meth:`~.DataFrameStatFunctions.approxQuantile` for a detailed description). The precision of the approximation can be controlled with the :py:attr:`relativeError` parameter. The lower and upper bin bounds will be `-Infinity` and `+Infinity`, covering all real values. Examples -------- >>> values = [(0.1,), (0.4,), (1.2,), (1.5,), (float("nan"),), (float("nan"),)] >>> df1 = spark.createDataFrame(values, ["values"]) >>> qds1 = QuantileDiscretizer(inputCol="values", outputCol="buckets") >>> qds1.setNumBuckets(2) QuantileDiscretizer... >>> qds1.setRelativeError(0.01) QuantileDiscretizer... >>> qds1.setHandleInvalid("error") QuantileDiscretizer... >>> qds1.getRelativeError() 0.01 >>> bucketizer = qds1.fit(df1) >>> qds1.setHandleInvalid("keep").fit(df1).transform(df1).count() 6 >>> qds1.setHandleInvalid("skip").fit(df1).transform(df1).count() 4 >>> splits = bucketizer.getSplits() >>> splits[0] -inf >>> print("%2.1f" % round(splits[1], 1)) 0.4 >>> bucketed = bucketizer.transform(df1).head() >>> bucketed.buckets 0.0 >>> quantileDiscretizerPath = temp_path + "/quantile-discretizer" >>> qds1.save(quantileDiscretizerPath) >>> loadedQds = QuantileDiscretizer.load(quantileDiscretizerPath) >>> loadedQds.getNumBuckets() == qds1.getNumBuckets() True >>> inputs = [(0.1, 0.0), (0.4, 1.0), (1.2, 1.3), (1.5, 1.5), ... (float("nan"), float("nan")), (float("nan"), float("nan"))] >>> df2 = spark.createDataFrame(inputs, ["input1", "input2"]) >>> qds2 = QuantileDiscretizer(relativeError=0.01, handleInvalid="error", numBuckets=2, ... 
inputCols=["input1", "input2"], outputCols=["output1", "output2"]) >>> qds2.getRelativeError() 0.01 >>> qds2.setHandleInvalid("keep").fit(df2).transform(df2).show() +------+------+-------+-------+ |input1|input2|output1|output2| +------+------+-------+-------+ | 0.1| 0.0| 0.0| 0.0| | 0.4| 1.0| 1.0| 1.0| | 1.2| 1.3| 1.0| 1.0| | 1.5| 1.5| 1.0| 1.0| | NaN| NaN| 2.0| 2.0| | NaN| NaN| 2.0| 2.0| +------+------+-------+-------+ ... >>> qds3 = QuantileDiscretizer(relativeError=0.01, handleInvalid="error", ... numBucketsArray=[5, 10], inputCols=["input1", "input2"], ... outputCols=["output1", "output2"]) >>> qds3.setHandleInvalid("skip").fit(df2).transform(df2).show() +------+------+-------+-------+ |input1|input2|output1|output2| +------+------+-------+-------+ | 0.1| 0.0| 1.0| 1.0| | 0.4| 1.0| 2.0| 2.0| | 1.2| 1.3| 3.0| 3.0| | 1.5| 1.5| 4.0| 4.0| +------+------+-------+-------+ ... """ numBuckets = Param(Params._dummy(), "numBuckets", "Maximum number of buckets (quantiles, or " + "categories) into which data points are grouped. Must be >= 2.", typeConverter=TypeConverters.toInt) handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. " + "Options are skip (filter out rows with invalid values), " + "error (throw an error), or keep (keep invalid values in a special " + "additional bucket). Note that in the multiple columns " + "case, the invalid handling is applied to all columns. That said " + "for 'error' it will throw an error if any invalids are found in " + "any columns, for 'skip' it will skip rows with any invalids in " + "any columns, etc.", typeConverter=TypeConverters.toString) numBucketsArray = Param(Params._dummy(), "numBucketsArray", "Array of number of buckets " + "(quantiles, or categories) into which data points are grouped. " + "This is for multiple columns input. If transforming multiple " + "columns and numBucketsArray is not set, but numBuckets is set, " + "then numBuckets will be applied across all columns.", typeConverter=TypeConverters.toListInt) @keyword_only def __init__(self, *, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001, handleInvalid="error", numBucketsArray=None, inputCols=None, outputCols=None): """ __init__(self, \\*, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001, \ handleInvalid="error", numBucketsArray=None, inputCols=None, outputCols=None) """ super(QuantileDiscretizer, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.QuantileDiscretizer", self.uid) self._setDefault(numBuckets=2, relativeError=0.001, handleInvalid="error") kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("2.0.0") def setParams(self, *, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001, handleInvalid="error", numBucketsArray=None, inputCols=None, outputCols=None): """ setParams(self, \\*, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001, \ handleInvalid="error", numBucketsArray=None, inputCols=None, outputCols=None) Set the params for the QuantileDiscretizer """ kwargs = self._input_kwargs return self._set(**kwargs) @since("2.0.0") def setNumBuckets(self, value): """ Sets the value of :py:attr:`numBuckets`. """ return self._set(numBuckets=value) @since("2.0.0") def getNumBuckets(self): """ Gets the value of numBuckets or its default value. """ return self.getOrDefault(self.numBuckets) @since("3.0.0") def setNumBucketsArray(self, value): """ Sets the value of :py:attr:`numBucketsArray`. 
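    A rough sketch of the two-step behaviour described in the class docstring
    above: quantile-based splits are fitted, then applied like a Bucketizer
    (illustrative only; Spark estimates the splits with approxQuantile and its
    exact boundary handling differs)::

        import math

        def assign_bucket(value, splits, handle_invalid="error"):
            if math.isnan(value):
                if handle_invalid == "keep":
                    return float(len(splits) - 1)   # extra bucket for NaN
                raise ValueError("NaN seen with handleInvalid='error'")
            for i in range(len(splits) - 1):
                if splits[i] <= value < splits[i + 1]:
                    return float(i)
            return float(len(splits) - 2)

        # With splits [-inf, 0.4, inf] (two buckets), 0.1 maps to bucket 0.0,
        # and a NaN row maps to bucket 2.0 when handleInvalid="keep", as in
        # the qds1 example above.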
""" return self._set(numBucketsArray=value) @since("3.0.0") def getNumBucketsArray(self): """ Gets the value of numBucketsArray or its default value. """ return self.getOrDefault(self.numBucketsArray) @since("2.0.0") def setRelativeError(self, value): """ Sets the value of :py:attr:`relativeError`. """ return self._set(relativeError=value) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) @since("3.0.0") def setInputCols(self, value): """ Sets the value of :py:attr:`inputCols`. """ return self._set(inputCols=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @since("3.0.0") def setOutputCols(self, value): """ Sets the value of :py:attr:`outputCols`. """ return self._set(outputCols=value) def setHandleInvalid(self, value): """ Sets the value of :py:attr:`handleInvalid`. """ return self._set(handleInvalid=value) def _create_model(self, java_model): """ Private method to convert the java_model to a Python model. """ if (self.isSet(self.inputCol)): return Bucketizer(splits=list(java_model.getSplits()), inputCol=self.getInputCol(), outputCol=self.getOutputCol(), handleInvalid=self.getHandleInvalid()) else: splitsArrayList = [list(x) for x in list(java_model.getSplitsArray())] return Bucketizer(splitsArray=splitsArrayList, inputCols=self.getInputCols(), outputCols=self.getOutputCols(), handleInvalid=self.getHandleInvalid()) class _RobustScalerParams(HasInputCol, HasOutputCol, HasRelativeError): """ Params for :py:class:`RobustScaler` and :py:class:`RobustScalerModel`. .. versionadded:: 3.0.0 """ lower = Param(Params._dummy(), "lower", "Lower quantile to calculate quantile range", typeConverter=TypeConverters.toFloat) upper = Param(Params._dummy(), "upper", "Upper quantile to calculate quantile range", typeConverter=TypeConverters.toFloat) withCentering = Param(Params._dummy(), "withCentering", "Whether to center data with median", typeConverter=TypeConverters.toBoolean) withScaling = Param(Params._dummy(), "withScaling", "Whether to scale the data to " "quantile range", typeConverter=TypeConverters.toBoolean) def __init__(self, *args): super(_RobustScalerParams, self).__init__(*args) self._setDefault(lower=0.25, upper=0.75, withCentering=False, withScaling=True, relativeError=0.001) @since("3.0.0") def getLower(self): """ Gets the value of lower or its default value. """ return self.getOrDefault(self.lower) @since("3.0.0") def getUpper(self): """ Gets the value of upper or its default value. """ return self.getOrDefault(self.upper) @since("3.0.0") def getWithCentering(self): """ Gets the value of withCentering or its default value. """ return self.getOrDefault(self.withCentering) @since("3.0.0") def getWithScaling(self): """ Gets the value of withScaling or its default value. """ return self.getOrDefault(self.withScaling) @inherit_doc class RobustScaler(JavaEstimator, _RobustScalerParams, JavaMLReadable, JavaMLWritable): """ RobustScaler removes the median and scales the data according to the quantile range. The quantile range is by default IQR (Interquartile Range, quantile range between the 1st quartile = 25th quantile and the 3rd quartile = 75th quantile) but can be configured. Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Median and quantile range are then stored to be used on later data using the transform method. 
Note that NaN values are ignored in the computation of medians and ranges. .. versionadded:: 3.0.0 Examples -------- >>> from pyspark.ml.linalg import Vectors >>> data = [(0, Vectors.dense([0.0, 0.0]),), ... (1, Vectors.dense([1.0, -1.0]),), ... (2, Vectors.dense([2.0, -2.0]),), ... (3, Vectors.dense([3.0, -3.0]),), ... (4, Vectors.dense([4.0, -4.0]),),] >>> df = spark.createDataFrame(data, ["id", "features"]) >>> scaler = RobustScaler() >>> scaler.setInputCol("features") RobustScaler... >>> scaler.setOutputCol("scaled") RobustScaler... >>> model = scaler.fit(df) >>> model.setOutputCol("output") RobustScalerModel... >>> model.median DenseVector([2.0, -2.0]) >>> model.range DenseVector([2.0, 2.0]) >>> model.transform(df).collect()[1].output DenseVector([0.5, -0.5]) >>> scalerPath = temp_path + "/robust-scaler" >>> scaler.save(scalerPath) >>> loadedScaler = RobustScaler.load(scalerPath) >>> loadedScaler.getWithCentering() == scaler.getWithCentering() True >>> loadedScaler.getWithScaling() == scaler.getWithScaling() True >>> modelPath = temp_path + "/robust-scaler-model" >>> model.save(modelPath) >>> loadedModel = RobustScalerModel.load(modelPath) >>> loadedModel.median == model.median True >>> loadedModel.range == model.range True >>> loadedModel.transform(df).take(1) == model.transform(df).take(1) True """ @keyword_only def __init__(self, *, lower=0.25, upper=0.75, withCentering=False, withScaling=True, inputCol=None, outputCol=None, relativeError=0.001): """ __init__(self, \\*, lower=0.25, upper=0.75, withCentering=False, withScaling=True, \ inputCol=None, outputCol=None, relativeError=0.001) """ super(RobustScaler, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.RobustScaler", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("3.0.0") def setParams(self, *, lower=0.25, upper=0.75, withCentering=False, withScaling=True, inputCol=None, outputCol=None, relativeError=0.001): """ setParams(self, \\*, lower=0.25, upper=0.75, withCentering=False, withScaling=True, \ inputCol=None, outputCol=None, relativeError=0.001) Sets params for this RobustScaler. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("3.0.0") def setLower(self, value): """ Sets the value of :py:attr:`lower`. """ return self._set(lower=value) @since("3.0.0") def setUpper(self, value): """ Sets the value of :py:attr:`upper`. """ return self._set(upper=value) @since("3.0.0") def setWithCentering(self, value): """ Sets the value of :py:attr:`withCentering`. """ return self._set(withCentering=value) @since("3.0.0") def setWithScaling(self, value): """ Sets the value of :py:attr:`withScaling`. """ return self._set(withScaling=value) @since("3.0.0") def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) @since("3.0.0") def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @since("3.0.0") def setRelativeError(self, value): """ Sets the value of :py:attr:`relativeError`. """ return self._set(relativeError=value) def _create_model(self, java_model): return RobustScalerModel(java_model) class RobustScalerModel(JavaModel, _RobustScalerParams, JavaMLReadable, JavaMLWritable): """ Model fitted by :py:class:`RobustScaler`. .. versionadded:: 3.0.0 """ @since("3.0.0") def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. 
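    A pure-Python sketch of the scaling described in the RobustScaler docstring
    above, using exact quantiles for illustration (Spark estimates them with
    approxQuantile)::

        def robust_scale(column, lower=0.25, upper=0.75,
                         with_centering=False, with_scaling=True):
            ordered = sorted(column)

            def quantile(q):
                return ordered[int(q * (len(ordered) - 1))]

            median = quantile(0.5)
            q_range = quantile(upper) - quantile(lower)
            out = []
            for v in column:
                if with_centering:
                    v -= median
                if with_scaling and q_range != 0.0:
                    v /= q_range
                out.append(v)
            return out

        # robust_scale([0.0, 1.0, 2.0, 3.0, 4.0])[1] == 0.5, in line with the
        # first feature of the second transformed row in the example above.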
""" return self._set(inputCol=value) @since("3.0.0") def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @property @since("3.0.0") def median(self): """ Median of the RobustScalerModel. """ return self._call_java("median") @property @since("3.0.0") def range(self): """ Quantile range of the RobustScalerModel. """ return self._call_java("range") @inherit_doc class RegexTokenizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable): """ A regex based tokenizer that extracts tokens either by using the provided regex pattern (in Java dialect) to split the text (default) or repeatedly matching the regex (if gaps is false). Optional parameters also allow filtering tokens using a minimal length. It returns an array of strings that can be empty. .. versionadded:: 1.4.0 Examples -------- >>> df = spark.createDataFrame([("A B c",)], ["text"]) >>> reTokenizer = RegexTokenizer() >>> reTokenizer.setInputCol("text") RegexTokenizer... >>> reTokenizer.setOutputCol("words") RegexTokenizer... >>> reTokenizer.transform(df).head() Row(text='A B c', words=['a', 'b', 'c']) >>> # Change a parameter. >>> reTokenizer.setParams(outputCol="tokens").transform(df).head() Row(text='A B c', tokens=['a', 'b', 'c']) >>> # Temporarily modify a parameter. >>> reTokenizer.transform(df, {reTokenizer.outputCol: "words"}).head() Row(text='A B c', words=['a', 'b', 'c']) >>> reTokenizer.transform(df).head() Row(text='A B c', tokens=['a', 'b', 'c']) >>> # Must use keyword arguments to specify params. >>> reTokenizer.setParams("text") Traceback (most recent call last): ... TypeError: Method setParams forces keyword arguments. >>> regexTokenizerPath = temp_path + "/regex-tokenizer" >>> reTokenizer.save(regexTokenizerPath) >>> loadedReTokenizer = RegexTokenizer.load(regexTokenizerPath) >>> loadedReTokenizer.getMinTokenLength() == reTokenizer.getMinTokenLength() True >>> loadedReTokenizer.getGaps() == reTokenizer.getGaps() True >>> loadedReTokenizer.transform(df).take(1) == reTokenizer.transform(df).take(1) True """ minTokenLength = Param(Params._dummy(), "minTokenLength", "minimum token length (>= 0)", typeConverter=TypeConverters.toInt) gaps = Param(Params._dummy(), "gaps", "whether regex splits on gaps (True) or matches tokens " + "(False)") pattern = Param(Params._dummy(), "pattern", "regex pattern (Java dialect) used for tokenizing", typeConverter=TypeConverters.toString) toLowercase = Param(Params._dummy(), "toLowercase", "whether to convert all characters to " + "lowercase before tokenizing", typeConverter=TypeConverters.toBoolean) @keyword_only def __init__(self, *, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, outputCol=None, toLowercase=True): """ __init__(self, \\*, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, \ outputCol=None, toLowercase=True) """ super(RegexTokenizer, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.RegexTokenizer", self.uid) self._setDefault(minTokenLength=1, gaps=True, pattern="\\s+", toLowercase=True) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, *, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, outputCol=None, toLowercase=True): """ setParams(self, \\*, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, \ outputCol=None, toLowercase=True) Sets params for this RegexTokenizer. 
""" kwargs = self._input_kwargs return self._set(**kwargs) @since("1.4.0") def setMinTokenLength(self, value): """ Sets the value of :py:attr:`minTokenLength`. """ return self._set(minTokenLength=value) @since("1.4.0") def getMinTokenLength(self): """ Gets the value of minTokenLength or its default value. """ return self.getOrDefault(self.minTokenLength) @since("1.4.0") def setGaps(self, value): """ Sets the value of :py:attr:`gaps`. """ return self._set(gaps=value) @since("1.4.0") def getGaps(self): """ Gets the value of gaps or its default value. """ return self.getOrDefault(self.gaps) @since("1.4.0") def setPattern(self, value): """ Sets the value of :py:attr:`pattern`. """ return self._set(pattern=value) @since("1.4.0") def getPattern(self): """ Gets the value of pattern or its default value. """ return self.getOrDefault(self.pattern) @since("2.0.0") def setToLowercase(self, value): """ Sets the value of :py:attr:`toLowercase`. """ return self._set(toLowercase=value) @since("2.0.0") def getToLowercase(self): """ Gets the value of toLowercase or its default value. """ return self.getOrDefault(self.toLowercase) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @inherit_doc class SQLTransformer(JavaTransformer, JavaMLReadable, JavaMLWritable): """ Implements the transforms which are defined by SQL statement. Currently we only support SQL syntax like `SELECT ... FROM __THIS__` where `__THIS__` represents the underlying table of the input dataset. .. versionadded:: 1.6.0 Examples -------- >>> df = spark.createDataFrame([(0, 1.0, 3.0), (2, 2.0, 5.0)], ["id", "v1", "v2"]) >>> sqlTrans = SQLTransformer( ... statement="SELECT *, (v1 + v2) AS v3, (v1 * v2) AS v4 FROM __THIS__") >>> sqlTrans.transform(df).head() Row(id=0, v1=1.0, v2=3.0, v3=4.0, v4=3.0) >>> sqlTransformerPath = temp_path + "/sql-transformer" >>> sqlTrans.save(sqlTransformerPath) >>> loadedSqlTrans = SQLTransformer.load(sqlTransformerPath) >>> loadedSqlTrans.getStatement() == sqlTrans.getStatement() True >>> loadedSqlTrans.transform(df).take(1) == sqlTrans.transform(df).take(1) True """ statement = Param(Params._dummy(), "statement", "SQL statement", typeConverter=TypeConverters.toString) @keyword_only def __init__(self, *, statement=None): """ __init__(self, \\*, statement=None) """ super(SQLTransformer, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.SQLTransformer", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.6.0") def setParams(self, *, statement=None): """ setParams(self, \\*, statement=None) Sets params for this SQLTransformer. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.6.0") def setStatement(self, value): """ Sets the value of :py:attr:`statement`. """ return self._set(statement=value) @since("1.6.0") def getStatement(self): """ Gets the value of statement or its default value. """ return self.getOrDefault(self.statement) class _StandardScalerParams(HasInputCol, HasOutputCol): """ Params for :py:class:`StandardScaler` and :py:class:`StandardScalerModel`. .. 
versionadded:: 3.0.0 """ withMean = Param(Params._dummy(), "withMean", "Center data with mean", typeConverter=TypeConverters.toBoolean) withStd = Param(Params._dummy(), "withStd", "Scale to unit standard deviation", typeConverter=TypeConverters.toBoolean) def __init__(self, *args): super(_StandardScalerParams, self).__init__(*args) self._setDefault(withMean=False, withStd=True) @since("1.4.0") def getWithMean(self): """ Gets the value of withMean or its default value. """ return self.getOrDefault(self.withMean) @since("1.4.0") def getWithStd(self): """ Gets the value of withStd or its default value. """ return self.getOrDefault(self.withStd) @inherit_doc class StandardScaler(JavaEstimator, _StandardScalerParams, JavaMLReadable, JavaMLWritable): """ Standardizes features by removing the mean and scaling to unit variance using column summary statistics on the samples in the training set. The "unit std" is computed using the `corrected sample standard deviation \ <https://en.wikipedia.org/wiki/Standard_deviation#Corrected_sample_standard_deviation>`_, which is computed as the square root of the unbiased sample variance. .. versionadded:: 1.4.0 Examples -------- >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame([(Vectors.dense([0.0]),), (Vectors.dense([2.0]),)], ["a"]) >>> standardScaler = StandardScaler() >>> standardScaler.setInputCol("a") StandardScaler... >>> standardScaler.setOutputCol("scaled") StandardScaler... >>> model = standardScaler.fit(df) >>> model.getInputCol() 'a' >>> model.setOutputCol("output") StandardScalerModel... >>> model.mean DenseVector([1.0]) >>> model.std DenseVector([1.4142]) >>> model.transform(df).collect()[1].output DenseVector([1.4142]) >>> standardScalerPath = temp_path + "/standard-scaler" >>> standardScaler.save(standardScalerPath) >>> loadedStandardScaler = StandardScaler.load(standardScalerPath) >>> loadedStandardScaler.getWithMean() == standardScaler.getWithMean() True >>> loadedStandardScaler.getWithStd() == standardScaler.getWithStd() True >>> modelPath = temp_path + "/standard-scaler-model" >>> model.save(modelPath) >>> loadedModel = StandardScalerModel.load(modelPath) >>> loadedModel.std == model.std True >>> loadedModel.mean == model.mean True >>> loadedModel.transform(df).take(1) == model.transform(df).take(1) True """ @keyword_only def __init__(self, *, withMean=False, withStd=True, inputCol=None, outputCol=None): """ __init__(self, \\*, withMean=False, withStd=True, inputCol=None, outputCol=None) """ super(StandardScaler, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StandardScaler", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, *, withMean=False, withStd=True, inputCol=None, outputCol=None): """ setParams(self, \\*, withMean=False, withStd=True, inputCol=None, outputCol=None) Sets params for this StandardScaler. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.4.0") def setWithMean(self, value): """ Sets the value of :py:attr:`withMean`. """ return self._set(withMean=value) @since("1.4.0") def setWithStd(self, value): """ Sets the value of :py:attr:`withStd`. """ return self._set(withStd=value) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. 
""" return self._set(outputCol=value) def _create_model(self, java_model): return StandardScalerModel(java_model) class StandardScalerModel(JavaModel, _StandardScalerParams, JavaMLReadable, JavaMLWritable): """ Model fitted by :py:class:`StandardScaler`. .. versionadded:: 1.4.0 """ def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @property @since("2.0.0") def std(self): """ Standard deviation of the StandardScalerModel. """ return self._call_java("std") @property @since("2.0.0") def mean(self): """ Mean of the StandardScalerModel. """ return self._call_java("mean") class _StringIndexerParams(JavaParams, HasHandleInvalid, HasInputCol, HasOutputCol, HasInputCols, HasOutputCols): """ Params for :py:class:`StringIndexer` and :py:class:`StringIndexerModel`. """ stringOrderType = Param(Params._dummy(), "stringOrderType", "How to order labels of string column. The first label after " + "ordering is assigned an index of 0. Supported options: " + "frequencyDesc, frequencyAsc, alphabetDesc, alphabetAsc. " + "Default is frequencyDesc. In case of equal frequency when " + "under frequencyDesc/Asc, the strings are further sorted " + "alphabetically", typeConverter=TypeConverters.toString) handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid data (unseen " + "or NULL values) in features and label column of string type. " + "Options are 'skip' (filter out rows with invalid data), " + "error (throw an error), or 'keep' (put invalid data " + "in a special additional bucket, at index numLabels).", typeConverter=TypeConverters.toString) def __init__(self, *args): super(_StringIndexerParams, self).__init__(*args) self._setDefault(handleInvalid="error", stringOrderType="frequencyDesc") @since("2.3.0") def getStringOrderType(self): """ Gets the value of :py:attr:`stringOrderType` or its default value 'frequencyDesc'. """ return self.getOrDefault(self.stringOrderType) @inherit_doc class StringIndexer(JavaEstimator, _StringIndexerParams, JavaMLReadable, JavaMLWritable): """ A label indexer that maps a string column of labels to an ML column of label indices. If the input column is numeric, we cast it to string and index the string values. The indices are in [0, numLabels). By default, this is ordered by label frequencies so the most frequent label gets index 0. The ordering behavior is controlled by setting :py:attr:`stringOrderType`. Its default value is 'frequencyDesc'. .. versionadded:: 1.4.0 Examples -------- >>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed", ... stringOrderType="frequencyDesc") >>> stringIndexer.setHandleInvalid("error") StringIndexer... >>> model = stringIndexer.fit(stringIndDf) >>> model.setHandleInvalid("error") StringIndexerModel... >>> td = model.transform(stringIndDf) >>> sorted(set([(i[0], i[1]) for i in td.select(td.id, td.indexed).collect()]), ... key=lambda x: x[0]) [(0, 0.0), (1, 2.0), (2, 1.0), (3, 0.0), (4, 0.0), (5, 1.0)] >>> inverter = IndexToString(inputCol="indexed", outputCol="label2", labels=model.labels) >>> itd = inverter.transform(td) >>> sorted(set([(i[0], str(i[1])) for i in itd.select(itd.id, itd.label2).collect()]), ... 
key=lambda x: x[0]) [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'a'), (4, 'a'), (5, 'c')] >>> stringIndexerPath = temp_path + "/string-indexer" >>> stringIndexer.save(stringIndexerPath) >>> loadedIndexer = StringIndexer.load(stringIndexerPath) >>> loadedIndexer.getHandleInvalid() == stringIndexer.getHandleInvalid() True >>> modelPath = temp_path + "/string-indexer-model" >>> model.save(modelPath) >>> loadedModel = StringIndexerModel.load(modelPath) >>> loadedModel.labels == model.labels True >>> indexToStringPath = temp_path + "/index-to-string" >>> inverter.save(indexToStringPath) >>> loadedInverter = IndexToString.load(indexToStringPath) >>> loadedInverter.getLabels() == inverter.getLabels() True >>> loadedModel.transform(stringIndDf).take(1) == model.transform(stringIndDf).take(1) True >>> stringIndexer.getStringOrderType() 'frequencyDesc' >>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed", handleInvalid="error", ... stringOrderType="alphabetDesc") >>> model = stringIndexer.fit(stringIndDf) >>> td = model.transform(stringIndDf) >>> sorted(set([(i[0], i[1]) for i in td.select(td.id, td.indexed).collect()]), ... key=lambda x: x[0]) [(0, 2.0), (1, 1.0), (2, 0.0), (3, 2.0), (4, 2.0), (5, 0.0)] >>> fromlabelsModel = StringIndexerModel.from_labels(["a", "b", "c"], ... inputCol="label", outputCol="indexed", handleInvalid="error") >>> result = fromlabelsModel.transform(stringIndDf) >>> sorted(set([(i[0], i[1]) for i in result.select(result.id, result.indexed).collect()]), ... key=lambda x: x[0]) [(0, 0.0), (1, 1.0), (2, 2.0), (3, 0.0), (4, 0.0), (5, 2.0)] >>> testData = sc.parallelize([Row(id=0, label1="a", label2="e"), ... Row(id=1, label1="b", label2="f"), ... Row(id=2, label1="c", label2="e"), ... Row(id=3, label1="a", label2="f"), ... Row(id=4, label1="a", label2="f"), ... Row(id=5, label1="c", label2="f")], 3) >>> multiRowDf = spark.createDataFrame(testData) >>> inputs = ["label1", "label2"] >>> outputs = ["index1", "index2"] >>> stringIndexer = StringIndexer(inputCols=inputs, outputCols=outputs) >>> model = stringIndexer.fit(multiRowDf) >>> result = model.transform(multiRowDf) >>> sorted(set([(i[0], i[1], i[2]) for i in result.select(result.id, result.index1, ... result.index2).collect()]), key=lambda x: x[0]) [(0, 0.0, 1.0), (1, 2.0, 0.0), (2, 1.0, 1.0), (3, 0.0, 0.0), (4, 0.0, 0.0), (5, 1.0, 0.0)] >>> fromlabelsModel = StringIndexerModel.from_arrays_of_labels([["a", "b", "c"], ["e", "f"]], ... inputCols=inputs, outputCols=outputs) >>> result = fromlabelsModel.transform(multiRowDf) >>> sorted(set([(i[0], i[1], i[2]) for i in result.select(result.id, result.index1, ... 
result.index2).collect()]), key=lambda x: x[0]) [(0, 0.0, 0.0), (1, 1.0, 1.0), (2, 2.0, 0.0), (3, 0.0, 1.0), (4, 0.0, 1.0), (5, 2.0, 1.0)] """ @keyword_only def __init__(self, *, inputCol=None, outputCol=None, inputCols=None, outputCols=None, handleInvalid="error", stringOrderType="frequencyDesc"): """ __init__(self, \\*, inputCol=None, outputCol=None, inputCols=None, outputCols=None, \ handleInvalid="error", stringOrderType="frequencyDesc") """ super(StringIndexer, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StringIndexer", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, *, inputCol=None, outputCol=None, inputCols=None, outputCols=None, handleInvalid="error", stringOrderType="frequencyDesc"): """ setParams(self, \\*, inputCol=None, outputCol=None, inputCols=None, outputCols=None, \ handleInvalid="error", stringOrderType="frequencyDesc") Sets params for this StringIndexer. """ kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return StringIndexerModel(java_model) @since("2.3.0") def setStringOrderType(self, value): """ Sets the value of :py:attr:`stringOrderType`. """ return self._set(stringOrderType=value) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) @since("3.0.0") def setInputCols(self, value): """ Sets the value of :py:attr:`inputCols`. """ return self._set(inputCols=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @since("3.0.0") def setOutputCols(self, value): """ Sets the value of :py:attr:`outputCols`. """ return self._set(outputCols=value) def setHandleInvalid(self, value): """ Sets the value of :py:attr:`handleInvalid`. """ return self._set(handleInvalid=value) class StringIndexerModel(JavaModel, _StringIndexerParams, JavaMLReadable, JavaMLWritable): """ Model fitted by :py:class:`StringIndexer`. .. versionadded:: 1.4.0 """ def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) @since("3.0.0") def setInputCols(self, value): """ Sets the value of :py:attr:`inputCols`. """ return self._set(inputCols=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @since("3.0.0") def setOutputCols(self, value): """ Sets the value of :py:attr:`outputCols`. """ return self._set(outputCols=value) @since("2.4.0") def setHandleInvalid(self, value): """ Sets the value of :py:attr:`handleInvalid`. """ return self._set(handleInvalid=value) @classmethod @since("2.4.0") def from_labels(cls, labels, inputCol, outputCol=None, handleInvalid=None): """ Construct the model directly from an array of label strings, requires an active SparkContext. """ sc = SparkContext._active_spark_context java_class = sc._gateway.jvm.java.lang.String jlabels = StringIndexerModel._new_java_array(labels, java_class) model = StringIndexerModel._create_from_java_class( "org.apache.spark.ml.feature.StringIndexerModel", jlabels) model.setInputCol(inputCol) if outputCol is not None: model.setOutputCol(outputCol) if handleInvalid is not None: model.setHandleInvalid(handleInvalid) return model @classmethod @since("3.0.0") def from_arrays_of_labels(cls, arrayOfLabels, inputCols, outputCols=None, handleInvalid=None): """ Construct the model directly from an array of array of label strings, requires an active SparkContext. 
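# --- Illustrative aside (not part of the Spark API; the helper name is made up) ---
# A plain-Python sketch of the default 'frequencyDesc' ordering used by
# StringIndexer: the most frequent label gets index 0 and ties are broken
# alphabetically, as described by the stringOrderType Param. The label list used
# below is only inferred from the doctest output above (stringIndDf itself is
# defined elsewhere in the module's test fixtures).
from collections import Counter

def _string_index_sketch(labels):
    counts = Counter(labels)
    ordered = sorted(counts, key=lambda lab: (-counts[lab], lab))
    mapping = {lab: float(i) for i, lab in enumerate(ordered)}
    return [mapping[lab] for lab in labels]

# _string_index_sketch(['a', 'b', 'c', 'a', 'a', 'c'])
# -> [0.0, 2.0, 1.0, 0.0, 0.0, 1.0]
# ('a' appears 3 times -> 0.0, 'c' twice -> 1.0, 'b' once -> 2.0), the same
# assignment the frequencyDesc example above produces.
# -----------------------------------------------------------------------------------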
""" sc = SparkContext._active_spark_context java_class = sc._gateway.jvm.java.lang.String jlabels = StringIndexerModel._new_java_array(arrayOfLabels, java_class) model = StringIndexerModel._create_from_java_class( "org.apache.spark.ml.feature.StringIndexerModel", jlabels) model.setInputCols(inputCols) if outputCols is not None: model.setOutputCols(outputCols) if handleInvalid is not None: model.setHandleInvalid(handleInvalid) return model @property @since("1.5.0") def labels(self): """ Ordered list of labels, corresponding to indices to be assigned. .. deprecated:: 3.1.0 It will be removed in future versions. Use `labelsArray` method instead. """ return self._call_java("labels") @property @since("3.0.2") def labelsArray(self): """ Array of ordered list of labels, corresponding to indices to be assigned for each input column. """ return self._call_java("labelsArray") @inherit_doc class IndexToString(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable): """ A :py:class:`pyspark.ml.base.Transformer` that maps a column of indices back to a new column of corresponding string values. The index-string mapping is either from the ML attributes of the input column, or from user-supplied labels (which take precedence over ML attributes). .. versionadded:: 1.6.0 See Also -------- StringIndexer : for converting categorical values into category indices """ labels = Param(Params._dummy(), "labels", "Optional array of labels specifying index-string mapping." + " If not provided or if empty, then metadata from inputCol is used instead.", typeConverter=TypeConverters.toListString) @keyword_only def __init__(self, *, inputCol=None, outputCol=None, labels=None): """ __init__(self, \\*, inputCol=None, outputCol=None, labels=None) """ super(IndexToString, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.IndexToString", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.6.0") def setParams(self, *, inputCol=None, outputCol=None, labels=None): """ setParams(self, \\*, inputCol=None, outputCol=None, labels=None) Sets params for this IndexToString. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.6.0") def setLabels(self, value): """ Sets the value of :py:attr:`labels`. """ return self._set(labels=value) @since("1.6.0") def getLabels(self): """ Gets the value of :py:attr:`labels` or its default value. """ return self.getOrDefault(self.labels) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) class StopWordsRemover(JavaTransformer, HasInputCol, HasOutputCol, HasInputCols, HasOutputCols, JavaMLReadable, JavaMLWritable): """ A feature transformer that filters out stop words from input. Since 3.0.0, :py:class:`StopWordsRemover` can filter out multiple columns at once by setting the :py:attr:`inputCols` parameter. Note that when both the :py:attr:`inputCol` and :py:attr:`inputCols` parameters are set, an Exception will be thrown. .. versionadded:: 1.6.0 Notes ----- null values from input array are preserved unless adding null to stopWords explicitly. Examples -------- >>> df = spark.createDataFrame([(["a", "b", "c"],)], ["text"]) >>> remover = StopWordsRemover(stopWords=["b"]) >>> remover.setInputCol("text") StopWordsRemover... >>> remover.setOutputCol("words") StopWordsRemover... 
>>> remover.transform(df).head().words == ['a', 'c'] True >>> stopWordsRemoverPath = temp_path + "/stopwords-remover" >>> remover.save(stopWordsRemoverPath) >>> loadedRemover = StopWordsRemover.load(stopWordsRemoverPath) >>> loadedRemover.getStopWords() == remover.getStopWords() True >>> loadedRemover.getCaseSensitive() == remover.getCaseSensitive() True >>> loadedRemover.transform(df).take(1) == remover.transform(df).take(1) True >>> df2 = spark.createDataFrame([(["a", "b", "c"], ["a", "b"])], ["text1", "text2"]) >>> remover2 = StopWordsRemover(stopWords=["b"]) >>> remover2.setInputCols(["text1", "text2"]).setOutputCols(["words1", "words2"]) StopWordsRemover... >>> remover2.transform(df2).show() +---------+------+------+------+ | text1| text2|words1|words2| +---------+------+------+------+ |[a, b, c]|[a, b]|[a, c]| [a]| +---------+------+------+------+ ... """ stopWords = Param(Params._dummy(), "stopWords", "The words to be filtered out", typeConverter=TypeConverters.toListString) caseSensitive = Param(Params._dummy(), "caseSensitive", "whether to do a case sensitive " + "comparison over the stop words", typeConverter=TypeConverters.toBoolean) locale = Param(Params._dummy(), "locale", "locale of the input. ignored when case sensitive " + "is true", typeConverter=TypeConverters.toString) @keyword_only def __init__(self, *, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False, locale=None, inputCols=None, outputCols=None): """ __init__(self, \\*, inputCol=None, outputCol=None, stopWords=None, caseSensitive=false, \ locale=None, inputCols=None, outputCols=None) """ super(StopWordsRemover, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StopWordsRemover", self.uid) self._setDefault(stopWords=StopWordsRemover.loadDefaultStopWords("english"), caseSensitive=False, locale=self._java_obj.getLocale()) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.6.0") def setParams(self, *, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False, locale=None, inputCols=None, outputCols=None): """ setParams(self, \\*, inputCol=None, outputCol=None, stopWords=None, caseSensitive=false, \ locale=None, inputCols=None, outputCols=None) Sets params for this StopWordRemover. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.6.0") def setStopWords(self, value): """ Sets the value of :py:attr:`stopWords`. """ return self._set(stopWords=value) @since("1.6.0") def getStopWords(self): """ Gets the value of :py:attr:`stopWords` or its default value. """ return self.getOrDefault(self.stopWords) @since("1.6.0") def setCaseSensitive(self, value): """ Sets the value of :py:attr:`caseSensitive`. """ return self._set(caseSensitive=value) @since("1.6.0") def getCaseSensitive(self): """ Gets the value of :py:attr:`caseSensitive` or its default value. """ return self.getOrDefault(self.caseSensitive) @since("2.4.0") def setLocale(self, value): """ Sets the value of :py:attr:`locale`. """ return self._set(locale=value) @since("2.4.0") def getLocale(self): """ Gets the value of :py:attr:`locale`. """ return self.getOrDefault(self.locale) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @since("3.0.0") def setInputCols(self, value): """ Sets the value of :py:attr:`inputCols`. 
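# --- Illustrative aside (not part of the Spark API; the helper name is made up) ---
# A pure-Python sketch of the stop-word filtering documented above. With
# caseSensitive=False (the default) the comparison is done on lower-cased copies
# while the original tokens are kept in the output; the locale Param only
# affects how Spark lower-cases and is ignored in this sketch.
def _remove_stop_words_sketch(tokens, stop_words, case_sensitive=False):
    if case_sensitive:
        stops = set(stop_words)
        return [t for t in tokens if t not in stops]
    stops = {w.lower() for w in stop_words}
    return [t for t in tokens if t.lower() not in stops]

# _remove_stop_words_sketch(["a", "b", "c"], ["b"]) -> ['a', 'c'], matching the
# remover doctest above.
# -----------------------------------------------------------------------------------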
""" return self._set(inputCols=value) @since("3.0.0") def setOutputCols(self, value): """ Sets the value of :py:attr:`outputCols`. """ return self._set(outputCols=value) @staticmethod @since("2.0.0") def loadDefaultStopWords(language): """ Loads the default stop words for the given language. Supported languages: danish, dutch, english, finnish, french, german, hungarian, italian, norwegian, portuguese, russian, spanish, swedish, turkish """ stopWordsObj = _jvm().org.apache.spark.ml.feature.StopWordsRemover return list(stopWordsObj.loadDefaultStopWords(language)) @inherit_doc class Tokenizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable): """ A tokenizer that converts the input string to lowercase and then splits it by white spaces. .. versionadded:: 1.3.0 Examples -------- >>> df = spark.createDataFrame([("a b c",)], ["text"]) >>> tokenizer = Tokenizer(outputCol="words") >>> tokenizer.setInputCol("text") Tokenizer... >>> tokenizer.transform(df).head() Row(text='a b c', words=['a', 'b', 'c']) >>> # Change a parameter. >>> tokenizer.setParams(outputCol="tokens").transform(df).head() Row(text='a b c', tokens=['a', 'b', 'c']) >>> # Temporarily modify a parameter. >>> tokenizer.transform(df, {tokenizer.outputCol: "words"}).head() Row(text='a b c', words=['a', 'b', 'c']) >>> tokenizer.transform(df).head() Row(text='a b c', tokens=['a', 'b', 'c']) >>> # Must use keyword arguments to specify params. >>> tokenizer.setParams("text") Traceback (most recent call last): ... TypeError: Method setParams forces keyword arguments. >>> tokenizerPath = temp_path + "/tokenizer" >>> tokenizer.save(tokenizerPath) >>> loadedTokenizer = Tokenizer.load(tokenizerPath) >>> loadedTokenizer.transform(df).head().tokens == tokenizer.transform(df).head().tokens True """ @keyword_only def __init__(self, *, inputCol=None, outputCol=None): """ __init__(self, \\*, inputCol=None, outputCol=None) """ super(Tokenizer, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Tokenizer", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.3.0") def setParams(self, *, inputCol=None, outputCol=None): """ setParams(self, \\*, inputCol=None, outputCol=None) Sets params for this Tokenizer. """ kwargs = self._input_kwargs return self._set(**kwargs) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @inherit_doc class VectorAssembler(JavaTransformer, HasInputCols, HasOutputCol, HasHandleInvalid, JavaMLReadable, JavaMLWritable): """ A feature transformer that merges multiple columns into a vector column. .. versionadded:: 1.4.0 Examples -------- >>> df = spark.createDataFrame([(1, 0, 3)], ["a", "b", "c"]) >>> vecAssembler = VectorAssembler(outputCol="features") >>> vecAssembler.setInputCols(["a", "b", "c"]) VectorAssembler... 
>>> vecAssembler.transform(df).head().features DenseVector([1.0, 0.0, 3.0]) >>> vecAssembler.setParams(outputCol="freqs").transform(df).head().freqs DenseVector([1.0, 0.0, 3.0]) >>> params = {vecAssembler.inputCols: ["b", "a"], vecAssembler.outputCol: "vector"} >>> vecAssembler.transform(df, params).head().vector DenseVector([0.0, 1.0]) >>> vectorAssemblerPath = temp_path + "/vector-assembler" >>> vecAssembler.save(vectorAssemblerPath) >>> loadedAssembler = VectorAssembler.load(vectorAssemblerPath) >>> loadedAssembler.transform(df).head().freqs == vecAssembler.transform(df).head().freqs True >>> dfWithNullsAndNaNs = spark.createDataFrame( ... [(1.0, 2.0, None), (3.0, float("nan"), 4.0), (5.0, 6.0, 7.0)], ["a", "b", "c"]) >>> vecAssembler2 = VectorAssembler(inputCols=["a", "b", "c"], outputCol="features", ... handleInvalid="keep") >>> vecAssembler2.transform(dfWithNullsAndNaNs).show() +---+---+----+-------------+ | a| b| c| features| +---+---+----+-------------+ |1.0|2.0|null|[1.0,2.0,NaN]| |3.0|NaN| 4.0|[3.0,NaN,4.0]| |5.0|6.0| 7.0|[5.0,6.0,7.0]| +---+---+----+-------------+ ... >>> vecAssembler2.setParams(handleInvalid="skip").transform(dfWithNullsAndNaNs).show() +---+---+---+-------------+ | a| b| c| features| +---+---+---+-------------+ |5.0|6.0|7.0|[5.0,6.0,7.0]| +---+---+---+-------------+ ... """ handleInvalid = Param(Params._dummy(), "handleInvalid", "How to handle invalid data (NULL " + "and NaN values). Options are 'skip' (filter out rows with invalid " + "data), 'error' (throw an error), or 'keep' (return relevant number " + "of NaN in the output). Column lengths are taken from the size of ML " + "Attribute Group, which can be set using `VectorSizeHint` in a " + "pipeline before `VectorAssembler`. Column lengths can also be " + "inferred from first rows of the data since it is safe to do so but " + "only in case of 'error' or 'skip').", typeConverter=TypeConverters.toString) @keyword_only def __init__(self, *, inputCols=None, outputCol=None, handleInvalid="error"): """ __init__(self, \\*, inputCols=None, outputCol=None, handleInvalid="error") """ super(VectorAssembler, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorAssembler", self.uid) self._setDefault(handleInvalid="error") kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, *, inputCols=None, outputCol=None, handleInvalid="error"): """ setParams(self, \\*, inputCols=None, outputCol=None, handleInvalid="error") Sets params for this VectorAssembler. """ kwargs = self._input_kwargs return self._set(**kwargs) def setInputCols(self, value): """ Sets the value of :py:attr:`inputCols`. """ return self._set(inputCols=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) def setHandleInvalid(self, value): """ Sets the value of :py:attr:`handleInvalid`. """ return self._set(handleInvalid=value) class _VectorIndexerParams(HasInputCol, HasOutputCol, HasHandleInvalid): """ Params for :py:class:`VectorIndexer` and :py:class:`VectorIndexerModel`. .. versionadded:: 3.0.0 """ maxCategories = Param(Params._dummy(), "maxCategories", "Threshold for the number of values a categorical feature can take " + "(>= 2). If a feature is found to have > maxCategories values, then " + "it is declared continuous.", typeConverter=TypeConverters.toInt) handleInvalid = Param(Params._dummy(), "handleInvalid", "How to handle invalid data " + "(unseen labels or NULL values). 
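# --- Illustrative aside (not part of the Spark API; the helper name is made up) ---
# Per row, VectorAssembler concatenates the input columns into one flat vector.
# The handleInvalid modes from the Param above are mirrored loosely here for
# scalar columns only: 'keep' turns missing values into NaN, 'skip' drops the
# row, 'error' raises.
import math

def _assemble_row_sketch(row, handle_invalid="error"):
    assembled = []
    for value in row:
        if value is None or (isinstance(value, float) and math.isnan(value)):
            if handle_invalid == "error":
                raise ValueError("invalid value in row: %r" % (row,))
            if handle_invalid == "skip":
                return None          # caller filters out None rows
            value = float("nan")     # 'keep'
        assembled.append(float(value))
    return assembled

# _assemble_row_sketch((1.0, 2.0, None), "keep") -> [1.0, 2.0, nan]
# _assemble_row_sketch((5.0, 6.0, 7.0))          -> [5.0, 6.0, 7.0]
# which mirrors the 'keep' and 'skip' doctest outputs above.
# -----------------------------------------------------------------------------------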
Options are 'skip' (filter out " + "rows with invalid data), 'error' (throw an error), or 'keep' (put " + "invalid data in a special additional bucket, at index of the number " + "of categories of the feature).", typeConverter=TypeConverters.toString) def __init__(self, *args): super(_VectorIndexerParams, self).__init__(*args) self._setDefault(maxCategories=20, handleInvalid="error") @since("1.4.0") def getMaxCategories(self): """ Gets the value of maxCategories or its default value. """ return self.getOrDefault(self.maxCategories) @inherit_doc class VectorIndexer(JavaEstimator, _VectorIndexerParams, JavaMLReadable, JavaMLWritable): """ Class for indexing categorical feature columns in a dataset of `Vector`. This has 2 usage modes: - Automatically identify categorical features (default behavior) - This helps process a dataset of unknown vectors into a dataset with some continuous features and some categorical features. The choice between continuous and categorical is based upon a maxCategories parameter. - Set maxCategories to the maximum number of categorical any categorical feature should have. - E.g.: Feature 0 has unique values {-1.0, 0.0}, and feature 1 values {1.0, 3.0, 5.0}. If maxCategories = 2, then feature 0 will be declared categorical and use indices {0, 1}, and feature 1 will be declared continuous. - Index all features, if all features are categorical - If maxCategories is set to be very large, then this will build an index of unique values for all features. - Warning: This can cause problems if features are continuous since this will collect ALL unique values to the driver. - E.g.: Feature 0 has unique values {-1.0, 0.0}, and feature 1 values {1.0, 3.0, 5.0}. If maxCategories >= 3, then both features will be declared categorical. This returns a model which can transform categorical features to use 0-based indices. Index stability: - This is not guaranteed to choose the same category index across multiple runs. - If a categorical feature includes value 0, then this is guaranteed to map value 0 to index 0. This maintains vector sparsity. - More stability may be added in the future. TODO: Future extensions: The following functionality is planned for the future: - Preserve metadata in transform; if a feature's metadata is already present, do not recompute. - Specify certain features to not index, either via a parameter or via existing metadata. - Add warning if a categorical feature has only 1 category. .. versionadded:: 1.4.0 Examples -------- >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame([(Vectors.dense([-1.0, 0.0]),), ... (Vectors.dense([0.0, 1.0]),), (Vectors.dense([0.0, 2.0]),)], ["a"]) >>> indexer = VectorIndexer(maxCategories=2, inputCol="a") >>> indexer.setOutputCol("indexed") VectorIndexer... >>> model = indexer.fit(df) >>> indexer.getHandleInvalid() 'error' >>> model.setOutputCol("output") VectorIndexerModel... 
>>> model.transform(df).head().output DenseVector([1.0, 0.0]) >>> model.numFeatures 2 >>> model.categoryMaps {0: {0.0: 0, -1.0: 1}} >>> indexer.setParams(outputCol="test").fit(df).transform(df).collect()[1].test DenseVector([0.0, 1.0]) >>> params = {indexer.maxCategories: 3, indexer.outputCol: "vector"} >>> model2 = indexer.fit(df, params) >>> model2.transform(df).head().vector DenseVector([1.0, 0.0]) >>> vectorIndexerPath = temp_path + "/vector-indexer" >>> indexer.save(vectorIndexerPath) >>> loadedIndexer = VectorIndexer.load(vectorIndexerPath) >>> loadedIndexer.getMaxCategories() == indexer.getMaxCategories() True >>> modelPath = temp_path + "/vector-indexer-model" >>> model.save(modelPath) >>> loadedModel = VectorIndexerModel.load(modelPath) >>> loadedModel.numFeatures == model.numFeatures True >>> loadedModel.categoryMaps == model.categoryMaps True >>> loadedModel.transform(df).take(1) == model.transform(df).take(1) True >>> dfWithInvalid = spark.createDataFrame([(Vectors.dense([3.0, 1.0]),)], ["a"]) >>> indexer.getHandleInvalid() 'error' >>> model3 = indexer.setHandleInvalid("skip").fit(df) >>> model3.transform(dfWithInvalid).count() 0 >>> model4 = indexer.setParams(handleInvalid="keep", outputCol="indexed").fit(df) >>> model4.transform(dfWithInvalid).head().indexed DenseVector([2.0, 1.0]) """ @keyword_only def __init__(self, *, maxCategories=20, inputCol=None, outputCol=None, handleInvalid="error"): """ __init__(self, \\*, maxCategories=20, inputCol=None, outputCol=None, handleInvalid="error") """ super(VectorIndexer, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorIndexer", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, *, maxCategories=20, inputCol=None, outputCol=None, handleInvalid="error"): """ setParams(self, \\*, maxCategories=20, inputCol=None, outputCol=None, handleInvalid="error") Sets params for this VectorIndexer. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.4.0") def setMaxCategories(self, value): """ Sets the value of :py:attr:`maxCategories`. """ return self._set(maxCategories=value) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) def setHandleInvalid(self, value): """ Sets the value of :py:attr:`handleInvalid`. """ return self._set(handleInvalid=value) def _create_model(self, java_model): return VectorIndexerModel(java_model) class VectorIndexerModel(JavaModel, _VectorIndexerParams, JavaMLReadable, JavaMLWritable): """ Model fitted by :py:class:`VectorIndexer`. Transform categorical features to use 0-based indices instead of their original values. - Categorical features are mapped to indices. - Continuous features (columns) are left unchanged. This also appends metadata to the output column, marking features as Numeric (continuous), Nominal (categorical), or Binary (either continuous or categorical). Non-ML metadata is not carried over from the input to the output column. This maintains vector sparsity. .. versionadded:: 1.4.0 """ @since("3.0.0") def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) @since("3.0.0") def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. 
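# --- Illustrative aside (not part of the Spark API; the helper name is made up) ---
# The core decision VectorIndexer makes per feature, as described above: a
# feature with at most maxCategories distinct values is treated as categorical
# and its values are mapped to 0-based indices (keeping 0.0 at index 0 when
# present, which preserves sparsity); otherwise it is left continuous. Spark does
# not guarantee the ordering of the remaining categories, so this is only an
# approximation.
def _category_map_sketch(feature_values, max_categories=20):
    distinct = set(feature_values)
    if len(distinct) > max_categories:
        return None  # declared continuous, left unchanged
    ordered = sorted(distinct, key=lambda v: (v != 0.0, v))  # 0.0 first if present
    return {v: i for i, v in enumerate(ordered)}

# Feature 0 of the doctest above takes values {-1.0, 0.0}; with maxCategories=2
# it becomes categorical with {0.0: 0, -1.0: 1}, matching model.categoryMaps.
# Feature 1 takes 3 distinct values, so it stays continuous.
# -----------------------------------------------------------------------------------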
""" return self._set(outputCol=value) @property @since("1.4.0") def numFeatures(self): """ Number of features, i.e., length of Vectors which this transforms. """ return self._call_java("numFeatures") @property @since("1.4.0") def categoryMaps(self): """ Feature value index. Keys are categorical feature indices (column indices). Values are maps from original features values to 0-based category indices. If a feature is not in this map, it is treated as continuous. """ return self._call_java("javaCategoryMaps") @inherit_doc class VectorSlicer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable): """ This class takes a feature vector and outputs a new feature vector with a subarray of the original features. The subset of features can be specified with either indices (`setIndices()`) or names (`setNames()`). At least one feature must be selected. Duplicate features are not allowed, so there can be no overlap between selected indices and names. The output vector will order features with the selected indices first (in the order given), followed by the selected names (in the order given). .. versionadded:: 1.6.0 Examples -------- >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame([ ... (Vectors.dense([-2.0, 2.3, 0.0, 0.0, 1.0]),), ... (Vectors.dense([0.0, 0.0, 0.0, 0.0, 0.0]),), ... (Vectors.dense([0.6, -1.1, -3.0, 4.5, 3.3]),)], ["features"]) >>> vs = VectorSlicer(outputCol="sliced", indices=[1, 4]) >>> vs.setInputCol("features") VectorSlicer... >>> vs.transform(df).head().sliced DenseVector([2.3, 1.0]) >>> vectorSlicerPath = temp_path + "/vector-slicer" >>> vs.save(vectorSlicerPath) >>> loadedVs = VectorSlicer.load(vectorSlicerPath) >>> loadedVs.getIndices() == vs.getIndices() True >>> loadedVs.getNames() == vs.getNames() True >>> loadedVs.transform(df).take(1) == vs.transform(df).take(1) True """ indices = Param(Params._dummy(), "indices", "An array of indices to select features from " + "a vector column. There can be no overlap with names.", typeConverter=TypeConverters.toListInt) names = Param(Params._dummy(), "names", "An array of feature names to select features from " + "a vector column. These names must be specified by ML " + "org.apache.spark.ml.attribute.Attribute. There can be no overlap with " + "indices.", typeConverter=TypeConverters.toListString) @keyword_only def __init__(self, *, inputCol=None, outputCol=None, indices=None, names=None): """ __init__(self, \\*, inputCol=None, outputCol=None, indices=None, names=None) """ super(VectorSlicer, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorSlicer", self.uid) self._setDefault(indices=[], names=[]) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.6.0") def setParams(self, *, inputCol=None, outputCol=None, indices=None, names=None): """ setParams(self, \\*, inputCol=None, outputCol=None, indices=None, names=None): Sets params for this VectorSlicer. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.6.0") def setIndices(self, value): """ Sets the value of :py:attr:`indices`. """ return self._set(indices=value) @since("1.6.0") def getIndices(self): """ Gets the value of indices or its default value. """ return self.getOrDefault(self.indices) @since("1.6.0") def setNames(self, value): """ Sets the value of :py:attr:`names`. """ return self._set(names=value) @since("1.6.0") def getNames(self): """ Gets the value of names or its default value. 
""" return self.getOrDefault(self.names) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) class _Word2VecParams(HasStepSize, HasMaxIter, HasSeed, HasInputCol, HasOutputCol): """ Params for :py:class:`Word2Vec` and :py:class:`Word2VecModel`. .. versionadded:: 3.0.0 """ vectorSize = Param(Params._dummy(), "vectorSize", "the dimension of codes after transforming from words", typeConverter=TypeConverters.toInt) numPartitions = Param(Params._dummy(), "numPartitions", "number of partitions for sentences of words", typeConverter=TypeConverters.toInt) minCount = Param(Params._dummy(), "minCount", "the minimum number of times a token must appear to be included in the " + "word2vec model's vocabulary", typeConverter=TypeConverters.toInt) windowSize = Param(Params._dummy(), "windowSize", "the window size (context words from [-window, window]). Default value is 5", typeConverter=TypeConverters.toInt) maxSentenceLength = Param(Params._dummy(), "maxSentenceLength", "Maximum length (in words) of each sentence in the input data. " + "Any sentence longer than this threshold will " + "be divided into chunks up to the size.", typeConverter=TypeConverters.toInt) def __init__(self, *args): super(_Word2VecParams, self).__init__(*args) self._setDefault(vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1, windowSize=5, maxSentenceLength=1000) @since("1.4.0") def getVectorSize(self): """ Gets the value of vectorSize or its default value. """ return self.getOrDefault(self.vectorSize) @since("1.4.0") def getNumPartitions(self): """ Gets the value of numPartitions or its default value. """ return self.getOrDefault(self.numPartitions) @since("1.4.0") def getMinCount(self): """ Gets the value of minCount or its default value. """ return self.getOrDefault(self.minCount) @since("2.0.0") def getWindowSize(self): """ Gets the value of windowSize or its default value. """ return self.getOrDefault(self.windowSize) @since("2.0.0") def getMaxSentenceLength(self): """ Gets the value of maxSentenceLength or its default value. """ return self.getOrDefault(self.maxSentenceLength) @inherit_doc class Word2Vec(JavaEstimator, _Word2VecParams, JavaMLReadable, JavaMLWritable): """ Word2Vec trains a model of `Map(String, Vector)`, i.e. transforms a word into a code for further natural language processing or machine learning process. .. versionadded:: 1.4.0 Examples -------- >>> sent = ("a b " * 100 + "a c " * 10).split(" ") >>> doc = spark.createDataFrame([(sent,), (sent,)], ["sentence"]) >>> word2Vec = Word2Vec(vectorSize=5, seed=42, inputCol="sentence", outputCol="model") >>> word2Vec.setMaxIter(10) Word2Vec... >>> word2Vec.getMaxIter() 10 >>> word2Vec.clear(word2Vec.maxIter) >>> model = word2Vec.fit(doc) >>> model.getMinCount() 5 >>> model.setInputCol("sentence") Word2VecModel... >>> model.getVectors().show() +----+--------------------+ |word| vector| +----+--------------------+ | a|[0.0951... | b|[-1.202... | c|[0.3015... +----+--------------------+ ... >>> model.findSynonymsArray("a", 2) [('b', 0.015859...), ('c', -0.568079...)] >>> from pyspark.sql.functions import format_number as fmt >>> model.findSynonyms("a", 2).select("word", fmt("similarity", 5).alias("similarity")).show() +----+----------+ |word|similarity| +----+----------+ | b| 0.01586| | c| -0.56808| +----+----------+ ... 
>>> model.transform(doc).head().model DenseVector([-0.4833, 0.1855, -0.273, -0.0509, -0.4769]) >>> word2vecPath = temp_path + "/word2vec" >>> word2Vec.save(word2vecPath) >>> loadedWord2Vec = Word2Vec.load(word2vecPath) >>> loadedWord2Vec.getVectorSize() == word2Vec.getVectorSize() True >>> loadedWord2Vec.getNumPartitions() == word2Vec.getNumPartitions() True >>> loadedWord2Vec.getMinCount() == word2Vec.getMinCount() True >>> modelPath = temp_path + "/word2vec-model" >>> model.save(modelPath) >>> loadedModel = Word2VecModel.load(modelPath) >>> loadedModel.getVectors().first().word == model.getVectors().first().word True >>> loadedModel.getVectors().first().vector == model.getVectors().first().vector True >>> loadedModel.transform(doc).take(1) == model.transform(doc).take(1) True """ @keyword_only def __init__(self, *, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1, seed=None, inputCol=None, outputCol=None, windowSize=5, maxSentenceLength=1000): """ __init__(self, \\*, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, \ maxIter=1, seed=None, inputCol=None, outputCol=None, windowSize=5, \ maxSentenceLength=1000) """ super(Word2Vec, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Word2Vec", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, *, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1, seed=None, inputCol=None, outputCol=None, windowSize=5, maxSentenceLength=1000): """ setParams(self, \\*, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1, \ seed=None, inputCol=None, outputCol=None, windowSize=5, \ maxSentenceLength=1000) Sets params for this Word2Vec. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.4.0") def setVectorSize(self, value): """ Sets the value of :py:attr:`vectorSize`. """ return self._set(vectorSize=value) @since("1.4.0") def setNumPartitions(self, value): """ Sets the value of :py:attr:`numPartitions`. """ return self._set(numPartitions=value) @since("1.4.0") def setMinCount(self, value): """ Sets the value of :py:attr:`minCount`. """ return self._set(minCount=value) @since("2.0.0") def setWindowSize(self, value): """ Sets the value of :py:attr:`windowSize`. """ return self._set(windowSize=value) @since("2.0.0") def setMaxSentenceLength(self, value): """ Sets the value of :py:attr:`maxSentenceLength`. """ return self._set(maxSentenceLength=value) def setMaxIter(self, value): """ Sets the value of :py:attr:`maxIter`. """ return self._set(maxIter=value) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) def setSeed(self, value): """ Sets the value of :py:attr:`seed`. """ return self._set(seed=value) @since("1.4.0") def setStepSize(self, value): """ Sets the value of :py:attr:`stepSize`. """ return self._set(stepSize=value) def _create_model(self, java_model): return Word2VecModel(java_model) class Word2VecModel(JavaModel, _Word2VecParams, JavaMLReadable, JavaMLWritable): """ Model fitted by :py:class:`Word2Vec`. .. versionadded:: 1.4.0 """ @since("1.5.0") def getVectors(self): """ Returns the vector representation of the words as a dataframe with two fields, word and vector. """ return self._call_java("getVectors") def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. 
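# --- Illustrative aside (not part of the Spark API; helper names are made up) -----
# findSynonyms / findSynonymsArray rank vocabulary words by cosine similarity to
# the query word's vector, as in the Word2Vec doctest above. A small sketch of
# that ranking with toy vectors:
import math

def _cosine_similarity_sketch(a, b):
    dot = sum(x * y for x, y in zip(a, b))
    norm = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b))
    return dot / norm if norm else 0.0

def _find_synonyms_sketch(query_vec, vectors, num):
    # vectors: dict mapping word -> list[float]; returns the `num` closest words.
    scored = [(w, _cosine_similarity_sketch(query_vec, v))
              for w, v in vectors.items()]
    scored.sort(key=lambda pair: pair[1], reverse=True)
    return scored[:num]

# _find_synonyms_sketch([1.0, 0.0], {"b": [0.9, 0.1], "c": [-1.0, 0.0]}, 2)
# -> [('b', 0.9938...), ('c', -1.0)]
# -----------------------------------------------------------------------------------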
""" return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @since("1.5.0") def findSynonyms(self, word, num): """ Find "num" number of words closest in similarity to "word". word can be a string or vector representation. Returns a dataframe with two fields word and similarity (which gives the cosine similarity). """ if not isinstance(word, str): word = _convert_to_vector(word) return self._call_java("findSynonyms", word, num) @since("2.3.0") def findSynonymsArray(self, word, num): """ Find "num" number of words closest in similarity to "word". word can be a string or vector representation. Returns an array with two fields word and similarity (which gives the cosine similarity). """ if not isinstance(word, str): word = _convert_to_vector(word) tuples = self._java_obj.findSynonymsArray(word, num) return list(map(lambda st: (st._1(), st._2()), list(tuples))) class _PCAParams(HasInputCol, HasOutputCol): """ Params for :py:class:`PCA` and :py:class:`PCAModel`. .. versionadded:: 3.0.0 """ k = Param(Params._dummy(), "k", "the number of principal components", typeConverter=TypeConverters.toInt) @since("1.5.0") def getK(self): """ Gets the value of k or its default value. """ return self.getOrDefault(self.k) @inherit_doc class PCA(JavaEstimator, _PCAParams, JavaMLReadable, JavaMLWritable): """ PCA trains a model to project vectors to a lower dimensional space of the top :py:attr:`k` principal components. .. versionadded:: 1.5.0 Examples -------- >>> from pyspark.ml.linalg import Vectors >>> data = [(Vectors.sparse(5, [(1, 1.0), (3, 7.0)]),), ... (Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),), ... (Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0]),)] >>> df = spark.createDataFrame(data,["features"]) >>> pca = PCA(k=2, inputCol="features") >>> pca.setOutputCol("pca_features") PCA... >>> model = pca.fit(df) >>> model.getK() 2 >>> model.setOutputCol("output") PCAModel... >>> model.transform(df).collect()[0].output DenseVector([1.648..., -4.013...]) >>> model.explainedVariance DenseVector([0.794..., 0.205...]) >>> pcaPath = temp_path + "/pca" >>> pca.save(pcaPath) >>> loadedPca = PCA.load(pcaPath) >>> loadedPca.getK() == pca.getK() True >>> modelPath = temp_path + "/pca-model" >>> model.save(modelPath) >>> loadedModel = PCAModel.load(modelPath) >>> loadedModel.pc == model.pc True >>> loadedModel.explainedVariance == model.explainedVariance True >>> loadedModel.transform(df).take(1) == model.transform(df).take(1) True """ @keyword_only def __init__(self, *, k=None, inputCol=None, outputCol=None): """ __init__(self, \\*, k=None, inputCol=None, outputCol=None) """ super(PCA, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.PCA", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.5.0") def setParams(self, *, k=None, inputCol=None, outputCol=None): """ setParams(self, \\*, k=None, inputCol=None, outputCol=None) Set params for this PCA. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.5.0") def setK(self, value): """ Sets the value of :py:attr:`k`. """ return self._set(k=value) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. 
""" return self._set(outputCol=value) def _create_model(self, java_model): return PCAModel(java_model) class PCAModel(JavaModel, _PCAParams, JavaMLReadable, JavaMLWritable): """ Model fitted by :py:class:`PCA`. Transforms vectors to a lower dimensional space. .. versionadded:: 1.5.0 """ @since("3.0.0") def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) @since("3.0.0") def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @property @since("2.0.0") def pc(self): """ Returns a principal components Matrix. Each column is one principal component. """ return self._call_java("pc") @property @since("2.0.0") def explainedVariance(self): """ Returns a vector of proportions of variance explained by each principal component. """ return self._call_java("explainedVariance") class _RFormulaParams(HasFeaturesCol, HasLabelCol, HasHandleInvalid): """ Params for :py:class:`RFormula` and :py:class:`RFormula`. .. versionadded:: 3.0.0 """ formula = Param(Params._dummy(), "formula", "R model formula", typeConverter=TypeConverters.toString) forceIndexLabel = Param(Params._dummy(), "forceIndexLabel", "Force to index label whether it is numeric or string", typeConverter=TypeConverters.toBoolean) stringIndexerOrderType = Param(Params._dummy(), "stringIndexerOrderType", "How to order categories of a string feature column used by " + "StringIndexer. The last category after ordering is dropped " + "when encoding strings. Supported options: frequencyDesc, " + "frequencyAsc, alphabetDesc, alphabetAsc. The default value " + "is frequencyDesc. When the ordering is set to alphabetDesc, " + "RFormula drops the same category as R when encoding strings.", typeConverter=TypeConverters.toString) handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. " + "Options are 'skip' (filter out rows with invalid values), " + "'error' (throw an error), or 'keep' (put invalid data in a special " + "additional bucket, at index numLabels).", typeConverter=TypeConverters.toString) def __init__(self, *args): super(_RFormulaParams, self).__init__(*args) self._setDefault(forceIndexLabel=False, stringIndexerOrderType="frequencyDesc", handleInvalid="error") @since("1.5.0") def getFormula(self): """ Gets the value of :py:attr:`formula`. """ return self.getOrDefault(self.formula) @since("2.1.0") def getForceIndexLabel(self): """ Gets the value of :py:attr:`forceIndexLabel`. """ return self.getOrDefault(self.forceIndexLabel) @since("2.3.0") def getStringIndexerOrderType(self): """ Gets the value of :py:attr:`stringIndexerOrderType` or its default value 'frequencyDesc'. """ return self.getOrDefault(self.stringIndexerOrderType) @inherit_doc class RFormula(JavaEstimator, _RFormulaParams, JavaMLReadable, JavaMLWritable): """ Implements the transforms required for fitting a dataset against an R model formula. Currently we support a limited subset of the R operators, including '~', '.', ':', '+', '-', '*', and '^'. .. versionadded:: 1.5.0 Notes ----- Also see the `R formula docs <http://stat.ethz.ch/R-manual/R-patched/library/stats/html/formula.html>`_. Examples -------- >>> df = spark.createDataFrame([ ... (1.0, 1.0, "a"), ... (0.0, 2.0, "b"), ... (0.0, 0.0, "a") ... 
], ["y", "x", "s"]) >>> rf = RFormula(formula="y ~ x + s") >>> model = rf.fit(df) >>> model.getLabelCol() 'label' >>> model.transform(df).show() +---+---+---+---------+-----+ | y| x| s| features|label| +---+---+---+---------+-----+ |1.0|1.0| a|[1.0,1.0]| 1.0| |0.0|2.0| b|[2.0,0.0]| 0.0| |0.0|0.0| a|[0.0,1.0]| 0.0| +---+---+---+---------+-----+ ... >>> rf.fit(df, {rf.formula: "y ~ . - s"}).transform(df).show() +---+---+---+--------+-----+ | y| x| s|features|label| +---+---+---+--------+-----+ |1.0|1.0| a| [1.0]| 1.0| |0.0|2.0| b| [2.0]| 0.0| |0.0|0.0| a| [0.0]| 0.0| +---+---+---+--------+-----+ ... >>> rFormulaPath = temp_path + "/rFormula" >>> rf.save(rFormulaPath) >>> loadedRF = RFormula.load(rFormulaPath) >>> loadedRF.getFormula() == rf.getFormula() True >>> loadedRF.getFeaturesCol() == rf.getFeaturesCol() True >>> loadedRF.getLabelCol() == rf.getLabelCol() True >>> loadedRF.getHandleInvalid() == rf.getHandleInvalid() True >>> str(loadedRF) 'RFormula(y ~ x + s) (uid=...)' >>> modelPath = temp_path + "/rFormulaModel" >>> model.save(modelPath) >>> loadedModel = RFormulaModel.load(modelPath) >>> loadedModel.uid == model.uid True >>> loadedModel.transform(df).show() +---+---+---+---------+-----+ | y| x| s| features|label| +---+---+---+---------+-----+ |1.0|1.0| a|[1.0,1.0]| 1.0| |0.0|2.0| b|[2.0,0.0]| 0.0| |0.0|0.0| a|[0.0,1.0]| 0.0| +---+---+---+---------+-----+ ... >>> str(loadedModel) 'RFormulaModel(ResolvedRFormula(label=y, terms=[x,s], hasIntercept=true)) (uid=...)' """ @keyword_only def __init__(self, *, formula=None, featuresCol="features", labelCol="label", forceIndexLabel=False, stringIndexerOrderType="frequencyDesc", handleInvalid="error"): """ __init__(self, \\*, formula=None, featuresCol="features", labelCol="label", \ forceIndexLabel=False, stringIndexerOrderType="frequencyDesc", \ handleInvalid="error") """ super(RFormula, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.RFormula", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.5.0") def setParams(self, *, formula=None, featuresCol="features", labelCol="label", forceIndexLabel=False, stringIndexerOrderType="frequencyDesc", handleInvalid="error"): """ setParams(self, \\*, formula=None, featuresCol="features", labelCol="label", \ forceIndexLabel=False, stringIndexerOrderType="frequencyDesc", \ handleInvalid="error") Sets params for RFormula. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("1.5.0") def setFormula(self, value): """ Sets the value of :py:attr:`formula`. """ return self._set(formula=value) @since("2.1.0") def setForceIndexLabel(self, value): """ Sets the value of :py:attr:`forceIndexLabel`. """ return self._set(forceIndexLabel=value) @since("2.3.0") def setStringIndexerOrderType(self, value): """ Sets the value of :py:attr:`stringIndexerOrderType`. """ return self._set(stringIndexerOrderType=value) def setFeaturesCol(self, value): """ Sets the value of :py:attr:`featuresCol`. """ return self._set(featuresCol=value) def setLabelCol(self, value): """ Sets the value of :py:attr:`labelCol`. """ return self._set(labelCol=value) def setHandleInvalid(self, value): """ Sets the value of :py:attr:`handleInvalid`. 
""" return self._set(handleInvalid=value) def _create_model(self, java_model): return RFormulaModel(java_model) def __str__(self): formulaStr = self.getFormula() if self.isDefined(self.formula) else "" return "RFormula(%s) (uid=%s)" % (formulaStr, self.uid) class RFormulaModel(JavaModel, _RFormulaParams, JavaMLReadable, JavaMLWritable): """ Model fitted by :py:class:`RFormula`. Fitting is required to determine the factor levels of formula terms. .. versionadded:: 1.5.0 """ def __str__(self): resolvedFormula = self._call_java("resolvedFormula") return "RFormulaModel(%s) (uid=%s)" % (resolvedFormula, self.uid) class _SelectorParams(HasFeaturesCol, HasOutputCol, HasLabelCol): """ Params for :py:class:`Selector` and :py:class:`SelectorModel`. .. versionadded:: 3.1.0 """ selectorType = Param(Params._dummy(), "selectorType", "The selector type. " + "Supported options: numTopFeatures (default), percentile, fpr, fdr, fwe.", typeConverter=TypeConverters.toString) numTopFeatures = \ Param(Params._dummy(), "numTopFeatures", "Number of features that selector will select, ordered by ascending p-value. " + "If the number of features is < numTopFeatures, then this will select " + "all features.", typeConverter=TypeConverters.toInt) percentile = Param(Params._dummy(), "percentile", "Percentile of features that selector " + "will select, ordered by ascending p-value.", typeConverter=TypeConverters.toFloat) fpr = Param(Params._dummy(), "fpr", "The highest p-value for features to be kept.", typeConverter=TypeConverters.toFloat) fdr = Param(Params._dummy(), "fdr", "The upper bound of the expected false discovery rate.", typeConverter=TypeConverters.toFloat) fwe = Param(Params._dummy(), "fwe", "The upper bound of the expected family-wise error rate.", typeConverter=TypeConverters.toFloat) def __init__(self, *args): super(_SelectorParams, self).__init__(*args) self._setDefault(numTopFeatures=50, selectorType="numTopFeatures", percentile=0.1, fpr=0.05, fdr=0.05, fwe=0.05) @since("2.1.0") def getSelectorType(self): """ Gets the value of selectorType or its default value. """ return self.getOrDefault(self.selectorType) @since("2.0.0") def getNumTopFeatures(self): """ Gets the value of numTopFeatures or its default value. """ return self.getOrDefault(self.numTopFeatures) @since("2.1.0") def getPercentile(self): """ Gets the value of percentile or its default value. """ return self.getOrDefault(self.percentile) @since("2.1.0") def getFpr(self): """ Gets the value of fpr or its default value. """ return self.getOrDefault(self.fpr) @since("2.2.0") def getFdr(self): """ Gets the value of fdr or its default value. """ return self.getOrDefault(self.fdr) @since("2.2.0") def getFwe(self): """ Gets the value of fwe or its default value. """ return self.getOrDefault(self.fwe) class _Selector(JavaEstimator, _SelectorParams, JavaMLReadable, JavaMLWritable): """ Mixin for Selectors. """ @since("2.1.0") def setSelectorType(self, value): """ Sets the value of :py:attr:`selectorType`. """ return self._set(selectorType=value) @since("2.0.0") def setNumTopFeatures(self, value): """ Sets the value of :py:attr:`numTopFeatures`. Only applicable when selectorType = "numTopFeatures". """ return self._set(numTopFeatures=value) @since("2.1.0") def setPercentile(self, value): """ Sets the value of :py:attr:`percentile`. Only applicable when selectorType = "percentile". """ return self._set(percentile=value) @since("2.1.0") def setFpr(self, value): """ Sets the value of :py:attr:`fpr`. Only applicable when selectorType = "fpr". 
""" return self._set(fpr=value) @since("2.2.0") def setFdr(self, value): """ Sets the value of :py:attr:`fdr`. Only applicable when selectorType = "fdr". """ return self._set(fdr=value) @since("2.2.0") def setFwe(self, value): """ Sets the value of :py:attr:`fwe`. Only applicable when selectorType = "fwe". """ return self._set(fwe=value) def setFeaturesCol(self, value): """ Sets the value of :py:attr:`featuresCol`. """ return self._set(featuresCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) def setLabelCol(self, value): """ Sets the value of :py:attr:`labelCol`. """ return self._set(labelCol=value) class _SelectorModel(JavaModel, _SelectorParams): """ Mixin for Selector models. """ @since("3.0.0") def setFeaturesCol(self, value): """ Sets the value of :py:attr:`featuresCol`. """ return self._set(featuresCol=value) @since("3.0.0") def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @property @since("2.0.0") def selectedFeatures(self): """ List of indices to select (filter). """ return self._call_java("selectedFeatures") @inherit_doc class ChiSqSelector(_Selector, JavaMLReadable, JavaMLWritable): """ Chi-Squared feature selection, which selects categorical features to use for predicting a categorical label. The selector supports different selection methods: `numTopFeatures`, `percentile`, `fpr`, `fdr`, `fwe`. * `numTopFeatures` chooses a fixed number of top features according to a chi-squared test. * `percentile` is similar but chooses a fraction of all features instead of a fixed number. * `fpr` chooses all features whose p-values are below a threshold, thus controlling the false positive rate of selection. * `fdr` uses the `Benjamini-Hochberg procedure <https://en.wikipedia.org/wiki/ False_discovery_rate#Benjamini.E2.80.93Hochberg_procedure>`_ to choose all features whose false discovery rate is below a threshold. * `fwe` chooses all features whose p-values are below a threshold. The threshold is scaled by 1/numFeatures, thus controlling the family-wise error rate of selection. By default, the selection method is `numTopFeatures`, with the default number of top features set to 50. .. deprecated:: 3.1.0 Use UnivariateFeatureSelector .. versionadded:: 2.0.0 Examples -------- >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame( ... [(Vectors.dense([0.0, 0.0, 18.0, 1.0]), 1.0), ... (Vectors.dense([0.0, 1.0, 12.0, 0.0]), 0.0), ... (Vectors.dense([1.0, 0.0, 15.0, 0.1]), 0.0)], ... ["features", "label"]) >>> selector = ChiSqSelector(numTopFeatures=1, outputCol="selectedFeatures") >>> model = selector.fit(df) >>> model.getFeaturesCol() 'features' >>> model.setFeaturesCol("features") ChiSqSelectorModel... 
>>> model.transform(df).head().selectedFeatures DenseVector([18.0]) >>> model.selectedFeatures [2] >>> chiSqSelectorPath = temp_path + "/chi-sq-selector" >>> selector.save(chiSqSelectorPath) >>> loadedSelector = ChiSqSelector.load(chiSqSelectorPath) >>> loadedSelector.getNumTopFeatures() == selector.getNumTopFeatures() True >>> modelPath = temp_path + "/chi-sq-selector-model" >>> model.save(modelPath) >>> loadedModel = ChiSqSelectorModel.load(modelPath) >>> loadedModel.selectedFeatures == model.selectedFeatures True >>> loadedModel.transform(df).take(1) == model.transform(df).take(1) True """ @keyword_only def __init__(self, *, numTopFeatures=50, featuresCol="features", outputCol=None, labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05, fdr=0.05, fwe=0.05): """ __init__(self, \\*, numTopFeatures=50, featuresCol="features", outputCol=None, \ labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05, \ fdr=0.05, fwe=0.05) """ super(ChiSqSelector, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.ChiSqSelector", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("2.0.0") def setParams(self, *, numTopFeatures=50, featuresCol="features", outputCol=None, labelCol="labels", selectorType="numTopFeatures", percentile=0.1, fpr=0.05, fdr=0.05, fwe=0.05): """ setParams(self, \\*, numTopFeatures=50, featuresCol="features", outputCol=None, \ labelCol="labels", selectorType="numTopFeatures", percentile=0.1, fpr=0.05, \ fdr=0.05, fwe=0.05) Sets params for this ChiSqSelector. """ kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return ChiSqSelectorModel(java_model) class ChiSqSelectorModel(_SelectorModel, JavaMLReadable, JavaMLWritable): """ Model fitted by :py:class:`ChiSqSelector`. .. versionadded:: 2.0.0 """ @inherit_doc class VectorSizeHint(JavaTransformer, HasInputCol, HasHandleInvalid, JavaMLReadable, JavaMLWritable): """ A feature transformer that adds size information to the metadata of a vector column. VectorAssembler needs size information for its input columns and cannot be used on streaming dataframes without this metadata. .. versionadded:: 2.3.0 Notes ----- VectorSizeHint modifies `inputCol` to include size metadata and does not have an outputCol. Examples -------- >>> from pyspark.ml.linalg import Vectors >>> from pyspark.ml import Pipeline, PipelineModel >>> data = [(Vectors.dense([1., 2., 3.]), 4.)] >>> df = spark.createDataFrame(data, ["vector", "float"]) >>> >>> sizeHint = VectorSizeHint(inputCol="vector", size=3, handleInvalid="skip") >>> vecAssembler = VectorAssembler(inputCols=["vector", "float"], outputCol="assembled") >>> pipeline = Pipeline(stages=[sizeHint, vecAssembler]) >>> >>> pipelineModel = pipeline.fit(df) >>> pipelineModel.transform(df).head().assembled DenseVector([1.0, 2.0, 3.0, 4.0]) >>> vectorSizeHintPath = temp_path + "/vector-size-hint-pipeline" >>> pipelineModel.save(vectorSizeHintPath) >>> loadedPipeline = PipelineModel.load(vectorSizeHintPath) >>> loaded = loadedPipeline.transform(df).head().assembled >>> expected = pipelineModel.transform(df).head().assembled >>> loaded == expected True """ size = Param(Params._dummy(), "size", "Size of vectors in column.", typeConverter=TypeConverters.toInt) handleInvalid = Param(Params._dummy(), "handleInvalid", "How to handle invalid vectors in inputCol. Invalid vectors include " "nulls and vectors with the wrong size. 
The options are `skip` (filter " "out rows with invalid vectors), `error` (throw an error) and " "`optimistic` (do not check the vector size, and keep all rows). " "`error` by default.", TypeConverters.toString) @keyword_only def __init__(self, *, inputCol=None, size=None, handleInvalid="error"): """ __init__(self, \\*, inputCol=None, size=None, handleInvalid="error") """ super(VectorSizeHint, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorSizeHint", self.uid) self._setDefault(handleInvalid="error") self.setParams(**self._input_kwargs) @keyword_only @since("2.3.0") def setParams(self, *, inputCol=None, size=None, handleInvalid="error"): """ setParams(self, \\*, inputCol=None, size=None, handleInvalid="error") Sets params for this VectorSizeHint. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("2.3.0") def getSize(self): """ Gets size param, the size of vectors in `inputCol`.""" return self.getOrDefault(self.size) @since("2.3.0") def setSize(self, value): """ Sets size param, the size of vectors in `inputCol`.""" return self._set(size=value) def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def setHandleInvalid(self, value): """ Sets the value of :py:attr:`handleInvalid`. """ return self._set(handleInvalid=value) class _VarianceThresholdSelectorParams(HasFeaturesCol, HasOutputCol): """ Params for :py:class:`VarianceThresholdSelector` and :py:class:`VarianceThresholdSelectorModel`. .. versionadded:: 3.1.0 """ varianceThreshold = Param(Params._dummy(), "varianceThreshold", "Param for variance threshold. Features with a variance not " + "greater than this threshold will be removed. The default value " + "is 0.0.", typeConverter=TypeConverters.toFloat) @since("3.1.0") def getVarianceThreshold(self): """ Gets the value of varianceThreshold or its default value. """ return self.getOrDefault(self.varianceThreshold) @inherit_doc class VarianceThresholdSelector(JavaEstimator, _VarianceThresholdSelectorParams, JavaMLReadable, JavaMLWritable): """ Feature selector that removes all low-variance features. Features with a variance not greater than the threshold will be removed. The default is to keep all features with non-zero variance, i.e. remove the features that have the same value in all samples. .. versionadded:: 3.1.0 Examples -------- >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame( ... [(Vectors.dense([6.0, 7.0, 0.0, 7.0, 6.0, 0.0]),), ... (Vectors.dense([0.0, 9.0, 6.0, 0.0, 5.0, 9.0]),), ... (Vectors.dense([0.0, 9.0, 3.0, 0.0, 5.0, 5.0]),), ... (Vectors.dense([0.0, 9.0, 8.0, 5.0, 6.0, 4.0]),), ... (Vectors.dense([8.0, 9.0, 6.0, 5.0, 4.0, 4.0]),), ... (Vectors.dense([8.0, 9.0, 6.0, 0.0, 0.0, 0.0]),)], ... ["features"]) >>> selector = VarianceThresholdSelector(varianceThreshold=8.2, outputCol="selectedFeatures") >>> model = selector.fit(df) >>> model.getFeaturesCol() 'features' >>> model.setFeaturesCol("features") VarianceThresholdSelectorModel... 
>>> model.transform(df).head().selectedFeatures DenseVector([6.0, 7.0, 0.0]) >>> model.selectedFeatures [0, 3, 5] >>> varianceThresholdSelectorPath = temp_path + "/variance-threshold-selector" >>> selector.save(varianceThresholdSelectorPath) >>> loadedSelector = VarianceThresholdSelector.load(varianceThresholdSelectorPath) >>> loadedSelector.getVarianceThreshold() == selector.getVarianceThreshold() True >>> modelPath = temp_path + "/variance-threshold-selector-model" >>> model.save(modelPath) >>> loadedModel = VarianceThresholdSelectorModel.load(modelPath) >>> loadedModel.selectedFeatures == model.selectedFeatures True >>> loadedModel.transform(df).take(1) == model.transform(df).take(1) True """ @keyword_only def __init__(self, *, featuresCol="features", outputCol=None, varianceThreshold=0.0): """ __init__(self, \\*, featuresCol="features", outputCol=None, varianceThreshold=0.0) """ super(VarianceThresholdSelector, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.feature.VarianceThresholdSelector", self.uid) self._setDefault(varianceThreshold=0.0) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("3.1.0") def setParams(self, *, featuresCol="features", outputCol=None, varianceThreshold=0.0): """ setParams(self, \\*, featuresCol="features", outputCol=None, varianceThreshold=0.0) Sets params for this VarianceThresholdSelector. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("3.1.0") def setVarianceThreshold(self, value): """ Sets the value of :py:attr:`varianceThreshold`. """ return self._set(varianceThreshold=value) @since("3.1.0") def setFeaturesCol(self, value): """ Sets the value of :py:attr:`featuresCol`. """ return self._set(featuresCol=value) @since("3.1.0") def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) def _create_model(self, java_model): return VarianceThresholdSelectorModel(java_model) class VarianceThresholdSelectorModel(JavaModel, _VarianceThresholdSelectorParams, JavaMLReadable, JavaMLWritable): """ Model fitted by :py:class:`VarianceThresholdSelector`. .. versionadded:: 3.1.0 """ @since("3.1.0") def setFeaturesCol(self, value): """ Sets the value of :py:attr:`featuresCol`. """ return self._set(featuresCol=value) @since("3.1.0") def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @property @since("3.1.0") def selectedFeatures(self): """ List of indices to select (filter). """ return self._call_java("selectedFeatures") class _UnivariateFeatureSelectorParams(HasFeaturesCol, HasOutputCol, HasLabelCol): """ Params for :py:class:`UnivariateFeatureSelector` and :py:class:`UnivariateFeatureSelectorModel`. .. versionadded:: 3.1.0 """ featureType = Param(Params._dummy(), "featureType", "The feature type. " + "Supported options: categorical, continuous.", typeConverter=TypeConverters.toString) labelType = Param(Params._dummy(), "labelType", "The label type. " + "Supported options: categorical, continuous.", typeConverter=TypeConverters.toString) selectionMode = Param(Params._dummy(), "selectionMode", "The selection mode. 
" + "Supported options: numTopFeatures (default), percentile, fpr, " + "fdr, fwe.", typeConverter=TypeConverters.toString) selectionThreshold = Param(Params._dummy(), "selectionThreshold", "The upper bound of the " + "features that selector will select.", typeConverter=TypeConverters.toFloat) def __init__(self, *args): super(_UnivariateFeatureSelectorParams, self).__init__(*args) self._setDefault(selectionMode="numTopFeatures") @since("3.1.1") def getFeatureType(self): """ Gets the value of featureType or its default value. """ return self.getOrDefault(self.featureType) @since("3.1.1") def getLabelType(self): """ Gets the value of labelType or its default value. """ return self.getOrDefault(self.labelType) @since("3.1.1") def getSelectionMode(self): """ Gets the value of selectionMode or its default value. """ return self.getOrDefault(self.selectionMode) @since("3.1.1") def getSelectionThreshold(self): """ Gets the value of selectionThreshold or its default value. """ return self.getOrDefault(self.selectionThreshold) @inherit_doc class UnivariateFeatureSelector(JavaEstimator, _UnivariateFeatureSelectorParams, JavaMLReadable, JavaMLWritable): """ UnivariateFeatureSelector The user can set `featureType` and `labelType`, and Spark will pick the score function based on the specified `featureType` and `labelType`. The following combination of `featureType` and `labelType` are supported: - `featureType` `categorical` and `labelType` `categorical`, Spark uses chi-squared, i.e. chi2 in sklearn. - `featureType` `continuous` and `labelType` `categorical`, Spark uses ANOVATest, i.e. f_classif in sklearn. - `featureType` `continuous` and `labelType` `continuous`, Spark uses F-value, i.e. f_regression in sklearn. The `UnivariateFeatureSelector` supports different selection modes: `numTopFeatures`, `percentile`, `fpr`, `fdr`, `fwe`. - `numTopFeatures` chooses a fixed number of top features according to a according to a hypothesis. - `percentile` is similar but chooses a fraction of all features instead of a fixed number. - `fpr` chooses all features whose p-values are below a threshold, thus controlling the false positive rate of selection. - `fdr` uses the `Benjamini-Hochberg procedure \ <https://en.wikipedia.org/wiki/False_discovery_rate#Benjamini.E2.80.93Hochberg_procedure>`_ to choose all features whose false discovery rate is below a threshold. - `fwe` chooses all features whose p-values are below a threshold. The threshold is scaled by 1 / `numFeatures`, thus controlling the family-wise error rate of selection. By default, the selection mode is `numTopFeatures`. .. versionadded:: 3.1.1 Examples -------- >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame( ... [(Vectors.dense([1.7, 4.4, 7.6, 5.8, 9.6, 2.3]), 3.0), ... (Vectors.dense([8.8, 7.3, 5.7, 7.3, 2.2, 4.1]), 2.0), ... (Vectors.dense([1.2, 9.5, 2.5, 3.1, 8.7, 2.5]), 1.0), ... (Vectors.dense([3.7, 9.2, 6.1, 4.1, 7.5, 3.8]), 2.0), ... (Vectors.dense([8.9, 5.2, 7.8, 8.3, 5.2, 3.0]), 4.0), ... (Vectors.dense([7.9, 8.5, 9.2, 4.0, 9.4, 2.1]), 4.0)], ... ["features", "label"]) >>> selector = UnivariateFeatureSelector(outputCol="selectedFeatures") >>> selector.setFeatureType("continuous").setLabelType("categorical").setSelectionThreshold(1) UnivariateFeatureSelector... >>> model = selector.fit(df) >>> model.getFeaturesCol() 'features' >>> model.setFeaturesCol("features") UnivariateFeatureSelectorModel... 
>>> model.transform(df).head().selectedFeatures DenseVector([7.6]) >>> model.selectedFeatures [2] >>> selectorPath = temp_path + "/selector" >>> selector.save(selectorPath) >>> loadedSelector = UnivariateFeatureSelector.load(selectorPath) >>> loadedSelector.getSelectionThreshold() == selector.getSelectionThreshold() True >>> modelPath = temp_path + "/selector-model" >>> model.save(modelPath) >>> loadedModel = UnivariateFeatureSelectorModel.load(modelPath) >>> loadedModel.selectedFeatures == model.selectedFeatures True >>> loadedModel.transform(df).take(1) == model.transform(df).take(1) True """ @keyword_only def __init__(self, *, featuresCol="features", outputCol=None, labelCol="label", selectionMode="numTopFeatures"): """ __init__(self, \\*, featuresCol="features", outputCol=None, \ labelCol="label", selectionMode="numTopFeatures") """ super(UnivariateFeatureSelector, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.UnivariateFeatureSelector", self.uid) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("3.1.1") def setParams(self, *, featuresCol="features", outputCol=None, labelCol="labels", selectionMode="numTopFeatures"): """ setParams(self, \\*, featuresCol="features", outputCol=None, \ labelCol="labels", selectionMode="numTopFeatures") Sets params for this UnivariateFeatureSelector. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("3.1.1") def setFeatureType(self, value): """ Sets the value of :py:attr:`featureType`. """ return self._set(featureType=value) @since("3.1.1") def setLabelType(self, value): """ Sets the value of :py:attr:`labelType`. """ return self._set(labelType=value) @since("3.1.1") def setSelectionMode(self, value): """ Sets the value of :py:attr:`selectionMode`. """ return self._set(selectionMode=value) @since("3.1.1") def setSelectionThreshold(self, value): """ Sets the value of :py:attr:`selectionThreshold`. """ return self._set(selectionThreshold=value) def setFeaturesCol(self, value): """ Sets the value of :py:attr:`featuresCol`. """ return self._set(featuresCol=value) def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) def setLabelCol(self, value): """ Sets the value of :py:attr:`labelCol`. """ return self._set(labelCol=value) def _create_model(self, java_model): return UnivariateFeatureSelectorModel(java_model) class UnivariateFeatureSelectorModel(JavaModel, _UnivariateFeatureSelectorParams, JavaMLReadable, JavaMLWritable): """ Model fitted by :py:class:`UnivariateFeatureSelector`. .. versionadded:: 3.1.1 """ @since("3.1.1") def setFeaturesCol(self, value): """ Sets the value of :py:attr:`featuresCol`. """ return self._set(featuresCol=value) @since("3.1.1") def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) @property @since("3.1.1") def selectedFeatures(self): """ List of indices to select (filter). 
""" return self._call_java("selectedFeatures") if __name__ == "__main__": import doctest import sys import tempfile import pyspark.ml.feature from pyspark.sql import Row, SparkSession globs = globals().copy() features = pyspark.ml.feature.__dict__.copy() globs.update(features) # The small batch size here ensures that we see multiple batches, # even in these small test examples: spark = SparkSession.builder\ .master("local[2]")\ .appName("ml.feature tests")\ .getOrCreate() sc = spark.sparkContext globs['sc'] = sc globs['spark'] = spark testData = sc.parallelize([Row(id=0, label="a"), Row(id=1, label="b"), Row(id=2, label="c"), Row(id=3, label="a"), Row(id=4, label="a"), Row(id=5, label="c")], 2) globs['stringIndDf'] = spark.createDataFrame(testData) temp_path = tempfile.mkdtemp() globs['temp_path'] = temp_path try: (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS) spark.stop() finally: from shutil import rmtree try: rmtree(temp_path) except OSError: pass if failure_count: sys.exit(-1)
apache-2.0
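A minimal, standalone sketch of the UnivariateFeatureSelector workflow that the docstrings above describe. The data rows mirror the class doctest; the local SparkSession configuration and the selection threshold of 1 are illustrative assumptions rather than anything prescribed by the module.

from pyspark.sql import SparkSession
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import UnivariateFeatureSelector

spark = SparkSession.builder.master("local[2]").appName("selector-demo").getOrCreate()

# Continuous features with a categorical label, so Spark applies the ANOVA F-test.
df = spark.createDataFrame(
    [(Vectors.dense([1.7, 4.4, 7.6, 5.8, 9.6, 2.3]), 3.0),
     (Vectors.dense([8.8, 7.3, 5.7, 7.3, 2.2, 4.1]), 2.0),
     (Vectors.dense([1.2, 9.5, 2.5, 3.1, 8.7, 2.5]), 1.0),
     (Vectors.dense([3.7, 9.2, 6.1, 4.1, 7.5, 3.8]), 2.0),
     (Vectors.dense([8.9, 5.2, 7.8, 8.3, 5.2, 3.0]), 4.0),
     (Vectors.dense([7.9, 8.5, 9.2, 4.0, 9.4, 2.1]), 4.0)],
    ["features", "label"])

selector = (UnivariateFeatureSelector(outputCol="selectedFeatures")
            .setFeatureType("continuous")
            .setLabelType("categorical")
            .setSelectionThreshold(1))

model = selector.fit(df)
print(model.selectedFeatures)             # indices of the kept features, e.g. [2]
model.transform(df).show(truncate=False)
spark.stop()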
bloyl/mne-python
mne/utils/check.py
1
27421
# -*- coding: utf-8 -*- """The check functions.""" # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # # License: BSD (3-clause) from builtins import input # no-op here but facilitates testing from difflib import get_close_matches from distutils.version import LooseVersion import operator import os import os.path as op from pathlib import Path import sys import warnings import numpy as np from ..fixes import _median_complex from ._logging import warn, logger def _ensure_int(x, name='unknown', must_be='an int'): """Ensure a variable is an integer.""" # This is preferred over numbers.Integral, see: # https://github.com/scipy/scipy/pull/7351#issuecomment-299713159 try: # someone passing True/False is much more likely to be an error than # intentional usage if isinstance(x, bool): raise TypeError() x = int(operator.index(x)) except TypeError: raise TypeError('%s must be %s, got %s' % (name, must_be, type(x))) return x def check_fname(fname, filetype, endings, endings_err=()): """Enforce MNE filename conventions. Parameters ---------- fname : str Name of the file. filetype : str Type of file. e.g., ICA, Epochs etc. endings : tuple Acceptable endings for the filename. endings_err : tuple Obligatory possible endings for the filename. """ _validate_type(fname, 'path-like', 'fname') fname = str(fname) if len(endings_err) > 0 and not fname.endswith(endings_err): print_endings = ' or '.join([', '.join(endings_err[:-1]), endings_err[-1]]) raise IOError('The filename (%s) for file type %s must end with %s' % (fname, filetype, print_endings)) print_endings = ' or '.join([', '.join(endings[:-1]), endings[-1]]) if not fname.endswith(endings): warn('This filename (%s) does not conform to MNE naming conventions. ' 'All %s files should end with %s' % (fname, filetype, print_endings)) def check_version(library, min_version='0.0'): r"""Check minimum library version required. Parameters ---------- library : str The library name to import. Must have a ``__version__`` property. min_version : str The minimum version string. Anything that matches ``'(\d+ | [a-z]+ | \.)'``. Can also be empty to skip version check (just check for library presence). Returns ------- ok : bool True if the library exists with at least the specified version. """ ok = True try: library = __import__(library) except ImportError: ok = False else: if min_version and \ LooseVersion(library.__version__) < LooseVersion(min_version): ok = False return ok def _require_version(lib, what, version='0.0'): """Require library for a purpose.""" if not check_version(lib, version): extra = f' (version >= {version})' if version != '0.0' else '' raise ImportError(f'The {lib} package{extra} is required to {what}') def _check_mayavi_version(min_version='4.3.0'): """Check mayavi version.""" if not check_version('mayavi', min_version): raise RuntimeError("Need mayavi >= %s" % min_version) # adapted from scikit-learn utils/validation.py def check_random_state(seed): """Turn seed into a numpy.random.mtrand.RandomState instance. If seed is None, return the RandomState singleton used by np.random.mtrand. If seed is an int, return a new RandomState instance seeded with seed. If seed is already a RandomState instance, return it. Otherwise raise ValueError. 
""" if seed is None or seed is np.random: return np.random.mtrand._rand if isinstance(seed, (int, np.integer)): return np.random.mtrand.RandomState(seed) if isinstance(seed, np.random.mtrand.RandomState): return seed try: # Generator is only available in numpy >= 1.17 if isinstance(seed, np.random.Generator): return seed except AttributeError: pass raise ValueError('%r cannot be used to seed a ' 'numpy.random.mtrand.RandomState instance' % seed) def _check_event_id(event_id, events): """Check event_id and convert to default format.""" # check out event_id dict if event_id is None: # convert to int to make typing-checks happy event_id = list(np.unique(events[:, 2])) if isinstance(event_id, dict): for key in event_id.keys(): _validate_type(key, str, 'Event names') event_id = {key: _ensure_int(val, 'event_id[%s]' % key) for key, val in event_id.items()} elif isinstance(event_id, list): event_id = [_ensure_int(v, 'event_id[%s]' % vi) for vi, v in enumerate(event_id)] event_id = dict(zip((str(i) for i in event_id), event_id)) else: event_id = _ensure_int(event_id, 'event_id') event_id = {str(event_id): event_id} return event_id def _check_fname(fname, overwrite=False, must_exist=False, name='File', need_dir=False): """Check for file existence, and return string of its absolute path.""" _validate_type(fname, 'path-like', name) if op.exists(fname): if not overwrite: raise FileExistsError('Destination file exists. Please use option ' '"overwrite=True" to force overwriting.') elif overwrite != 'read': logger.info('Overwriting existing file.') if must_exist: if need_dir: if not op.isdir(fname): raise IOError( f'Need a directory for {name} but found a file ' f'at {fname}') else: if not op.isfile(fname): raise IOError( f'Need a file for {name} but found a directory ' f'at {fname}') if not os.access(fname, os.R_OK): raise PermissionError( f'{name} does not have read permissions: {fname}') elif must_exist: raise FileNotFoundError(f'{name} does not exist: {fname}') return str(op.abspath(fname)) def _check_subject(first, second, *, raise_error=True, first_kind='class subject attribute', second_kind='input subject'): """Get subject name from class.""" if second is not None: _validate_type(second, 'str', "subject input") if first is not None and first != second: raise ValueError( f'{first_kind} ({repr(first)}) did not match ' f'{second_kind} ({second})') return second elif first is not None: _validate_type( first, 'str', f"Either {second_kind} subject or {first_kind}") return first elif raise_error is True: raise ValueError(f'Neither {second_kind} subject nor {first_kind} ' 'was a string') return None def _check_preload(inst, msg): """Ensure data are preloaded.""" from ..epochs import BaseEpochs from ..evoked import Evoked from ..time_frequency import _BaseTFR if isinstance(inst, (_BaseTFR, Evoked)): pass else: name = "epochs" if isinstance(inst, BaseEpochs) else 'raw' if not inst.preload: raise RuntimeError( "By default, MNE does not load data into main memory to " "conserve resources. " + msg + ' requires %s data to be ' 'loaded. Use preload=True (or string) in the constructor or ' '%s.load_data().' 
% (name, name)) def _check_compensation_grade(info1, info2, name1, name2='data', ch_names=None): """Ensure that objects have same compensation_grade.""" from ..io import Info from ..io.pick import pick_channels, pick_info from ..io.compensator import get_current_comp for t_info in (info1, info2): if t_info is None: return assert isinstance(t_info, Info), t_info # or internal code is wrong if ch_names is not None: info1 = info1.copy() info2 = info2.copy() # pick channels for t_info in [info1, info2]: if t_info['comps']: t_info['comps'] = [] picks = pick_channels(t_info['ch_names'], ch_names) pick_info(t_info, picks, copy=False) # "or 0" here aliases None -> 0, as they are equivalent grade1 = get_current_comp(info1) or 0 grade2 = get_current_comp(info2) or 0 # perform check if grade1 != grade2: raise RuntimeError( 'Compensation grade of %s (%s) and %s (%s) do not match' % (name1, grade1, name2, grade2)) def _check_pylsl_installed(strict=True): """Aux function.""" try: import pylsl return pylsl except ImportError: if strict is True: raise RuntimeError('For this functionality to work, the pylsl ' 'library is required.') else: return False def _check_pandas_installed(strict=True): """Aux function.""" try: import pandas return pandas except ImportError: if strict is True: raise RuntimeError('For this functionality to work, the Pandas ' 'library is required.') else: return False def _check_eeglabio_installed(strict=True): """Aux function.""" try: import eeglabio return eeglabio except ImportError: if strict is True: raise RuntimeError('For this functionality to work, the eeglabio ' 'library is required.') else: return False def _check_pandas_index_arguments(index, valid): """Check pandas index arguments.""" if index is None: return if isinstance(index, str): index = [index] if not isinstance(index, list): raise TypeError('index must be `None` or a string or list of strings,' ' got type {}.'.format(type(index))) invalid = set(index) - set(valid) if invalid: plural = ('is not a valid option', 'are not valid options')[int(len(invalid) > 1)] raise ValueError('"{}" {}. Valid index options are `None`, "{}".' .format('", "'.join(invalid), plural, '", "'.join(valid))) return index def _check_time_format(time_format, valid, meas_date=None): """Check time_format argument.""" if time_format not in valid and time_format is not None: valid_str = '", "'.join(valid) raise ValueError('"{}" is not a valid time format. Valid options are ' '"{}" and None.'.format(time_format, valid_str)) # allow datetime only if meas_date available if time_format == 'datetime' and meas_date is None: warn("Cannot convert to Datetime when raw.info['meas_date'] is " "None. Falling back to Timedelta.") time_format = 'timedelta' return time_format def _check_ch_locs(chs): """Check if channel locations exist. 
Parameters ---------- chs : dict The channels from info['chs'] """ locs3d = np.array([ch['loc'][:3] for ch in chs]) return not ((locs3d == 0).all() or (~np.isfinite(locs3d)).all() or np.allclose(locs3d, 0.)) def _is_numeric(n): return isinstance(n, (np.integer, np.floating, int, float)) class _IntLike(object): @classmethod def __instancecheck__(cls, other): try: _ensure_int(other) except TypeError: return False else: return True int_like = _IntLike() path_like = (str, Path, os.PathLike) class _Callable(object): @classmethod def __instancecheck__(cls, other): return callable(other) _multi = { 'str': (str,), 'numeric': (np.floating, float, int_like), 'path-like': path_like, 'int-like': (int_like,), 'callable': (_Callable(),), } def _validate_type(item, types=None, item_name=None, type_name=None): """Validate that `item` is an instance of `types`. Parameters ---------- item : object The thing to be checked. types : type | str | tuple of types | tuple of str The types to be checked against. If str, must be one of {'int', 'str', 'numeric', 'info', 'path-like', 'callable'}. item_name : str | None Name of the item to show inside the error message. type_name : str | None Possible types to show inside the error message that the checked item can be. """ if types == "int": _ensure_int(item, name=item_name) return # terminate prematurely elif types == "info": from mne.io import Info as types if not isinstance(types, (list, tuple)): types = [types] check_types = sum(((type(None),) if type_ is None else (type_,) if not isinstance(type_, str) else _multi[type_] for type_ in types), ()) if not isinstance(item, check_types): if type_name is None: type_name = ['None' if cls_ is None else cls_.__name__ if not isinstance(cls_, str) else cls_ for cls_ in types] if len(type_name) == 1: type_name = type_name[0] elif len(type_name) == 2: type_name = ' or '.join(type_name) else: type_name[-1] = 'or ' + type_name[-1] type_name = ', '.join(type_name) _item_name = 'Item' if item_name is None else item_name raise TypeError(f"{_item_name} must be an instance of {type_name}, " f"got {type(item)} instead") def _check_path_like(item): """Validate that `item` is `path-like`. Parameters ---------- item : object The thing to be checked. Returns ------- bool ``True`` if `item` is a `path-like` object; ``False`` otherwise. """ try: _validate_type(item, types='path-like') return True except TypeError: return False def _check_if_nan(data, msg=" to be plotted"): """Raise if any of the values are NaN.""" if not np.isfinite(data).all(): raise ValueError("Some of the values {} are NaN.".format(msg)) def _check_info_inv(info, forward, data_cov=None, noise_cov=None): """Return good channels common to forward model and covariance matrices.""" from .. 
import pick_types # get a list of all channel names: fwd_ch_names = forward['info']['ch_names'] # handle channels from forward model and info: ch_names = _compare_ch_names(info['ch_names'], fwd_ch_names, info['bads']) # make sure that no reference channels are left: ref_chs = pick_types(info, meg=False, ref_meg=True) ref_chs = [info['ch_names'][ch] for ch in ref_chs] ch_names = [ch for ch in ch_names if ch not in ref_chs] # inform about excluding channels: if (data_cov is not None and set(info['bads']) != set(data_cov['bads']) and (len(set(ch_names).intersection(data_cov['bads'])) > 0)): logger.info('info["bads"] and data_cov["bads"] do not match, ' 'excluding bad channels from both.') if (noise_cov is not None and set(info['bads']) != set(noise_cov['bads']) and (len(set(ch_names).intersection(noise_cov['bads'])) > 0)): logger.info('info["bads"] and noise_cov["bads"] do not match, ' 'excluding bad channels from both.') # handle channels from data cov if data cov is not None # Note: data cov is supposed to be None in tf_lcmv if data_cov is not None: ch_names = _compare_ch_names(ch_names, data_cov.ch_names, data_cov['bads']) # handle channels from noise cov if noise cov available: if noise_cov is not None: ch_names = _compare_ch_names(ch_names, noise_cov.ch_names, noise_cov['bads']) picks = [info['ch_names'].index(k) for k in ch_names if k in info['ch_names']] return picks def _compare_ch_names(names1, names2, bads): """Return channel names of common and good channels.""" ch_names = [ch for ch in names1 if ch not in bads and ch in names2] return ch_names def _check_channels_spatial_filter(ch_names, filters): """Return data channel indices to be used with spatial filter. Unlike ``pick_channels``, this respects the order of ch_names. """ sel = [] # first check for channel discrepancies between filter and data: for ch_name in filters['ch_names']: if ch_name not in ch_names: raise ValueError('The spatial filter was computed with channel %s ' 'which is not present in the data. You should ' 'compute a new spatial filter restricted to the ' 'good data channels.' % ch_name) # then compare list of channels and get selection based on data: sel = [ii for ii, ch_name in enumerate(ch_names) if ch_name in filters['ch_names']] return sel def _check_rank(rank): """Check rank parameter.""" _validate_type(rank, (None, dict, str), 'rank') if isinstance(rank, str): if rank not in ['full', 'info']: raise ValueError('rank, if str, must be "full" or "info", ' 'got %s' % (rank,)) return rank def _check_one_ch_type(method, info, forward, data_cov=None, noise_cov=None): """Check number of sensor types and presence of noise covariance matrix.""" from ..cov import make_ad_hoc_cov, Covariance from ..time_frequency.csd import CrossSpectralDensity from ..io.pick import pick_info from ..channels.channels import _contains_ch_type if isinstance(data_cov, CrossSpectralDensity): _validate_type(noise_cov, [None, CrossSpectralDensity], 'noise_cov') # FIXME picks = list(range(len(data_cov.ch_names))) info_pick = info else: _validate_type(noise_cov, [None, Covariance], 'noise_cov') picks = _check_info_inv(info, forward, data_cov=data_cov, noise_cov=noise_cov) info_pick = pick_info(info, picks) ch_types =\ [_contains_ch_type(info_pick, tt) for tt in ('mag', 'grad', 'eeg')] if sum(ch_types) > 1: if noise_cov is None: raise ValueError('Source reconstruction with several sensor types' ' requires a noise covariance matrix to be ' 'able to apply whitening.') if noise_cov is None: noise_cov = make_ad_hoc_cov(info_pick, std=1.) 
allow_mismatch = True else: noise_cov = noise_cov.copy() if isinstance(noise_cov, Covariance) and 'estimator' in noise_cov: del noise_cov['estimator'] allow_mismatch = False _validate_type(noise_cov, (Covariance, CrossSpectralDensity), 'noise_cov') return noise_cov, picks, allow_mismatch def _check_depth(depth, kind='depth_mne'): """Check depth options.""" from ..defaults import _handle_default if not isinstance(depth, dict): depth = dict(exp=None if depth is None else float(depth)) return _handle_default(kind, depth) def _check_option(parameter, value, allowed_values, extra=''): """Check the value of a parameter against a list of valid options. Return the value if it is valid, otherwise raise a ValueError with a readable error message. Parameters ---------- parameter : str The name of the parameter to check. This is used in the error message. value : any type The value of the parameter to check. allowed_values : list The list of allowed values for the parameter. extra : str Extra string to append to the invalid value sentence, e.g. "when using ico mode". Raises ------ ValueError When the value of the parameter is not one of the valid options. Returns ------- value : any type The value if it is valid. """ if value in allowed_values: return value # Prepare a nice error message for the user extra = ' ' + extra if extra else extra msg = ("Invalid value for the '{parameter}' parameter{extra}. " '{options}, but got {value!r} instead.') allowed_values = list(allowed_values) # e.g., if a dict was given if len(allowed_values) == 1: options = f'The only allowed value is {repr(allowed_values[0])}' else: options = 'Allowed values are ' options += ', '.join([f'{repr(v)}' for v in allowed_values[:-1]]) options += f', and {repr(allowed_values[-1])}' raise ValueError(msg.format(parameter=parameter, options=options, value=value, extra=extra)) def _check_all_same_channel_names(instances): """Check if a collection of instances all have the same channels.""" ch_names = instances[0].info["ch_names"] for inst in instances: if ch_names != inst.info["ch_names"]: return False return True def _check_combine(mode, valid=('mean', 'median', 'std')): if mode == "mean": def fun(data): return np.mean(data, axis=0) elif mode == "std": def fun(data): return np.std(data, axis=0) elif mode == "median" or mode == np.median: def fun(data): return _median_complex(data, axis=0) elif callable(mode): fun = mode else: raise ValueError("Combine option must be " + ", ".join(valid) + " or callable, got %s (type %s)." % (mode, type(mode))) return fun def _check_src_normal(pick_ori, src): from ..source_space import SourceSpaces _validate_type(src, SourceSpaces, 'src') if pick_ori == 'normal' and src.kind not in ('surface', 'discrete'): raise RuntimeError('Normal source orientation is supported only for ' 'surface or discrete SourceSpaces, got type ' '%s' % (src.kind,)) def _check_stc_units(stc, threshold=1e-7): # 100 nAm threshold for warning max_cur = np.max(np.abs(stc.data)) if max_cur > threshold: warn('The maximum current magnitude is %0.1f nAm, which is very large.' ' Are you trying to apply the forward model to noise-normalized ' '(dSPM, sLORETA, or eLORETA) values? The result will only be ' 'correct if currents (in units of Am) are used.' 
% (1e9 * max_cur)) def _check_pyqt5_version(): bad = True try: from PyQt5.Qt import PYQT_VERSION_STR as version except Exception: version = 'unknown' else: if LooseVersion(version) >= LooseVersion('5.10'): bad = False bad &= sys.platform == 'darwin' if bad: warn('macOS users should use PyQt5 >= 5.10 for GUIs, got %s. ' 'Please upgrade e.g. with:\n\n' ' pip install "PyQt5>=5.10,<5.14"\n' % (version,)) return version def _check_sphere(sphere, info=None, sphere_units='m'): from ..defaults import HEAD_SIZE_DEFAULT from ..bem import fit_sphere_to_headshape, ConductorModel, get_fitting_dig if sphere is None: sphere = HEAD_SIZE_DEFAULT if info is not None: # Decide if we have enough dig points to do the auto fit try: get_fitting_dig(info, 'extra', verbose='error') except (RuntimeError, ValueError): pass else: sphere = 'auto' if isinstance(sphere, str): if sphere != 'auto': raise ValueError('sphere, if str, must be "auto", got %r' % (sphere)) R, r0, _ = fit_sphere_to_headshape(info, verbose=False, units='m') sphere = tuple(r0) + (R,) sphere_units = 'm' elif isinstance(sphere, ConductorModel): if not sphere['is_sphere'] or len(sphere['layers']) == 0: raise ValueError('sphere, if a ConductorModel, must be spherical ' 'with multiple layers, not a BEM or single-layer ' 'sphere (got %s)' % (sphere,)) sphere = tuple(sphere['r0']) + (sphere['layers'][0]['rad'],) sphere_units = 'm' sphere = np.array(sphere, dtype=float) if sphere.shape == (): sphere = np.concatenate([[0.] * 3, [sphere]]) if sphere.shape != (4,): raise ValueError('sphere must be float or 1D array of shape (4,), got ' 'array-like of shape %s' % (sphere.shape,)) _check_option('sphere_units', sphere_units, ('m', 'mm')) if sphere_units == 'mm': sphere /= 1000. sphere = np.array(sphere, float) return sphere def _check_freesurfer_home(): from .config import get_config fs_home = get_config('FREESURFER_HOME') if fs_home is None: raise RuntimeError( 'The FREESURFER_HOME environment variable is not set.') return fs_home def _suggest(val, options, cutoff=0.66): options = get_close_matches(val, options, cutoff=cutoff) if len(options) == 0: return '' elif len(options) == 1: return ' Did you mean %r?' % (options[0],) else: return ' Did you mean one of %r?' % (options,) def _check_on_missing(on_missing, name='on_missing'): _validate_type(on_missing, str, name) _check_option(name, on_missing, ['raise', 'warn', 'ignore']) def _on_missing(on_missing, msg, name='on_missing', error_klass=None): _check_on_missing(on_missing, name) error_klass = ValueError if error_klass is None else error_klass on_missing = 'raise' if on_missing == 'error' else on_missing on_missing = 'warn' if on_missing == 'warning' else on_missing if on_missing == 'raise': raise error_klass(msg) elif on_missing == 'warn': warn(msg) else: # Ignore assert on_missing == 'ignore' def _safe_input(msg, *, alt=None, use=None): try: return input(msg) except EOFError: # MATLAB or other non-stdin if use is not None: return use raise RuntimeError( f'Could not use input() to get a response to:\n{msg}\n' f'You can {alt} to avoid this error.') def _ensure_events(events): events_type = type(events) with warnings.catch_warnings(record=True): warnings.simplefilter('ignore') # deprecation for object array events = np.asarray(events) if not np.issubdtype(events.dtype, np.integer): raise TypeError('events should be a NumPy array of integers, ' f'got {events_type}') if events.ndim != 2 or events.shape[1] != 3: raise ValueError( f'events must be of shape (N, 3), got {events.shape}') return events
bsd-3-clause
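The helpers above (check_version, _validate_type, _check_option, _suggest) all follow the same pattern: validate an argument early and fail with a readable, suggestion-bearing message. Below is a standalone sketch of that pattern using only the standard library; the function name and message wording are illustrative and are not part of MNE's API.

from difflib import get_close_matches


def check_option(name, value, allowed):
    """Return ``value`` if it is in ``allowed``, else raise a readable ValueError."""
    if value in allowed:
        return value
    # Offer a "did you mean ...?" hint, as _suggest does above.
    hint = get_close_matches(str(value), [str(a) for a in allowed], n=1, cutoff=0.66)
    suggestion = (' Did you mean %r?' % hint[0]) if hint else ''
    raise ValueError('Invalid value for the %r parameter. Allowed values are %s, '
                     'but got %r instead.%s'
                     % (name, ', '.join(repr(a) for a in allowed), value, suggestion))


check_option('on_missing', 'warn', ('raise', 'warn', 'ignore'))      # returns 'warn'
# check_option('on_missing', 'warning', ('raise', 'warn', 'ignore'))  # raises with a hint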
nealchenzhang/EODAnalyzer
plot.py
1
2672
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
#
# Created on Tue Jun 13 10:04:11 2017
# @author: nealcz @Aian_fund
# This program is a personal trading platform designed when employed in
# Aihui Asset Management as a quantitative analyst.
#
# Contact:
# Name: Chen Zhang (Neal)
# Mobile: (+86) 139-1706-0712
# E-mail: nealzc1991@gmail.com
###############################################################################

import pandas as pd
import numpy as np
import os

import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.finance as mpf
import matplotlib.dates as mdates

import datetime as dt

### PP Data
os.chdir('D:\\Neal\\EODAnalyzer')
pp = pd.read_excel('PP1709.xlsx')
pp = pp.iloc[:, 0:5]
columns = ['Timestamp', 'Open', 'High', 'Low', 'Close']
pp.columns = columns
pp = pp.drop(pp.index[-2:])
ppp = pp.iloc[-300:]
ppp = ppp.set_index('Timestamp', drop=True)

### Orders
# NOTE: `x` is an external backtest/strategy object supplied elsewhere;
# it is not defined in this script.
df_Orders = x.backtest_trading('pp')
df_Orders.loc[:, 'Timestamp'] = df_Orders.loc[:, 'Timestamp'].apply(pd.Timestamp)
df_Orders = df_Orders.set_index('Timestamp', drop=True)

# The merge below is unused; the concat that follows does the index alignment.
pd.merge(ppp, df_Orders, how='inner', on=[ppp.index])
Order_4_plot = pd.concat([ppp, df_Orders], axis=1, join='inner')

### Plot
Order_4_plot = Order_4_plot.reset_index()
Order_4_plot.loc[:, 'Time'] = Order_4_plot.loc[:, 'Timestamp'].apply(mdates.date2num)
Prices = [(Order_4_plot.iloc[a, 5], Order_4_plot.iloc[a, 1], Order_4_plot.iloc[a, 2],
           Order_4_plot.iloc[a, 3], Order_4_plot.iloc[a, 4])
          for a in range(len(Order_4_plot.index))]

ppp = ppp.reset_index()
ppp.loc[:, 'Time'] = ppp.loc[:, 'Timestamp'].apply(mdates.date2num)
Prices2 = [(ppp.iloc[a, 5], ppp.iloc[a, 1], ppp.iloc[a, 2], ppp.iloc[a, 3], ppp.iloc[a, 4])
           for a in range(len(ppp.index))]

########################
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.3)

#########
#OPEN = Order_4_plot.loc[:, 'Open'][0:2]
#HIGH = Order_4_plot.loc[:, 'High'][0:2]
#LOW = Order_4_plot.loc[:, 'Low'][0:2]
#CLOSE = Order_4_plot.loc[:, 'Close'][0:2]
#mpf.candlestick2_ohlc(ax, OPEN, HIGH, LOW, CLOSE, width=2, colorup='r', colordown='g')
########

#ax.xaxis.set_major_locator
ax.set_xticks(ppp.loc[:, 'Time'])
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:00'))
mpf.candlestick_ohlc(ax, Prices2, width=0.5)

##
#plt.gca()
#plt.plot(Order_4_plot.loc[:, 'Timestamp'], Order_4_plot.loc[:, u'成交价'], 'v')  # u'成交价' = trade price

ax.xaxis_date()
ax.autoscale_view()
plt.setp(plt.gca().get_xticklabels(), rotation=45, horizontalalignment='right')
plt.show()
mit
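The script above relies on matplotlib.finance, which was removed from matplotlib itself; its maintained successor is the separate mplfinance package. Below is a sketch of the same candlestick view with that package. The Excel file name and column layout simply repeat the assumptions of the script, and the chart style is an arbitrary choice.

import pandas as pd
import mplfinance as mpf   # the maintained successor of matplotlib.finance

bars = pd.read_excel('PP1709.xlsx').iloc[:, 0:5]
bars.columns = ['Timestamp', 'Open', 'High', 'Low', 'Close']
bars['Timestamp'] = pd.to_datetime(bars['Timestamp'])
bars = bars.set_index('Timestamp')

# mplfinance expects a DatetimeIndex plus Open/High/Low/Close columns.
mpf.plot(bars.tail(300), type='candle', style='charles', title='PP1709, last 300 bars')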
DGrady/pandas
setup.py
2
28313
#!/usr/bin/env python """ Parts of this file were taken from the pyzmq project (https://github.com/zeromq/pyzmq) which have been permitted for use under the BSD license. Parts are from lxml (https://github.com/lxml/lxml) """ import os import sys import shutil import warnings import re import platform from distutils.version import LooseVersion def is_platform_windows(): return sys.platform == 'win32' or sys.platform == 'cygwin' def is_platform_linux(): return sys.platform == 'linux2' def is_platform_mac(): return sys.platform == 'darwin' # versioning import versioneer cmdclass = versioneer.get_cmdclass() min_cython_ver = '0.23' try: import Cython ver = Cython.__version__ _CYTHON_INSTALLED = ver >= LooseVersion(min_cython_ver) except ImportError: _CYTHON_INSTALLED = False try: import pkg_resources from setuptools import setup, Command _have_setuptools = True except ImportError: # no setuptools installed from distutils.core import setup, Command _have_setuptools = False setuptools_kwargs = {} min_numpy_ver = '1.7.0' if sys.version_info[0] >= 3: setuptools_kwargs = { 'zip_safe': False, 'install_requires': ['python-dateutil >= 2', 'pytz >= 2011k', 'numpy >= %s' % min_numpy_ver], 'setup_requires': ['numpy >= %s' % min_numpy_ver], } if not _have_setuptools: sys.exit("need setuptools/distribute for Py3k" "\n$ pip install distribute") else: setuptools_kwargs = { 'install_requires': ['python-dateutil', 'pytz >= 2011k', 'numpy >= %s' % min_numpy_ver], 'setup_requires': ['numpy >= %s' % min_numpy_ver], 'zip_safe': False, } if not _have_setuptools: try: import numpy import dateutil setuptools_kwargs = {} except ImportError: sys.exit("install requires: 'python-dateutil < 2','numpy'." " use pip or easy_install." "\n $ pip install 'python-dateutil < 2' 'numpy'") from distutils.extension import Extension from distutils.command.build import build from distutils.command.build_ext import build_ext as _build_ext try: if not _CYTHON_INSTALLED: raise ImportError('No supported version of Cython installed.') try: from Cython.Distutils.old_build_ext import old_build_ext as _build_ext except ImportError: # Pre 0.25 from Cython.Distutils import build_ext as _build_ext cython = True except ImportError: cython = False if cython: try: try: from Cython import Tempita as tempita except ImportError: import tempita except ImportError: raise ImportError('Building pandas requires Tempita: ' 'pip install Tempita') from os.path import join as pjoin _pxi_dep_template = { 'algos': ['_libs/algos_common_helper.pxi.in', '_libs/algos_take_helper.pxi.in', '_libs/algos_rank_helper.pxi.in'], 'groupby': ['_libs/groupby_helper.pxi.in'], 'join': ['_libs/join_helper.pxi.in', '_libs/join_func_helper.pxi.in'], 'reshape': ['_libs/reshape_helper.pxi.in'], 'hashtable': ['_libs/hashtable_class_helper.pxi.in', '_libs/hashtable_func_helper.pxi.in'], 'index': ['_libs/index_class_helper.pxi.in'], 'sparse': ['_libs/sparse_op_helper.pxi.in'], 'interval': ['_libs/intervaltree.pxi.in'] } _pxifiles = [] _pxi_dep = {} for module, files in _pxi_dep_template.items(): pxi_files = [pjoin('pandas', x) for x in files] _pxifiles.extend(pxi_files) _pxi_dep[module] = pxi_files class build_ext(_build_ext): def build_extensions(self): # if builing from c files, don't need to # generate template output if cython: for pxifile in _pxifiles: # build pxifiles first, template extention must be .pxi.in assert pxifile.endswith('.pxi.in') outfile = pxifile[:-3] if (os.path.exists(outfile) and os.stat(pxifile).st_mtime < os.stat(outfile).st_mtime): # if .pxi.in is not updated, 
no need to output .pxi continue with open(pxifile, "r") as f: tmpl = f.read() pyxcontent = tempita.sub(tmpl) with open(outfile, "w") as f: f.write(pyxcontent) numpy_incl = pkg_resources.resource_filename('numpy', 'core/include') for ext in self.extensions: if hasattr(ext, 'include_dirs') and not numpy_incl in ext.include_dirs: ext.include_dirs.append(numpy_incl) _build_ext.build_extensions(self) DESCRIPTION = ("Powerful data structures for data analysis, time series," "and statistics") LONG_DESCRIPTION = """ **pandas** is a Python package providing fast, flexible, and expressive data structures designed to make working with structured (tabular, multidimensional, potentially heterogeneous) and time series data both easy and intuitive. It aims to be the fundamental high-level building block for doing practical, **real world** data analysis in Python. Additionally, it has the broader goal of becoming **the most powerful and flexible open source data analysis / manipulation tool available in any language**. It is already well on its way toward this goal. pandas is well suited for many different kinds of data: - Tabular data with heterogeneously-typed columns, as in an SQL table or Excel spreadsheet - Ordered and unordered (not necessarily fixed-frequency) time series data. - Arbitrary matrix data (homogeneously typed or heterogeneous) with row and column labels - Any other form of observational / statistical data sets. The data actually need not be labeled at all to be placed into a pandas data structure The two primary data structures of pandas, Series (1-dimensional) and DataFrame (2-dimensional), handle the vast majority of typical use cases in finance, statistics, social science, and many areas of engineering. For R users, DataFrame provides everything that R's ``data.frame`` provides and much more. pandas is built on top of `NumPy <http://www.numpy.org>`__ and is intended to integrate well within a scientific computing environment with many other 3rd party libraries. Here are just a few of the things that pandas does well: - Easy handling of **missing data** (represented as NaN) in floating point as well as non-floating point data - Size mutability: columns can be **inserted and deleted** from DataFrame and higher dimensional objects - Automatic and explicit **data alignment**: objects can be explicitly aligned to a set of labels, or the user can simply ignore the labels and let `Series`, `DataFrame`, etc. automatically align the data for you in computations - Powerful, flexible **group by** functionality to perform split-apply-combine operations on data sets, for both aggregating and transforming data - Make it **easy to convert** ragged, differently-indexed data in other Python and NumPy data structures into DataFrame objects - Intelligent label-based **slicing**, **fancy indexing**, and **subsetting** of large data sets - Intuitive **merging** and **joining** data sets - Flexible **reshaping** and pivoting of data sets - **Hierarchical** labeling of axes (possible to have multiple labels per tick) - Robust IO tools for loading data from **flat files** (CSV and delimited), Excel files, databases, and saving / loading data from the ultrafast **HDF5 format** - **Time series**-specific functionality: date range generation and frequency conversion, moving window statistics, moving window linear regressions, date shifting and lagging, etc. Many of these principles are here to address the shortcomings frequently experienced using other languages / scientific research environments. 
For data scientists, working with data is typically divided into multiple stages: munging and cleaning data, analyzing / modeling it, then organizing the results of the analysis into a form suitable for plotting or tabular display. pandas is the ideal tool for all of these tasks. Note ---- Windows binaries built against NumPy 1.8.1 """ DISTNAME = 'pandas' LICENSE = 'BSD' AUTHOR = "The PyData Development Team" EMAIL = "pydata@googlegroups.com" URL = "http://pandas.pydata.org" DOWNLOAD_URL = '' CLASSIFIERS = [ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Operating System :: OS Independent', 'Intended Audience :: Science/Research', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Cython', 'Topic :: Scientific/Engineering', ] class CleanCommand(Command): """Custom distutils command to clean the .so and .pyc files.""" user_options = [("all", "a", "")] def initialize_options(self): self.all = True self._clean_me = [] self._clean_trees = [] base = pjoin('pandas','_libs', 'src') dt = pjoin(base,'datetime') src = base util = pjoin('pandas','util') parser = pjoin(base,'parser') ujson_python = pjoin(base,'ujson','python') ujson_lib = pjoin(base,'ujson','lib') self._clean_exclude = [pjoin(dt,'np_datetime.c'), pjoin(dt,'np_datetime_strings.c'), pjoin(src,'period_helper.c'), pjoin(parser,'tokenizer.c'), pjoin(parser,'io.c'), pjoin(ujson_python,'ujson.c'), pjoin(ujson_python,'objToJSON.c'), pjoin(ujson_python,'JSONtoObj.c'), pjoin(ujson_lib,'ultrajsonenc.c'), pjoin(ujson_lib,'ultrajsondec.c'), pjoin(util,'move.c'), ] for root, dirs, files in os.walk('pandas'): for f in files: filepath = pjoin(root, f) if filepath in self._clean_exclude: continue if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o', '.pyo', '.pyd', '.c', '.orig'): self._clean_me.append(filepath) for d in dirs: if d == '__pycache__': self._clean_trees.append(pjoin(root, d)) # clean the generated pxi files for pxifile in _pxifiles: pxifile = pxifile.replace(".pxi.in", ".pxi") self._clean_me.append(pxifile) for d in ('build', 'dist'): if os.path.exists(d): self._clean_trees.append(d) def finalize_options(self): pass def run(self): for clean_me in self._clean_me: try: os.unlink(clean_me) except Exception: pass for clean_tree in self._clean_trees: try: shutil.rmtree(clean_tree) except Exception: pass # we need to inherit from the versioneer # class as it encodes the version info sdist_class = cmdclass['sdist'] class CheckSDist(sdist_class): """Custom sdist that ensures Cython has compiled all pyx files to c.""" _pyxfiles = ['pandas/_libs/lib.pyx', 'pandas/_libs/hashtable.pyx', 'pandas/_libs/tslib.pyx', 'pandas/_libs/period.pyx', 'pandas/_libs/index.pyx', 'pandas/_libs/algos.pyx', 'pandas/_libs/join.pyx', 'pandas/_libs/interval.pyx', 'pandas/_libs/hashing.pyx', 'pandas/_libs/testing.pyx', 'pandas/_libs/window.pyx', 'pandas/_libs/sparse.pyx', 'pandas/_libs/parsers.pyx', 'pandas/io/sas/sas.pyx'] def initialize_options(self): sdist_class.initialize_options(self) ''' self._pyxfiles = [] for root, dirs, files in os.walk('pandas'): for f in files: if f.endswith('.pyx'): self._pyxfiles.append(pjoin(root, f)) ''' def run(self): if 'cython' in cmdclass: self.run_command('cython') else: for pyxfile in self._pyxfiles: cfile = pyxfile[:-3] + 'c' msg = "C-source file '%s' not found." 
% (cfile) +\ " Run 'setup.py cython' before sdist." assert os.path.isfile(cfile), msg sdist_class.run(self) class CheckingBuildExt(build_ext): """ Subclass build_ext to get clearer report if Cython is necessary. """ def check_cython_extensions(self, extensions): for ext in extensions: for src in ext.sources: if not os.path.exists(src): print("{}: -> [{}]".format(ext.name, ext.sources)) raise Exception("""Cython-generated file '%s' not found. Cython is required to compile pandas from a development branch. Please install Cython or download a release package of pandas. """ % src) def build_extensions(self): self.check_cython_extensions(self.extensions) build_ext.build_extensions(self) class CythonCommand(build_ext): """Custom distutils command subclassed from Cython.Distutils.build_ext to compile pyx->c, and stop there. All this does is override the C-compile method build_extension() with a no-op.""" def build_extension(self, ext): pass class DummyBuildSrc(Command): """ numpy's build_src command interferes with Cython's build_ext. """ user_options = [] def initialize_options(self): self.py_modules_dict = {} def finalize_options(self): pass def run(self): pass cmdclass.update({'clean': CleanCommand, 'build': build}) try: from wheel.bdist_wheel import bdist_wheel class BdistWheel(bdist_wheel): def get_tag(self): tag = bdist_wheel.get_tag(self) repl = 'macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64' if tag[2] == 'macosx_10_6_intel': tag = (tag[0], tag[1], repl) return tag cmdclass['bdist_wheel'] = BdistWheel except ImportError: pass if cython: suffix = '.pyx' cmdclass['build_ext'] = CheckingBuildExt cmdclass['cython'] = CythonCommand else: suffix = '.c' cmdclass['build_src'] = DummyBuildSrc cmdclass['build_ext'] = CheckingBuildExt lib_depends = ['reduce', 'inference', 'properties'] def srcpath(name=None, suffix='.pyx', subdir='src'): return pjoin('pandas', subdir, name + suffix) if suffix == '.pyx': lib_depends = [srcpath(f, suffix='.pyx', subdir='_libs/src') for f in lib_depends] lib_depends.append('pandas/_libs/src/util.pxd') else: lib_depends = [] plib_depends = [] common_include = ['pandas/_libs/src/klib', 'pandas/_libs/src'] def pxd(name): return os.path.abspath(pjoin('pandas', name + '.pxd')) # args to ignore warnings if is_platform_windows(): extra_compile_args=[] else: extra_compile_args=['-Wno-unused-function'] lib_depends = lib_depends + ['pandas/_libs/src/numpy_helper.h', 'pandas/_libs/src/parse_helper.h', 'pandas/_libs/src/compat_helper.h'] tseries_depends = ['pandas/_libs/src/datetime/np_datetime.h', 'pandas/_libs/src/datetime/np_datetime_strings.h', 'pandas/_libs/src/datetime_helper.h', 'pandas/_libs/src/period_helper.h', 'pandas/_libs/src/datetime.pxd'] # some linux distros require it libraries = ['m'] if not is_platform_windows() else [] ext_data = { '_libs.lib': {'pyxfile': '_libs/lib', 'depends': lib_depends + tseries_depends}, '_libs.hashtable': {'pyxfile': '_libs/hashtable', 'pxdfiles': ['_libs/hashtable'], 'depends': (['pandas/_libs/src/klib/khash_python.h'] + _pxi_dep['hashtable'])}, '_libs.tslib': {'pyxfile': '_libs/tslib', 'pxdfiles': ['_libs/src/util', '_libs/lib'], 'depends': tseries_depends, 'sources': ['pandas/_libs/src/datetime/np_datetime.c', 'pandas/_libs/src/datetime/np_datetime_strings.c', 'pandas/_libs/src/period_helper.c']}, '_libs.period': {'pyxfile': '_libs/period', 'depends': tseries_depends, 'sources': ['pandas/_libs/src/datetime/np_datetime.c', 'pandas/_libs/src/datetime/np_datetime_strings.c', 'pandas/_libs/src/period_helper.c']}, '_libs.index': 
{'pyxfile': '_libs/index', 'sources': ['pandas/_libs/src/datetime/np_datetime.c', 'pandas/_libs/src/datetime/np_datetime_strings.c'], 'pxdfiles': ['_libs/src/util', '_libs/hashtable'], 'depends': _pxi_dep['index']}, '_libs.algos': {'pyxfile': '_libs/algos', 'pxdfiles': ['_libs/src/util', '_libs/algos', '_libs/hashtable'], 'depends': _pxi_dep['algos']}, '_libs.groupby': {'pyxfile': '_libs/groupby', 'pxdfiles': ['_libs/src/util', '_libs/algos'], 'depends': _pxi_dep['groupby']}, '_libs.join': {'pyxfile': '_libs/join', 'pxdfiles': ['_libs/src/util', '_libs/hashtable'], 'depends': _pxi_dep['join']}, '_libs.reshape': {'pyxfile': '_libs/reshape', 'depends': _pxi_dep['reshape']}, '_libs.interval': {'pyxfile': '_libs/interval', 'pxdfiles': ['_libs/hashtable'], 'depends': _pxi_dep['interval']}, '_libs.window': {'pyxfile': '_libs/window', 'pxdfiles': ['_libs/src/skiplist', '_libs/src/util'], 'depends': ['pandas/_libs/src/skiplist.pyx', 'pandas/_libs/src/skiplist.h']}, '_libs.parsers': {'pyxfile': '_libs/parsers', 'depends': ['pandas/_libs/src/parser/tokenizer.h', 'pandas/_libs/src/parser/io.h', 'pandas/_libs/src/numpy_helper.h'], 'sources': ['pandas/_libs/src/parser/tokenizer.c', 'pandas/_libs/src/parser/io.c']}, '_libs.sparse': {'pyxfile': '_libs/sparse', 'depends': (['pandas/_libs/sparse.pyx'] + _pxi_dep['sparse'])}, '_libs.testing': {'pyxfile': '_libs/testing', 'depends': ['pandas/_libs/testing.pyx']}, '_libs.hashing': {'pyxfile': '_libs/hashing', 'depends': ['pandas/_libs/hashing.pyx']}, 'io.sas._sas': {'pyxfile': 'io/sas/sas'}, } extensions = [] for name, data in ext_data.items(): sources = [srcpath(data['pyxfile'], suffix=suffix, subdir='')] pxds = [pxd(x) for x in data.get('pxdfiles', [])] if suffix == '.pyx' and pxds: sources.extend(pxds) sources.extend(data.get('sources', [])) include = data.get('include', common_include) obj = Extension('pandas.%s' % name, sources=sources, depends=data.get('depends', []), include_dirs=include, extra_compile_args=extra_compile_args) extensions.append(obj) #---------------------------------------------------------------------- # msgpack if sys.byteorder == 'big': macros = [('__BIG_ENDIAN__', '1')] else: macros = [('__LITTLE_ENDIAN__', '1')] packer_ext = Extension('pandas.io.msgpack._packer', depends=['pandas/_libs/src/msgpack/pack.h', 'pandas/_libs/src/msgpack/pack_template.h'], sources = [srcpath('_packer', suffix=suffix if suffix == '.pyx' else '.cpp', subdir='io/msgpack')], language='c++', include_dirs=['pandas/_libs/src/msgpack'] + common_include, define_macros=macros, extra_compile_args=extra_compile_args) unpacker_ext = Extension('pandas.io.msgpack._unpacker', depends=['pandas/_libs/src/msgpack/unpack.h', 'pandas/_libs/src/msgpack/unpack_define.h', 'pandas/_libs/src/msgpack/unpack_template.h'], sources = [srcpath('_unpacker', suffix=suffix if suffix == '.pyx' else '.cpp', subdir='io/msgpack')], language='c++', include_dirs=['pandas/_libs/src/msgpack'] + common_include, define_macros=macros, extra_compile_args=extra_compile_args) extensions.append(packer_ext) extensions.append(unpacker_ext) #---------------------------------------------------------------------- # ujson if suffix == '.pyx' and 'setuptools' in sys.modules: # undo dumb setuptools bug clobbering .pyx sources back to .c for ext in extensions: if ext.sources[0].endswith(('.c','.cpp')): root, _ = os.path.splitext(ext.sources[0]) ext.sources[0] = root + suffix ujson_ext = Extension('pandas._libs.json', depends=['pandas/_libs/src/ujson/lib/ultrajson.h', 'pandas/_libs/src/datetime_helper.h', 
'pandas/_libs/src/numpy_helper.h'], sources=['pandas/_libs/src/ujson/python/ujson.c', 'pandas/_libs/src/ujson/python/objToJSON.c', 'pandas/_libs/src/ujson/python/JSONtoObj.c', 'pandas/_libs/src/ujson/lib/ultrajsonenc.c', 'pandas/_libs/src/ujson/lib/ultrajsondec.c', 'pandas/_libs/src/datetime/np_datetime.c', 'pandas/_libs/src/datetime/np_datetime_strings.c'], include_dirs=['pandas/_libs/src/ujson/python', 'pandas/_libs/src/ujson/lib', 'pandas/_libs/src/datetime'] + common_include, extra_compile_args=['-D_GNU_SOURCE'] + extra_compile_args) extensions.append(ujson_ext) #---------------------------------------------------------------------- # util # extension for pseudo-safely moving bytes into mutable buffers _move_ext = Extension('pandas.util._move', depends=[], sources=['pandas/util/move.c']) extensions.append(_move_ext) if _have_setuptools: setuptools_kwargs["test_suite"] = "nose.collector" # The build cache system does string matching below this point. # if you change something, be careful. setup(name=DISTNAME, maintainer=AUTHOR, version=versioneer.get_version(), packages=['pandas', 'pandas.api', 'pandas.api.types', 'pandas.compat', 'pandas.compat.numpy', 'pandas.core', 'pandas.core.dtypes', 'pandas.core.indexes', 'pandas.core.computation', 'pandas.core.reshape', 'pandas.core.sparse', 'pandas.core.tools', 'pandas.core.util', 'pandas.computation', 'pandas.errors', 'pandas.formats', 'pandas.io', 'pandas.io.json', 'pandas.io.sas', 'pandas.io.msgpack', 'pandas.io.formats', 'pandas.io.clipboard', 'pandas._libs', 'pandas.plotting', 'pandas.stats', 'pandas.types', 'pandas.util', 'pandas.tests', 'pandas.tests.api', 'pandas.tests.dtypes', 'pandas.tests.computation', 'pandas.tests.sparse', 'pandas.tests.frame', 'pandas.tests.indexing', 'pandas.tests.indexes', 'pandas.tests.indexes.datetimes', 'pandas.tests.indexes.timedeltas', 'pandas.tests.indexes.period', 'pandas.tests.internals', 'pandas.tests.io', 'pandas.tests.io.json', 'pandas.tests.io.parser', 'pandas.tests.io.sas', 'pandas.tests.io.msgpack', 'pandas.tests.io.formats', 'pandas.tests.groupby', 'pandas.tests.reshape', 'pandas.tests.series', 'pandas.tests.scalar', 'pandas.tests.tseries', 'pandas.tests.plotting', 'pandas.tests.tools', 'pandas.tests.util', 'pandas.tools', 'pandas.tseries', ], package_data={'pandas.tests': ['data/*.csv'], 'pandas.tests.indexes': ['data/*.pickle'], 'pandas.tests.io': ['data/legacy_hdf/*.h5', 'data/legacy_pickle/*/*.pickle', 'data/legacy_msgpack/*/*.msgpack', 'data/*.csv*', 'data/*.dta', 'data/*.pickle', 'data/*.txt', 'data/*.xls', 'data/*.xlsx', 'data/*.xlsm', 'data/*.table', 'parser/data/*.csv', 'parser/data/*.gz', 'parser/data/*.bz2', 'parser/data/*.txt', 'parser/data/*.tar', 'parser/data/*.tar.gz', 'sas/data/*.csv', 'sas/data/*.xpt', 'sas/data/*.sas7bdat', 'data/*.html', 'data/html_encoding/*.html', 'json/data/*.json'], 'pandas.tests.io.formats': ['data/*.csv'], 'pandas.tests.io.msgpack': ['data/*.mp'], 'pandas.tests.reshape': ['data/*.csv'], 'pandas.tests.tseries': ['data/*.pickle'], 'pandas.io.formats': ['templates/*.tpl'] }, ext_modules=extensions, maintainer_email=EMAIL, description=DESCRIPTION, license=LICENSE, cmdclass=cmdclass, url=URL, download_url=DOWNLOAD_URL, long_description=LONG_DESCRIPTION, classifiers=CLASSIFIERS, platforms='any', **setuptools_kwargs)
bsd-3-clause
okadate/romspy
romspy/make/boundary/bry_bio_fennel_linear.py
1
2258
# -*- coding: utf-8 -*-
"""
Program to make bry nc file
okada on 2014/10/21
"""

import numpy as np
import pandas as pd

Chl2C = 0.05    # okada (=1/20 gChl/gC)
PhyCN = 6.625   # (=106/16 molC/molN)
Phy2Zoo = 0.1


def bry_bio_fennel_linear(dims, csvfiles):

    print 'bry_bio_fennel_linear:', 'w', csvfiles['w']
    print 'bry_bio_fennel_linear:', 's', csvfiles['s']

    xi_rho = dims['xi']
    eta_rho = dims['eta']
    s_rho = dims['s']

    bio = {}
    bio['w'] = pd.read_csv(csvfiles['w'], skiprows=[2], parse_dates=0, index_col=0, header=[0,1])
    bio['s'] = pd.read_csv(csvfiles['s'], skiprows=[2], parse_dates=0, index_col=0, header=[0,1])

    for d in bio.keys():
        for name in bio[d].columns:
            if 'N' in name:
                bio[d][name] = bio[d][name].apply(lambda v: v/14.0*1000.0)
            elif 'P' in name:
                bio[d][name] = bio[d][name].apply(lambda v: v/31.0*1000.0)
            elif 'oxygen' in name:
                bio[d][name] = bio[d][name].apply(lambda v: v/32.0*1000.0)
        for l in ['upper', 'lower']:
            bio[d]['phyt',l] = bio[d]['chlo',l] / Chl2C / 12.0 / PhyCN
            bio[d]['zoop',l] = bio[d]['phyt',l] * Phy2Zoo
            bio[d]['H2S',l] = 0.0

    bio_out = {}
    ta = len(bio[d])
    for name in bio[d].stack(1).columns:
        bio_out[name] = {}
        for d in bio.keys():
            if d == 'w':
                length = eta_rho
            elif d == 's':
                length = xi_rho
            bio_out[name][d] = np.zeros([ta, s_rho, length])
            for t, index in enumerate(bio[d].index):
                upper = bio[d][name,'upper'][index]
                lower = bio[d][name,'lower'][index]
                profile = np.linspace(lower, upper, s_rho)
                for s, value in enumerate(profile):
                    bio_out[name][d][t,s,:] = value

    return bio_out, bio[d].index


if __name__ == '__main__':
    dims = {'xi':1, 'eta':1, 's':20}
    fennelfiles = {'w':'/Users/teruhisa/Dropbox/Data/bry_fennel_w_linear.csv',
                   's':'/Users/teruhisa/Dropbox/Data/bry_fennel_s_linear.csv'}
    print bry_bio_fennel_linear(dims, fennelfiles)
mit
adamrp/qiime
qiime/plot_rank_abundance_graph.py
15
4175
#!/usr/bin/env python
# File created on 17 Aug 2010
from __future__ import division

__author__ = "Jens Reeder"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Jens Reeder", "Emily TerAvest"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "justinak@gmail.com"

from matplotlib import use
use('Agg', warn=False)

from numpy import arange, array, sort
from itertools import cycle
from matplotlib.pyplot import plot, gca, ylim, xlim, show, legend, \
    savefig
from os.path import join

from qiime.colors import data_color_order, data_colors
from biom.table import UnknownIDError


def make_sorted_frequencies(counts, absolute=False):
    """transform and sort a vector of count.

    counts: a column of an OTU table
    absolute: if True return absolute values instead of frequencies.
    """
    c = sort(counts)
    c = c[c.nonzero()]
    c = c[::-1]
    if absolute:
        return c
    else:
        f = c / float(c.sum())
        return f


def plot_rank_abundance_graph(otu_count_vector, color='red',
                              absolute=False, label=None):
    """Plots rank-abundance curve.

    otu_count_vector: a vector of otu counts for a single sample
    color: color of the series to plot
    absolute: if True plot absolute counts instead of freqs
    label: text for the legend of this series
    """

    f = make_sorted_frequencies(otu_count_vector, absolute)
    x = arange(1, len(f) + 1)
    plot(x, f, color=color, alpha=0.8, label=label)
    ax = gca()
    return ax


def plot_rank_abundance_graphs(result_fp, sample_names, otu_table,
                               file_type='pdf',
                               absolute_counts=False,
                               x_linear_scale=False,
                               y_linear_scale=False,
                               no_legend=False,
                               log_fh=None):
    """plot rank-abundance curves for sample specified in sample_name.

    result_fp: filename of output figure
    sample_names: comma separated string of sample names
    otu_table_fh: open file handle to otu table
    file_type: valid matplotlib file type
    x_linear_scale: if True draw x axis in linear scale, otherwise use log
    y_linear_scale: if True draw y axis in linear scale, otherwise use log
    no_legend: if True don't draw legend
    log_fh: open file handle to log file, if not None used to log
    """

    # figure out which samples to draw
    if sample_names == '*':
        user_sample_names = otu_table.ids()
    else:
        user_sample_names = sample_names.split(',')
        if len(user_sample_names) < 1:
            raise ValueError("sample IDs must be comma separated list of " +
                             "sample names - found %s" % sample_names)

    # do the actual drawing
    ax = None
    for sample_name, color in zip(user_sample_names, cycle(data_color_order)):
        color = data_colors[color].toHex()
        try:
            otu_count_vector = otu_table.data(sample_name, 'sample')
        except UnknownIDError:
            if log_fh:
                log_fh.write(
                    "UnknownIDError: Sample name %s not in OTU table - skipping." %
                    sample_name)
            continue

        ax = plot_rank_abundance_graph(otu_count_vector,
                                       color=color,
                                       absolute=absolute_counts,
                                       label=sample_name)
        ax.set_label(sample_name)

    if ax is None:
        # ax should be defined if at least one series has been drawn
        raise ValueError(
            "No data series drawn. Check your OTU table and sample names")

    # settings for all series
    ax.grid()
    ax.set_xlabel('Species rank')
    if absolute_counts:
        ax.set_ylabel('Absolute abundance')
    else:
        ax.set_ylabel('Relative abundance')

    if not x_linear_scale:
        ax.set_xscale('log')
    if not y_linear_scale:
        ax.set_yscale('log')

    if not no_legend:
        legend()

    if not result_fp.endswith(file_type):
        result_fp += '.' + file_type
    savefig(result_fp)
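

# --- Illustrative usage (added commentary, not part of the original QIIME
# module).  A minimal, hypothetical check of make_sorted_frequencies: zeros
# are dropped, counts are sorted in decreasing order, and unless
# absolute=True the result is normalised to sum to 1.  The array is made up.
if __name__ == '__main__':
    example_counts = array([0, 5, 1, 0, 4])
    print(make_sorted_frequencies(example_counts))                 # [0.5 0.4 0.1]
    print(make_sorted_frequencies(example_counts, absolute=True))  # [5 4 1]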
gpl-2.0
timestocome/Test-stock-prediction-algorithms
Curves, Markov and Bayes/StockCurves.py
1
5652
# http://github.com/timestocome
#
# Data is from http://finance.yahoo.com
#
#
# let's take another look at the gain loss curves
#
# Whoa, baby look at that BitCoin Volatility distribution
# what does it mean, idk?
# there is the obvious
# it's a highly volatile market prone to sudden changes
#
# might also mean more
# Gaussian distributions depend on lots of investors acting independently
# a flat Platykurtic might mean that's not the case?
# But that's probably obvious too,
# I'm still looking for more information


import numpy as np
import pandas as pd
from scipy.stats import kurtosis, skew
from scipy.stats import norm

import matplotlib.pyplot as plt
import matplotlib.mlab as mlab


#############################################################################
# compute stats
#############################################################################
pd.set_option('display.max_rows', 5000)


# http://www.itl.nist.gov/div898/handbook/eda/section3/eda35b.htm
def statistics(d):

    d = d.replace([np.inf, -np.inf], np.nan)

    m = np.mean(d)
    v = np.var(d)
    std = np.std(d)
    n = len(d)

    # Fisher-Pearson coefficient of skewness
    g1 = np.sum((d - m)**3 / n) / (std**3)
    #print('g1', g1)

    # size adjusted skew
    G1 = (np.sqrt(n*(n-1)))/(n-2) * g1
    #print('G1', G1)

    # kurtosis
    kurt = np.sum((d - m)**4 / n) / (std**4)

    # excess kurtosis
    e_kurt = kurt - 3

    '''
    print('----------------------------')
    print('mean', m)
    print('var', v)
    print('std', std)
    print('skew', G1, skew(d))
    print('kurtosis', kurt, kurtosis(d))
    print('e kurt', e_kurt)
    print('------------------------------')
    '''

    return m, v, std, G1, e_kurt


###########################################################################
# data has been combined using LoadAndMatchDates.py
# raw data is from finance.yahoo.com
###########################################################################
data = pd.read_csv('StockDataWithVolume.csv', index_col='Date', parse_dates=True)

# convert to log scale
data['NASDAQ'] = np.log(data['NASDAQ'])
data['S&P'] = np.log(data['S&P'])
data['DJIA'] = np.log(data['DJIA'])
data['Russell'] = np.log(data['Russell'])
data['BTC'] = np.log(data['BTC'])

# add volatility
# >1 increase over yesterday
# <1 decrease over yesterday
data['NASDAQ_dx'] = data['NASDAQ'] / data['NASDAQ'].shift(1)
data['DJIA_dx'] = data['DJIA'] / data['DJIA'].shift(1)
data['S&P_dx'] = data['S&P'] / data['S&P'].shift(1)
data['BTC_dx'] = data['BTC'] / data['BTC'].shift(1)
data['Russell_dx'] = data['Russell'] / data['Russell'].shift(1)

data = data.dropna(axis=0)
data = data.replace([np.inf, -np.inf], np.nan)

# sanity check numbers
#print(data.head())
statistics(data['NASDAQ_dx'])
statistics(data['DJIA_dx'])
statistics(data['Russell_dx'])
statistics(data['S&P_dx'])
statistics(data['BTC_dx'])


################################################################
# Let's see what kind of curves we have here.
#
# Most ML is done with Gaussian Curves,
# most economic data turns out to be Zipf Distributions
###############################################################
plt.figure(figsize=(12,22))
bins = 100

plt.subplot(511)
n, bins, patches = plt.hist(data['NASDAQ_dx'], bins, normed=1)
kurt = kurtosis(data['NASDAQ_dx'])
skw = skew(data['NASDAQ_dx'])
mu, sigma = norm.fit(data['NASDAQ_dx'])
y = mlab.normpdf(bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=2)
t = 'Nasdaq mean ' + str(mu) + ', std ' + str(sigma) + ', Kurtosis ' + str(kurt) + ', Skew ' + str(skw)
plt.ylabel('Probability')
plt.title(t)
plt.grid(True)
plt.xlim(.98, 1.02)
plt.ylim(0, 800)

plt.subplot(512)
n, bins, patches = plt.hist(data['DJIA_dx'], bins, normed=1)
kurt = kurtosis(data['DJIA_dx'])
skw = skew(data['DJIA_dx'])
mu, sigma = norm.fit(data['DJIA_dx'])
y = mlab.normpdf(bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=2)
t = 'DJIA mean ' + str(mu) + ', std ' + str(sigma) + ', Kurtosis ' + str(kurt) + ', Skew ' + str(skw)
plt.ylabel('Probability')
plt.title(t)
plt.grid(True)
plt.xlim(.98, 1.02)
plt.ylim(0, 800)

plt.subplot(513)
n, bins, patches = plt.hist(data['S&P_dx'], bins, normed=1)
kurt = kurtosis(data['S&P_dx'])
skw = skew(data['S&P_dx'])
mu, sigma = norm.fit(data['S&P_dx'])
y = mlab.normpdf(bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=2)
t = 'S&P mean ' + str(mu) + ', std ' + str(sigma) + ', Kurtosis ' + str(kurt) + ', Skew ' + str(skw)
plt.ylabel('Probability')
plt.title(t)
plt.grid(True)
plt.xlim(.98, 1.02)
plt.ylim(0, 800)

plt.subplot(514)
n, bins, patches = plt.hist(data['Russell_dx'], bins, normed=1)
kurt = kurtosis(data['Russell_dx'])
skw = skew(data['Russell_dx'])
mu, sigma = norm.fit(data['Russell_dx'])
y = mlab.normpdf(bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=2)
t = 'Russell mean ' + str(mu) + ', std ' + str(sigma) + ', Kurtosis ' + str(kurt) + ', Skew ' + str(skw)
plt.ylabel('Probability')
plt.title(t)
plt.grid(True)
plt.xlim(.98, 1.02)
plt.ylim(0, 800)

# stats package chokes on BitCoin stats
plt.subplot(515)
n, bins, patches = plt.hist(data['BTC_dx'], bins, normed=1)
m, var, std, skw, kurt = statistics(data['BTC_dx'])
#mu, sigma = norm.fit(data['BTC_dx'])
y = mlab.normpdf(bins, m, std)
l = plt.plot(bins, y, 'r--', linewidth=4)
t = 'BTC mean ' + str(m) + ', std ' + str(std) + ', Kurtosis ' + str(kurt) + ', Skew ' + str(skw)
plt.ylabel('Probability')
plt.title(t)
plt.grid(True)
plt.xlim(.98, 1.02)
plt.ylim(0, 800)

plt.xlabel('Gains > 1, Losses < 1, No Change = 1')

plt.savefig('histogram.png')
plt.show()
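

# --- Added aside (not in the original script) ---
# A quick, hedged way to see why the excess-kurtosis values above matter:
# scipy.stats.kurtosis returns excess kurtosis by default, so a normal sample
# sits near 0 while a heavy-tailed sample (closer to what BTC_dx shows) is
# much larger.  Synthetic data only; kept commented out so the script's
# behaviour is unchanged.
# rng = np.random.RandomState(0)
# print(kurtosis(rng.normal(size=100000)))         # ~0, mesokurtic
# print(kurtosis(rng.standard_t(3, size=100000)))  # >> 0, heavy tails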
mit
belltailjp/scikit-learn
sklearn/tests/test_kernel_ridge.py
342
3027
import numpy as np
import scipy.sparse as sp

from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings

from sklearn.utils.testing import assert_array_almost_equal


X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T


def test_kernel_ridge():
    pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
    pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
    assert_array_almost_equal(pred, pred2)


def test_kernel_ridge_csr():
    pred = Ridge(alpha=1, fit_intercept=False,
                 solver="cholesky").fit(Xcsr, y).predict(Xcsr)
    pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
    assert_array_almost_equal(pred, pred2)


def test_kernel_ridge_csc():
    pred = Ridge(alpha=1, fit_intercept=False,
                 solver="cholesky").fit(Xcsc, y).predict(Xcsc)
    pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
    assert_array_almost_equal(pred, pred2)


def test_kernel_ridge_singular_kernel():
    # alpha=0 causes a LinAlgError in computing the dual coefficients,
    # which causes a fallback to a lstsq solver. This is tested here.
    pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
    kr = KernelRidge(kernel="linear", alpha=0)
    ignore_warnings(kr.fit)(X, y)
    pred2 = kr.predict(X)
    assert_array_almost_equal(pred, pred2)


def test_kernel_ridge_precomputed():
    for kernel in ["linear", "rbf", "poly", "cosine"]:
        K = pairwise_kernels(X, X, metric=kernel)
        pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
        pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
        assert_array_almost_equal(pred, pred2)


def test_kernel_ridge_precomputed_kernel_unchanged():
    K = np.dot(X, X.T)
    K2 = K.copy()
    KernelRidge(kernel="precomputed").fit(K, y)
    assert_array_almost_equal(K, K2)


def test_kernel_ridge_sample_weights():
    K = np.dot(X, X.T)  # precomputed kernel
    sw = np.random.RandomState(0).rand(X.shape[0])

    pred = Ridge(alpha=1,
                 fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
    pred2 = KernelRidge(kernel="linear",
                        alpha=1).fit(X, y, sample_weight=sw).predict(X)
    pred3 = KernelRidge(kernel="precomputed",
                        alpha=1).fit(K, y, sample_weight=sw).predict(K)
    assert_array_almost_equal(pred, pred2)
    assert_array_almost_equal(pred, pred3)


def test_kernel_ridge_multi_output():
    pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
    pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
    assert_array_almost_equal(pred, pred2)

    pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
    pred3 = np.array([pred3, pred3]).T
    assert_array_almost_equal(pred2, pred3)
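

# --- Illustrative note (added, not part of the scikit-learn test suite) ---
# The tests above rely on the identity that kernel ridge regression with a
# linear kernel and no intercept is equivalent to ordinary ridge regression.
# A minimal standalone check, assuming only the public scikit-learn API:
if __name__ == '__main__':
    ridge_pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
    krr_pred = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
    print(np.abs(ridge_pred - krr_pred).max())  # should be ~0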
bsd-3-clause
imaculate/scikit-learn
benchmarks/bench_plot_omp_lars.py
28
4471
"""Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle regression (:ref:`least_angle_regression`) The input data is mostly low rank but is a fat infinite tail. """ from __future__ import print_function import gc import sys from time import time import numpy as np from sklearn.linear_model import lars_path, orthogonal_mp from sklearn.datasets.samples_generator import make_sparse_coded_signal def compute_bench(samples_range, features_range): it = 0 results = dict() lars = np.empty((len(features_range), len(samples_range))) lars_gram = lars.copy() omp = lars.copy() omp_gram = lars.copy() max_it = len(samples_range) * len(features_range) for i_s, n_samples in enumerate(samples_range): for i_f, n_features in enumerate(features_range): it += 1 n_informative = n_features / 10 print('====================') print('Iteration %03d of %03d' % (it, max_it)) print('====================') # dataset_kwargs = { # 'n_train_samples': n_samples, # 'n_test_samples': 2, # 'n_features': n_features, # 'n_informative': n_informative, # 'effective_rank': min(n_samples, n_features) / 10, # #'effective_rank': None, # 'bias': 0.0, # } dataset_kwargs = { 'n_samples': 1, 'n_components': n_features, 'n_features': n_samples, 'n_nonzero_coefs': n_informative, 'random_state': 0 } print("n_samples: %d" % n_samples) print("n_features: %d" % n_features) y, X, _ = make_sparse_coded_signal(**dataset_kwargs) X = np.asfortranarray(X) gc.collect() print("benchmarking lars_path (with Gram):", end='') sys.stdout.flush() tstart = time() G = np.dot(X.T, X) # precomputed Gram matrix Xy = np.dot(X.T, y) lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative) delta = time() - tstart print("%0.3fs" % delta) lars_gram[i_f, i_s] = delta gc.collect() print("benchmarking lars_path (without Gram):", end='') sys.stdout.flush() tstart = time() lars_path(X, y, Gram=None, max_iter=n_informative) delta = time() - tstart print("%0.3fs" % delta) lars[i_f, i_s] = delta gc.collect() print("benchmarking orthogonal_mp (with Gram):", end='') sys.stdout.flush() tstart = time() orthogonal_mp(X, y, precompute=True, n_nonzero_coefs=n_informative) delta = time() - tstart print("%0.3fs" % delta) omp_gram[i_f, i_s] = delta gc.collect() print("benchmarking orthogonal_mp (without Gram):", end='') sys.stdout.flush() tstart = time() orthogonal_mp(X, y, precompute=False, n_nonzero_coefs=n_informative) delta = time() - tstart print("%0.3fs" % delta) omp[i_f, i_s] = delta results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram) results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp) return results if __name__ == '__main__': samples_range = np.linspace(1000, 5000, 5).astype(np.int) features_range = np.linspace(1000, 5000, 5).astype(np.int) results = compute_bench(samples_range, features_range) max_time = max(np.max(t) for t in results.values()) import matplotlib.pyplot as plt fig = plt.figure('scikit-learn OMP vs. LARS benchmark results') for i, (label, timings) in enumerate(sorted(results.iteritems())): ax = fig.add_subplot(1, 2, i+1) vmax = max(1 - timings.min(), -1 + timings.max()) plt.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax) ax.set_xticklabels([''] + map(str, samples_range)) ax.set_yticklabels([''] + map(str, features_range)) plt.xlabel('n_samples') plt.ylabel('n_features') plt.title(label) plt.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63) ax = plt.axes([0.1, 0.08, 0.8, 0.06]) plt.colorbar(cax=ax, orientation='horizontal') plt.show()
bsd-3-clause
PATRIC3/p3diffexp
expression_transform.py
1
22822
#!/usr/bin/env python import argparse import pandas as pd import json import sys import numpy as np import requests import os import uuid import csv from scipy import stats from itertools import islice try: from lib import diffexp_api except ImportError: import diffexp_api #requires 2.7.9 or greater to deal with https comodo intermediate certs if sys.version_info < (2, 7): raise "must use python 2.7 or greater" #stamp out annoying warnings that are beyond control import warnings warnings.simplefilter(action = "ignore", category = FutureWarning) pd.options.mode.chained_assignment = None #Input #1. metadata in json with the following: """ {xformat:"csv || tsv || xls || xlsx", xsetup:"gene_matrix || gene_list", source_id_type:"refseq_locus_tag || alt_locus_tag || feature_id || gi || gene_id || protein_id || seed_id", data_type: "Transcriptomics || Proteomics || Phenomics", title: "User input", desc: "User input", organism: "user input", pmid: "user_input", output_path: "path", "metadata_format":"csv || tsv || xls || xlsx"} """ #2. server info for the data api """ {"data_api":"url"} """ #Sample Output #experiment.json #{"origFileName":"filename","geneMapped":4886,"samples":8,"geneTotal":4985,"cdate":"2013-01-28 13:40:47","desc":"user input","organism":"some org","owner":"user name","title":"user input","pmid":"user input","expid":"whatever","collectionType":"ExpressionExperiment","genesMissed":99,"mdate":"2013-01-28 13:40:47"} #expression.json #{"expression":[{"log_ratio":"0.912","na_feature_id":"36731006","exp_locus_tag":"VBISalEnt101322_0001","pid":"8f2e7338-9f04-4ba5-9fe2-5365c857d57fS0","z_score":"-0.23331085637221843"}] #mapping.json #{"mapping":{"unmapped_list":[{"exp_locus_tag":"VBISalEnt101322_pg001"}],"unmapped_ids":99,"mapped_list":[{"na_feature_id":"36731006","exp_locus_tag":"VBISalEnt101322_0001"}],"mapped_ids":4886}} #sample.json #{"sample":[{"sig_log_ratio":2675,"expmean":"1.258","sampleUserGivenId":"LB_stat_AerobicM9_stat_aerobic","expname":"LB_stat_AerobicM9_stat_aerobic","pid":"8f2e7338-9f04-4ba5-9fe2-5365c857d57fS0","genes":4429,"sig_z_score":139,"expstddev":"1.483"}]} def pretty_print_POST(req): """ printed and may differ from the actual request. """ print('{}\n{}\n{}\n\n{}'.format( '-----------START-----------', req.method + ' ' + req.url, '\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()), req.body, )) #convert gene list format to gene matrix #there is definitely a more efficient conversion than this... def gene_list_to_matrix(cur_table): comparisons=set(cur_table['sampleUserGivenId']) genes=set(cur_table['exp_locus_tag']) result=pd.DataFrame(index=list(genes), columns=list(comparisons)) result['exp_locus_tag']=result.index gene_pos=cur_table.columns.get_loc('exp_locus_tag') comparison_pos=cur_table.columns.get_loc('sampleUserGivenId') ratio_pos=cur_table.columns.get_loc('log_ratio') for row in cur_table.iterrows(): gene_id=row[-1][gene_pos] comp=row[-1][comparison_pos] ratio=row[-1][ratio_pos] result[comp][gene_id]=ratio return result #convert gene matrix format to gene list #there is definitely a more efficient conversion than this... 
def gene_matrix_to_list(cur_table): result=pd.melt(cur_table, id_vars=['exp_locus_tag'], var_name='sampleUserGivenId', value_name='log_ratio') return result def list_to_mapping_table(cur_table): genes=set(cur_table['exp_locus_tag']) if len(genes) == 0: sys.stderr.write("No genes in differential expression gmx file\n") sys.exit(2) result=pd.DataFrame(index=list(genes)) result['exp_locus_tag']=result.index return result #deal with weird naming of columns. def fix_headers(cur_table, parameter_type, die): def fix_name(x, all_columns): fixed_name=' '.join(x.split()).strip().lower().replace(" ","_") #patrics downloadable template is not consistent with its help info if fixed_name.endswith('s') and fixed_name[:-1] in set(all_columns): fixed_name=fixed_name[:-1] return fixed_name matrix_columns=['gene_id'] list_columns=['gene_id', 'comparison_id', 'log_ratio'] template_columns=["comparison_id","title","pubmed","accession","organism","strain","gene_modification","experiment_condition","time_point"] all_columns=list_columns+template_columns check_columns=None target_setup=None if parameter_type=="xfile": target_setup= "gene_list" if all([(fix_name(x,all_columns) in list_columns) for x in cur_table.columns]) else "gene_matrix" else: target_setup="template" limit_columns=True if target_setup == 'gene_matrix': check_columns=matrix_columns limit_columns=False rename={'gene_id': 'exp_locus_tag'} elif target_setup == 'gene_list': check_columns=list_columns rename={'comparison_id':'sampleUserGivenId','gene_id': 'exp_locus_tag'} elif target_setup == 'template': check_columns=template_columns rename={'comparison_id':'sampleUserGivenId', 'title':'expname', 'gene_modification':'mutant', 'experiment_condition':'condition', 'time_point':'timepoint'} else: sys.stderr.write("unrecognized setup "+target_setup+"\n") if die: assert False cur_table.columns=[fix_name(x,all_columns) if fix_name(x,all_columns) in check_columns else x for x in cur_table.columns] columns_ok = True for i in check_columns: columns_ok=columns_ok and i in cur_table.columns if not columns_ok: sys.stderr.write("Missing appropriate column names in "+target_setup+"\n") if die: assert False if limit_columns: cur_table=cur_table[check_columns] if rename: cur_table=cur_table.rename(columns=rename) return (target_setup, cur_table) #read in the comparisons data and metadata def process_table(target_file, param_type, die, target_format="start", tries=0): tries+=1 starting=False target_setup=None if not os.path.exists(target_file): sys.stderr.write("can't find target file "+target_file+"\n") if die: sys.exit(2) if target_format=="start": starting=True fileName, fileExtension = os.path.splitext(target_file) target_format=fileExtension.replace('.','').lower() if starting and not target_format in set(["csv","tsv","xls","xlsx"]): temp_handle=open(target_file, 'rb') target_sep=csv.Sniffer().sniff("\n".join(list(islice(temp_handle,10)))) temp_handle.close() if target_sep.delimiter=="\t": target_format="tsv" sys.stdout.write("guessing "+target_format+" format\n") elif target_sep.delimiter==",": target_format="csv" sys.stdout.write("guessing "+target_format+" format\n") cur_table=None next_up="tsv" try: if target_format == 'tsv': next_up="csv" cur_table=pd.read_table(target_file, header=0) elif target_format == 'csv': next_up="xls" cur_table=pd.read_csv(target_file, header=0) elif target_format == 'xls' or target_format == 'xlsx': cur_table=pd.io.excel.read_excel(target_file, 0, index_col=None) else: sys.stderr.write("unrecognized format "+target_format+" for 
"+target_setup+"\n") if die: sys.exit(2) #assume the first column is "gene_id" for the comparison table and rename it as "gene_id" to handle user misspelled column name for gene_id if param_type=="xfile": cur_table=cur_table.rename(columns={cur_table.columns[0]:'gene_id'}) target_setup, cur_table=fix_headers(cur_table, param_type, die) except: sys.stdout.write("failed at reading "+target_format+" format\n") if tries > 5: raise else: sys.stdout.write("guessing "+next_up+" format\n") return process_table(target_file, param_type, die, next_up, tries) return (target_setup, cur_table) #{source_id_type:"refseq_locus_tag || alt_locus_tag || feature_id", #data_type: "Transcriptomics || Proteomics || Phenomics", #experiment_title: "User input", experiment_description: "User input", #organism name: "user input", pubmed_id: "user_input"} #Sample Output #experiment.json #{"origFileName":"filename","geneMapped":4886,"samples":8,"geneTotal":4985,"cdate":"2013-01-28 13:40:47","desc":"user input","organism":"some org","owner":"user name","title":"user input","pmid":"user input","expid":"whatever","collectionType":"ExpressionExperiment","genesMissed":99,"mdate":"2013-01-28 13:40:47"} def create_experiment_file(output_path, mapping_dict, sample_dict, expression_dict, form_data, experiment_id): experiment_dict={"geneMapped":mapping_dict["mapping"]["mapped_ids"],"samples":len(sample_dict['sample']),"geneTotal":mapping_dict["mapping"]["mapped_ids"]+mapping_dict["mapping"]["unmapped_ids"],"desc":form_data.get('desc',form_data.get("experiment_description","")),"organism":form_data.get('organism',''),"title":form_data.get("title",form_data.get("experiment_title","")),"pmid":form_data.get("pmid",""),"expid":experiment_id,"collectionType":"ExpressionExperiment","genesMissed":mapping_dict["mapping"]["unmapped_ids"]} output_file=os.path.join(output_path, 'experiment.json') out_handle=open(output_file, 'w') json.dump(experiment_dict, out_handle) out_handle.close() return experiment_dict #expression.json #{"expression":[{"log_ratio":"0.912","na_feature_id":"36731006","exp_locus_tag":"VBISalEnt101322_0001","pid":"8f2e7338-9f04-4ba5-9fe2-5365c857d57fS0","z_score":"-0.23331085637221843"}] #sample.json #{"sample":[{"sig_log_ratio":2675,"expmean":"1.258","sampleUserGivenId":"LB_stat_AerobicM9_stat_aerobic","expname":"LB_stat_AerobicM9_stat_aerobic","pid":"8f2e7338-9f04-4ba5-9fe2-5365c857d57fS0","genes":4429,"sig_z_score":139,"expstddev":"1.483"}]} def create_comparison_files(output_path, comparisons_table, mfile, form_data, experiment_id, sig_z, sig_log): #create dicts for json sample_dict={'sample':[]} expression_dict={'expression':[]} #create stats table for sample.json grouped=comparisons_table.groupby(["sampleUserGivenId"], sort=False) sample_stats=grouped.agg([np.mean, np.std])['log_ratio'] sample_stats=sample_stats.rename(columns={'mean':'expmean','std':'expstddev'}) sample_stats["genes"]=grouped.count()["exp_locus_tag"] sample_stats["pid"]=[str(experiment_id)+"S"+str(i) for i in range(0,len(sample_stats))] sample_stats["sampleUserGivenId"]=sample_stats.index sample_stats["expname"]=sample_stats.index #get zscore and significance columns comparisons_table["z_score"]=grouped.transform(stats.zscore)["log_ratio"] comparisons_table["sig_z"]=comparisons_table["z_score"].abs() >= sig_z comparisons_table["sig_log"]=comparisons_table["log_ratio"].abs() >= sig_log #store counts in stats z_score_breakdown=comparisons_table.groupby(["sampleUserGivenId","sig_z"], sort=False).count()['z_score'].unstack() if True in 
z_score_breakdown: sample_stats["sig_z_score"]=z_score_breakdown[True] else: z_score_breakdown.columns=[True] z_score_breakdown[True]=z_score_breakdown[True].apply(lambda x: 0) sample_stats["sig_z_score"]=z_score_breakdown[True] log_breakdown=comparisons_table.groupby(["sampleUserGivenId","sig_log"], sort=False).count()['log_ratio'].unstack() if True in log_breakdown: sample_stats["sig_log_ratio"]=log_breakdown[True] else: log_breakdown.columns=[True] log_breakdown[True]=log_breakdown[True].apply(lambda x: 0) sample_stats["sig_log_ratio"]=log_breakdown[True] sample_stats["sig_log_ratio"]=sample_stats["sig_log_ratio"].fillna(0).astype('int64') sample_stats["sig_z_score"]=sample_stats["sig_z_score"].fillna(0).astype('int64') #set pid's for expression.json comparisons_table=comparisons_table.merge(sample_stats[["pid","sampleUserGivenId"]], how="left", on="sampleUserGivenId") #pull in metadata spreadsheet if provided if mfile and mfile.strip(): sys.stdout.write("reading metadata template\n") target_setup, meta_table=process_table(mfile, "mfile", die=True) try: meta_key="sampleUserGivenId" to_add=meta_table.columns-sample_stats.columns meta_table=meta_table.set_index('sampleUserGivenId') sample_stats.update(meta_table) sample_stats=sample_stats.merge(meta_table[to_add], left_index=True, right_index=True, how='left') except: sys.stderr.write("failed to parse user provide metadata template\n") sys.exit(2) #populate json dicts sample_stats=sample_stats.fillna("") sample_dict['sample']=json.loads(sample_stats.to_json(orient='records', date_format='iso')) #sample_dict['sample']=sample_stats.to_dict(outtype='records') cols = [col for col in comparisons_table.columns if col not in ['sig_z', 'sig_log']] expression_dict['expression']=json.loads(comparisons_table[cols].to_json(orient='records')) output_file=os.path.join(output_path, 'sample.json') out_handle=open(output_file, 'w') json.dump(sample_dict, out_handle) out_handle.close() output_file=os.path.join(output_path, 'expression.json') out_handle=open(output_file, 'w') json.dump(expression_dict, out_handle) out_handle.close() return (sample_dict, expression_dict) #mapping.json #{"mapping":{"unmapped_list":[{"exp_locus_tag":"VBISalEnt101322_pg001"}],"unmapped_ids":99,"mapped_list":[{"na_feature_id":"36731006","exp_locus_tag":"VBISalEnt101322_0001"}],"mapped_ids":4886}} #creates mapping.json for results def create_mapping_file(output_path, mapping_table, form_data): mapping_dict={"mapping":{"unmapped_list":[],"unmapped_ids":0,"mapped_list":[],"mapped_ids":0}} mapping_dict['mapping']['unmapped_list']=mapping_table[mapping_table.isnull().any(axis=1)][['exp_locus_tag']].to_dict('records') mapping_dict['mapping']['mapped_list']=mapping_table[mapping_table.notnull().all(axis=1)].to_dict('records') mapping_dict['mapping']['unmapped_ids']=len(mapping_dict['mapping']['unmapped_list']) mapping_dict['mapping']['mapped_ids']=len(mapping_dict['mapping']['mapped_list']) output_file=os.path.join(output_path, 'mapping.json') out_handle=open(output_file, 'w') json.dump(mapping_dict, out_handle) out_handle.close() return mapping_dict #mapped_list=[{form_data["source_id_type"]: i["Map ID"], "exp_locus_tag":i['Gene ID']} for i in mapping_table[mapping_table.notnull().any(axis=1)]] #mapped_list=[{form_data["source_id_type"]: i["Map ID"], "exp_locus_tag":i["Gene ID"]} for i in mapping_table.query('Gene ID != @np.nan')] def place_ids(query_results,cur_table,form_data): source_types=form_data["source_types"]+form_data["int_types"] count=0 try: for d in 
query_results.json()['response']['docs']: source_ids=[] target_id=None for id_type in source_types: if id_type in d: source_ids.append(d[id_type]) if 'feature_id' in d: target_id=d['feature_id'] if target_id: #because which of the source id's are in the input data check them locally against the exp_locus_tag for source_id in source_ids: if source_id in cur_table["feature_id"]: count+=1 cur_table["feature_id"][source_id]=target_id break except ValueError: sys.stderr.write("mapping failed. either PATRICs API is down or the Gene IDs are unknown\n") raise if count==0: sys.stderr.write("mapping failed. either PATRICs API is down or the Gene IDs are unknown\n") sys.exit(2) def make_map_query(id_list, form_data, server_setup, chunk_size): id_list = id_list.apply(str) source_types=form_data["source_types"] int_types=form_data["int_types"] current_query={'q':""} map_queries=[] int_ids=[] if "source_id_type" in form_data and len(form_data["source_id_type"]) > 0: source_types=[form_data["source_id_type"]] else: for id in id_list: if np.issubdtype(type(id), np.number) or id.isdigit(): int_ids.append(str(id)) if len(int_ids): for s_type in int_types: map_queries.append("("+s_type+":("+" OR ".join(int_ids)+"))") for s_type in source_types: map_queries.append("("+s_type+":("+" OR ".join(id_list)+"))") if "host" in form_data and form_data["host"]: current_query["q"]+="("+" OR ".join(map_queries)+") AND annotation:RefSeq" else: current_query["q"]+="("+" OR ".join(map_queries)+") AND annotation:PATRIC" if "genome_id" in form_data and form_data["genome_id"]: current_query["q"]+=" AND genome_id:"+form_data["genome_id"] current_query["fl"]="feature_id,"+",".join(source_types+int_types) current_query["rows"]="20000" current_query["wt"]="json" headers = {"Content-Type": "application/solrquery+x-www-form-urlencoded", "accept":"application/solr+json"} #print "switch THE HEADER BACK!" 
#headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'} req = requests.Request('POST', server_setup["data_api"], headers=headers, data=current_query) diffexp_api.authenticateByEnv(req) prepared = req.prepare() #pretty_print_POST(prepared) s = requests.Session() response=s.send(prepared) if not response.ok: sys.stderr.write("Error code %s invoking data api: %s\nquery: %s\n" % (response.status_code, response.text, current_query)) sys.exit(2) return response def chunker(seq, size): return (seq[pos:pos + size] for pos in xrange(0, len(seq), size)) def map_gene_ids(cur_table, form_data, server_setup, host=False): cur_table["feature_id"]=np.nan chunk_size=1000 if host: for source_id in cur_table["exp_locus_tag"]: cur_table["feature_id"][source_id]=source_id else: for i in chunker(cur_table['exp_locus_tag'], chunk_size): mapping_results=make_map_query(i, form_data, server_setup, chunk_size) place_ids(mapping_results, cur_table, form_data) def main(): sig_z=2 sig_log=1 valid_formats=set(['csv', 'tsv', 'xls', 'xlsx']) valid_setups=set(['gene_matrix','gene_list']) #req_info=['xformat','xsetup','source_id_type','data_type','experiment_title','experiment_description','organism'] req_info=['data_type','experiment_title','experiment_description','organism'] parser = argparse.ArgumentParser() parser.add_argument('--xfile', help='the source Expression comparisons file', required=True) parser.add_argument('--mfile', help='the metadata template if it exists', required=False) parser.add_argument('--output_path', help='location for output', required=True) parser.add_argument('--host', help='host genome, prevent id mapping', action='store_true', default=False, required=False) userinfo = parser.add_mutually_exclusive_group(required=True) userinfo.add_argument('--ufile', help='json file from user input') userinfo.add_argument('--ustring', help='json string from user input') serverinfo = parser.add_mutually_exclusive_group(required=True) serverinfo.add_argument('--sfile', help='server setup JSON file') serverinfo.add_argument('--sstring', help='server setup JSON string') map_args = parser.parse_args() if len(sys.argv) ==1: parser.print_help() sys.exit(2) #get comparison and metadata files xfile=map_args.xfile mfile=map_args.mfile if 'mfile' in map_args else None #parse user form data form_data=None user_parse=None server_parse=None parse_server = json.loads if 'sstring' in map_args else json.load try: form_data = json.loads(map_args.ustring) if map_args.ustring else json.load(open(map_args.ufile,'r')) except: sys.stderr.write("Failed to parse user provided form data \n") raise #parse setup data try: server_setup= json.loads(map_args.sstring) if map_args.sstring else json.load(open(map_args.sfile,'r')) except: sys.stderr.write("Failed to parse server data\n") raise #part of auto-detection of id type add source id types to map from form_data["source_types"]=["refseq_locus_tag","alt_locus_tag","feature_id","protein_id","patric_id"]#,"gi"] form_data["int_types"]=["gi","gene_id"] #make sure all required info present missing=[x not in form_data for x in req_info] if (any(missing)): sys.stderr.write("Missing required user input data: "+" ".join([req_info[i] for i in range(len(missing)) if missing[i]])+"\n") sys.exit(2) #if (mfile or 'metadata_format' in form_data) and ('metadata_format' not in form_data or not mfile): # sys.stderr.write("Expression transformation: (file,format) pair must be given for metadata template\n") #sys.exit(2) #read comparisons file sys.stdout.write("reading comparisons 
file\n") target_setup, comparisons_table=process_table(xfile, "xfile", die=True) output_path=map_args.output_path #convert gene matrix to list if target_setup == 'gene_matrix': comparisons_table=gene_matrix_to_list(comparisons_table) #limit log ratios comparisons_table.ix[comparisons_table["log_ratio"] > 1000000, 'log_ratio']=1000000 comparisons_table.ix[comparisons_table["log_ratio"] < -1000000, 'log_ratio']=-1000000 comparisons_table=comparisons_table.dropna() comparisons_table=comparisons_table[comparisons_table.exp_locus_tag != "-"] #map gene ids mapping_table=list_to_mapping_table(comparisons_table) map_gene_ids(mapping_table, form_data, server_setup, map_args.host) comparisons_table=comparisons_table.merge(mapping_table, how='left', on='exp_locus_tag') #create json files to represent experiment experiment_id=str(uuid.uuid1()) mapping_dict=create_mapping_file(output_path, mapping_table, form_data) (sample_dict, expression_dict) = create_comparison_files(output_path, comparisons_table, mfile, form_data, experiment_id, sig_z, sig_log) experiment_dict=create_experiment_file(output_path, mapping_dict, sample_dict, expression_dict, form_data, experiment_id) sys.stdout.write(json.dumps(experiment_dict)+"\n") if __name__ == "__main__": main()
mit
cauchycui/scikit-learn
sklearn/ensemble/__init__.py
217
1307
""" The :mod:`sklearn.ensemble` module includes ensemble-based methods for classification and regression. """ from .base import BaseEnsemble from .forest import RandomForestClassifier from .forest import RandomForestRegressor from .forest import RandomTreesEmbedding from .forest import ExtraTreesClassifier from .forest import ExtraTreesRegressor from .bagging import BaggingClassifier from .bagging import BaggingRegressor from .weight_boosting import AdaBoostClassifier from .weight_boosting import AdaBoostRegressor from .gradient_boosting import GradientBoostingClassifier from .gradient_boosting import GradientBoostingRegressor from .voting_classifier import VotingClassifier from . import bagging from . import forest from . import weight_boosting from . import gradient_boosting from . import partial_dependence __all__ = ["BaseEnsemble", "RandomForestClassifier", "RandomForestRegressor", "RandomTreesEmbedding", "ExtraTreesClassifier", "ExtraTreesRegressor", "BaggingClassifier", "BaggingRegressor", "GradientBoostingClassifier", "GradientBoostingRegressor", "AdaBoostClassifier", "AdaBoostRegressor", "VotingClassifier", "bagging", "forest", "gradient_boosting", "partial_dependence", "weight_boosting"]
bsd-3-clause
chrsrds/scikit-learn
examples/cluster/plot_birch_vs_minibatchkmeans.py
16
3640
""" ================================= Compare BIRCH and MiniBatchKMeans ================================= This example compares the timing of Birch (with and without the global clustering step) and MiniBatchKMeans on a synthetic dataset having 100,000 samples and 2 features generated using make_blobs. If ``n_clusters`` is set to None, the data is reduced from 100,000 samples to a set of 158 clusters. This can be viewed as a preprocessing step before the final (global) clustering step that further reduces these 158 clusters to 100 clusters. """ # Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com # Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # License: BSD 3 clause print(__doc__) from itertools import cycle from time import time import numpy as np import matplotlib.pyplot as plt import matplotlib.colors as colors from sklearn.cluster import Birch, MiniBatchKMeans from sklearn.datasets.samples_generator import make_blobs # Generate centers for the blobs so that it forms a 10 X 10 grid. xx = np.linspace(-22, 22, 10) yy = np.linspace(-22, 22, 10) xx, yy = np.meshgrid(xx, yy) n_centres = np.hstack((np.ravel(xx)[:, np.newaxis], np.ravel(yy)[:, np.newaxis])) # Generate blobs to do a comparison between MiniBatchKMeans and Birch. X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0) # Use all colors that matplotlib provides by default. colors_ = cycle(colors.cnames.keys()) fig = plt.figure(figsize=(12, 4)) fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9) # Compute clustering with Birch with and without the final clustering step # and plot. birch_models = [Birch(threshold=1.7, n_clusters=None), Birch(threshold=1.7, n_clusters=100)] final_step = ['without global clustering', 'with global clustering'] for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)): t = time() birch_model.fit(X) time_ = time() - t print("Birch %s as the final step took %0.2f seconds" % ( info, (time() - t))) # Plot result labels = birch_model.labels_ centroids = birch_model.subcluster_centers_ n_clusters = np.unique(labels).size print("n_clusters : %d" % n_clusters) ax = fig.add_subplot(1, 3, ind + 1) for this_centroid, k, col in zip(centroids, range(n_clusters), colors_): mask = labels == k ax.scatter(X[mask, 0], X[mask, 1], c='w', edgecolor=col, marker='.', alpha=0.5) if birch_model.n_clusters is None: ax.scatter(this_centroid[0], this_centroid[1], marker='+', c='k', s=25) ax.set_ylim([-25, 25]) ax.set_xlim([-25, 25]) ax.set_autoscaley_on(False) ax.set_title('Birch %s' % info) # Compute clustering with MiniBatchKMeans. mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100, n_init=10, max_no_improvement=10, verbose=0, random_state=0) t0 = time() mbk.fit(X) t_mini_batch = time() - t0 print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch) mbk_means_labels_unique = np.unique(mbk.labels_) ax = fig.add_subplot(1, 3, 3) for this_centroid, k, col in zip(mbk.cluster_centers_, range(n_clusters), colors_): mask = mbk.labels_ == k ax.scatter(X[mask, 0], X[mask, 1], marker='.', c='w', edgecolor=col, alpha=0.5) ax.scatter(this_centroid[0], this_centroid[1], marker='+', c='k', s=25) ax.set_xlim([-25, 25]) ax.set_ylim([-25, 25]) ax.set_title("MiniBatchKMeans") ax.set_autoscaley_on(False) plt.show()
bsd-3-clause
toastedcornflakes/scikit-learn
sklearn/utils/tests/test_shortest_path.py
303
2841
from collections import defaultdict

import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
                                 single_source_shortest_path_length)


def floyd_warshall_slow(graph, directed=False):
    N = graph.shape[0]

    #set nonzero entries to infinity
    graph[np.where(graph == 0)] = np.inf

    #set diagonal to zero
    graph.flat[::N + 1] = 0

    if not directed:
        graph = np.minimum(graph, graph.T)

    for k in range(N):
        for i in range(N):
            for j in range(N):
                graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])

    graph[np.where(np.isinf(graph))] = 0

    return graph


def generate_graph(N=20):
    #sparse grid of distances
    rng = np.random.RandomState(0)
    dist_matrix = rng.random_sample((N, N))

    #make symmetric: distances are not direction-dependent
    dist_matrix = dist_matrix + dist_matrix.T

    #make graph sparse
    i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
    dist_matrix[i] = 0

    #set diagonal to zero
    dist_matrix.flat[::N + 1] = 0

    return dist_matrix


def test_floyd_warshall():
    dist_matrix = generate_graph(20)

    for directed in (True, False):
        graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
        graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)

        assert_array_almost_equal(graph_FW, graph_py)


def test_dijkstra():
    dist_matrix = generate_graph(20)

    for directed in (True, False):
        graph_D = graph_shortest_path(dist_matrix, directed, 'D')
        graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)

        assert_array_almost_equal(graph_D, graph_py)


def test_shortest_path():
    dist_matrix = generate_graph(20)
    # We compare path length and not costs (-> set distances to 0 or 1)
    dist_matrix[dist_matrix != 0] = 1

    for directed in (True, False):
        if not directed:
            dist_matrix = np.minimum(dist_matrix, dist_matrix.T)

        graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
        for i in range(dist_matrix.shape[0]):
            # Non-reachable nodes have distance 0 in graph_py
            dist_dict = defaultdict(int)
            dist_dict.update(single_source_shortest_path_length(dist_matrix,
                                                                i))

            for j in range(graph_py[i].shape[0]):
                assert_array_almost_equal(dist_dict[j], graph_py[i, j])


def test_dijkstra_bug_fix():
    X = np.array([[0., 0., 4.],
                  [1., 0., 2.],
                  [0., 5., 0.]])
    dist_FW = graph_shortest_path(X, directed=False, method='FW')
    dist_D = graph_shortest_path(X, directed=False, method='D')
    assert_array_almost_equal(dist_D, dist_FW)
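

# --- Illustrative aside (added, not part of the scikit-learn test suite) ---
# floyd_warshall_slow is the pure-Python reference the tests compare against.
# On a tiny hand-checkable graph the indirect route 0 -> 1 -> 2 (cost 1 + 2 = 3)
# beats the direct edge 0 -> 2 (cost 4), so entry [0, 2] of the result is 3:
if __name__ == '__main__':
    tiny = np.array([[0., 1., 4.],
                     [1., 0., 2.],
                     [4., 2., 0.]])
    print(floyd_warshall_slow(tiny.copy(), directed=False))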
bsd-3-clause
rowhit/h2o-2
py/testdir_single_jvm/test_GLM2_basic_cmp.py
9
7620
import unittest, random, sys, time sys.path.extend(['.','..','../..','py']) import h2o, h2o_cmd, h2o_import as h2i, h2o_exec, h2o_glm, h2o_jobs import h2o_print as h2p SCIPY_INSTALLED = True try: import scipy as sp import numpy as np import sklearn as sk print "numpy, scipy and sklearn are installed. Will do extra checks" except ImportError: print "numpy, scipy or sklearn is not installed. Will just do h2o stuff" SCIPY_INSTALLED = False #********************************************************************************* def do_scipy_glm(self, bucket, csvPathname, L, family='binomial'): h2p.red_print("Now doing sklearn") h2p.red_print("\nsee http://scikit-learn.org/0.11/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression") import numpy as np import scipy as sp from sklearn.linear_model import LogisticRegression from numpy import loadtxt csvPathnameFull = h2i.find_folder_and_filename(bucket, csvPathname, returnFullPath=True) # make sure it does fp divide C = 1/(L+0.0) print "C regularization:", C dataset = np.loadtxt( open(csvPathnameFull,'r'), skiprows=1, # skip the header delimiter=',', dtype='float'); print "\ncsv read for training, done" n_features = len(dataset[0]) - 1; print "n_features:", n_features # don't want ID (col 0) or CAPSULE (col 1) # get CAPSULE target = [x[1] for x in dataset] # slice off the first 2 train = np.array ( [x[2:] for x in dataset] ) n_samples, n_features = train.shape print "n_samples:", n_samples, "n_features:", n_features print "histogram of target" print sp.histogram(target,3) print "len(train):", len(train) print "len(target):", len(target) print "dataset shape:", dataset.shape if family!='binomial': raise Exception("Only have binomial logistic for scipy") print "\nTrying l2" clf2 = LogisticRegression( C=C, dual=False, fit_intercept=True, intercept_scaling=1, penalty='l2', tol=0.0001); # train the classifier start = time.time() clf2.fit(train, target) print "L2 fit took", time.time() - start, "seconds" # print "coefficients:", clf2.coef_ cstring = "".join([("%.5e " % c) for c in clf2.coef_[0]]) h2p.green_print("sklearn L2 C", C) h2p.green_print("sklearn coefficients:", cstring) h2p.green_print("sklearn intercept:", "%.5e" % clf2.intercept_[0]) h2p.green_print("sklearn score:", clf2.score(train,target)) print "\nTrying l1" clf1 = LogisticRegression( C=C, dual=False, fit_intercept=True, intercept_scaling=1, penalty='l1', tol=0.0001); # train the classifier start = time.time() clf1.fit(train, target) print "L1 fit took", time.time() - start, "seconds" # print "coefficients:", clf1.coef_ cstring = "".join([("%.5e " % c) for c in clf1.coef_[0]]) h2p.green_print("sklearn L1 C", C) h2p.green_print("sklearn coefficients:", cstring) h2p.green_print("sklearn intercept:", "%.5e" % clf1.intercept_[0]) h2p.green_print("sklearn score:", clf1.score(train,target)) # attributes are accessed in the normal python way dx = clf1.__dict__ dx.keys() ## ['loss', 'C', 'dual', 'fit_intercept', 'class_weight_label', 'label_', ## 'penalty', 'multi_class', 'raw_coef_', 'tol', 'class_weight', ## 'intercept_scaling'] #********************************************************************************* def do_h2o_glm(self, bucket, csvPathname, L, family='binomial'): h2p.red_print("\nNow doing h2o") parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='local', timeoutSecs=180) # save the resolved pathname for use in the sklearn csv read below inspect = h2o_cmd.runInspect(None, parseResult['destination_key']) print inspect 
print "\n" + csvPathname, \ " numRows:", "{:,}".format(inspect['numRows']), \ " numCols:", "{:,}".format(inspect['numCols']) x = 'ID' y = 'CAPSULE' family = family alpha = '0' lambda_ = L nfolds = '0' f = 'prostate' modelKey = 'GLM_' + f kwargs = { 'response' : y, 'ignored_cols' : x, 'family' : family, 'lambda' : lambda_, 'alpha' : alpha, 'n_folds' : nfolds, # passes if 0, fails otherwise 'destination_key' : modelKey, } timeoutSecs = 60 start = time.time() glmResult = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs) # this stuff was left over from when we got the result after polling the jobs list # okay to do it again # GLM2: when it redirects to the model view, we no longer have the job_key! (unlike the first response and polling) (warnings, clist, intercept) = h2o_glm.simpleCheckGLM(self, glmResult, None, **kwargs) cstring = "".join([("%.5e " % c) for c in clist]) h2p.green_print("h2o alpha ", alpha) h2p.green_print("h2o lambda ", lambda_) h2p.green_print("h2o coefficient list:", cstring) h2p.green_print("h2o intercept", "%.5e " % intercept) # other stuff in the json response glm_model = glmResult['glm_model'] _names = glm_model['_names'] coefficients_names = glm_model['coefficients_names'] # the first submodel is the right one, if onely one lambda is provided as a parameter above submodels = glm_model['submodels'][0] beta = submodels['beta'] h2p.red_print("beta:", beta) norm_beta = submodels['norm_beta'] iteration = submodels['iteration'] validation = submodels['validation'] auc = validation['auc'] aic = validation['aic'] null_deviance = validation['null_deviance'] residual_deviance = validation['residual_deviance'] print '_names', _names print 'coefficients_names', coefficients_names # did beta get shortened? the simple check confirms names/beta/norm_beta are same length print 'beta', beta print 'iteration', iteration print 'auc', auc #********************************************************************************* # the actual test that will run both #********************************************************************************* class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): h2o.init(1, java_heap_GB=10) @classmethod def tearDownClass(cls): h2o.tear_down_cloud() def test_GLM2_basic_cmp(self): bucket = 'smalldata' importFolderPath = "logreg" csvFilename = 'prostate.csv' csvPathname = importFolderPath + "/" + csvFilename # use L for lambda in h2o, C=1/L in sklearn family = 'binomial' L = 1e-4 do_h2o_glm(self, bucket, csvPathname, L, family) if SCIPY_INSTALLED: do_scipy_glm(self, bucket, csvPathname, L, family) # since we invert for C, can't use 0 (infinity) L = 1e-13 # C in sklearn Specifies the strength of the regularization. # The smaller it is the bigger in the regularization. # we'll set it to 1/L do_h2o_glm(self, bucket, csvPathname, L, family) if SCIPY_INSTALLED: do_scipy_glm(self, bucket, csvPathname, L, family) if __name__ == '__main__': h2o.unit_main()
apache-2.0
detrout/debian-statsmodels
statsmodels/examples/tsa/arma_plots.py
33
2516
'''Plot acf and pacf for some ARMA(1,1)

'''
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt

import statsmodels.tsa.arima_process as tsp
from statsmodels.sandbox.tsa.fftarma import ArmaFft as FftArmaProcess
import statsmodels.tsa.stattools as tss
from statsmodels.graphics.tsaplots import plotacf

np.set_printoptions(precision=2)


arcoefs = [0.9, 0., -0.5] #[0.9, 0.5, 0.1, 0., -0.5]
macoefs = [0.9, 0., -0.5] #[0.9, 0.5, 0.1, 0., -0.5]
nsample = 1000
nburnin = 1000
sig = 1

fig = plt.figure(figsize=(8, 13))
fig.suptitle('ARMA: Autocorrelation (left) and Partial Autocorrelation (right)')
subplotcount = 1
nrows = 4
for arcoef in arcoefs[:-1]:
    for macoef in macoefs[:-1]:
        ar = np.r_[1., -arcoef]
        ma = np.r_[1., macoef]

        #y = tsp.arma_generate_sample(ar,ma,nsample, sig, burnin)
        #armaprocess = FftArmaProcess(ar, ma, nsample) #TODO: make n optional
        #armaprocess.plot4()
        armaprocess = tsp.ArmaProcess(ar, ma)
        acf = armaprocess.acf(20)[:20]
        pacf = armaprocess.pacf(20)[:20]
        ax = fig.add_subplot(nrows, 2, subplotcount)
        plotacf(acf, ax=ax)
##        ax.set_title('Autocorrelation \nar=%s, ma=%rs' % (ar, ma),
##                     size='xx-small')
        ax.text(0.7, 0.6, 'ar =%s \nma=%s' % (ar, ma),
                transform=ax.transAxes,
                horizontalalignment='left', #'right',
                size='xx-small')
        ax.set_xlim(-1,20)
        subplotcount +=1
        ax = fig.add_subplot(nrows, 2, subplotcount)
        plotacf(pacf, ax=ax)
##        ax.set_title('Partial Autocorrelation \nar=%s, ma=%rs' % (ar, ma),
##                     size='xx-small')
        ax.text(0.7, 0.6, 'ar =%s \nma=%s' % (ar, ma),
                transform=ax.transAxes,
                horizontalalignment='left', #'right',
                size='xx-small')
        ax.set_xlim(-1,20)
        subplotcount +=1

axs = fig.axes
### turn of the 2nd column y tick labels
##for ax in axs[1::2]:#[:,1].flat:
##    for label in ax.get_yticklabels(): label.set_visible(False)

# turn off all but the bottom xtick labels
for ax in axs[:-2]:#[:-1,:].flat:
    for label in ax.get_xticklabels():
        label.set_visible(False)


# use a MaxNLocator on the first column y axis if you have a bunch of
# rows to avoid bunching; example below uses at most 3 ticks
import matplotlib.ticker as mticker
for ax in axs: #[::2]:#[:,1].flat:
    ax.yaxis.set_major_locator( mticker.MaxNLocator(3 ))

plt.show()
bsd-3-clause
qPCR4vir/orange3
Orange/widgets/visualize/owparallelgraph.py
3
35083
# # OWParallelGraph.py # from collections import defaultdict import os import sys import math import numpy as np from PyQt4.QtCore import QLineF, Qt, QEvent, QRect, QPoint, QPointF from PyQt4.QtGui import QGraphicsPathItem, QPixmap, QColor, QBrush, QPen, QToolTip, QPainterPath, QPolygonF, QGraphicsPolygonItem from Orange.preprocess import Discretize from Orange.preprocess.discretize import EqualFreq from Orange.statistics.contingency import get_contingencies, get_contingency from Orange.widgets import gui from Orange.widgets.settings import Setting from Orange.widgets.utils.colorpalette import ContinuousPaletteGenerator from Orange.widgets.utils.plot import OWPlot, UserAxis, AxisStart, AxisEnd, OWCurve, OWPoint, PolygonCurve, \ xBottom, yLeft, OWPlotItem from Orange.widgets.utils.scaling import get_variable_values_sorted, ScaleData NO_STATISTICS = 0 MEANS = 1 MEDIAN = 2 VISIBLE = 196 TRANSPARENT = 64 HIDDEN = 0 class OWParallelGraph(OWPlot, ScaleData): show_distributions = Setting(False) show_attr_values = Setting(True) show_statistics = Setting(default=False) group_lines = Setting(default=False) number_of_groups = Setting(default=5) number_of_steps = Setting(default=30) use_splines = Setting(False) alpha_value = Setting(150) alpha_value_2 = Setting(150) def __init__(self, widget, parent=None, name=None): OWPlot.__init__(self, parent, name, axes=[], widget=widget) ScaleData.__init__(self) self.update_antialiasing(False) self.widget = widget self.last_selected_curve = None self.enableGridXB(0) self.enableGridYL(0) self.domain_contingencies = None self.auto_update_axes = 1 self.old_legend_keys = [] self.selection_conditions = {} self.attributes = [] self.visualized_mid_labels = [] self.attribute_indices = [] self.valid_data = [] self.groups = {} self.colors = None self.selected_examples = [] self.unselected_examples = [] self.bottom_pixmap = QPixmap(gui.resource_filename("icons/upgreenarrow.png")) self.top_pixmap = QPixmap(gui.resource_filename("icons/downgreenarrow.png")) def set_data(self, data, subset_data=None, **args): self.start_progress() self.set_progress(1, 100) self.data = data self.have_data = True self.domain_contingencies = None self.groups = {} OWPlot.setData(self, data) ScaleData.set_data(self, data, no_data=True, **args) self.end_progress() def update_data(self, attributes, mid_labels=None): old_selection_conditions = self.selection_conditions self.clear() if not (self.have_data): return if len(attributes) < 2: return if self.show_statistics: self.alpha_value = TRANSPARENT self.alpha_value_2 = VISIBLE else: self.alpha_value = VISIBLE self.alpha_value_2 = TRANSPARENT self.attributes = attributes self.attribute_indices = [self.data_domain.index(name) for name in self.attributes] self.valid_data = self.get_valid_list(self.attribute_indices) self.visualized_mid_labels = mid_labels self.add_relevant_selections(old_selection_conditions) class_var = self.data_domain.class_var if not class_var: self.colors = None elif class_var.is_discrete: self.colors = class_var.colors elif class_var.is_continuous: self.colors = ContinuousPaletteGenerator(*class_var.colors) if self.group_lines: self.show_statistics = False self.draw_groups() else: self.show_statistics = False self.draw_curves() self.draw_distributions() self.draw_axes() self.draw_statistics() self.draw_mid_labels(mid_labels) self.draw_legend() self.replot() def add_relevant_selections(self, old_selection_conditions): """Keep only conditions related to the currently visualized attributes""" for name, value in 
old_selection_conditions.items(): if name in self.attributes: self.selection_conditions[name] = value def draw_axes(self): self.remove_all_axes() for i in range(len(self.attributes)): axis_id = UserAxis + i a = self.add_axis(axis_id, line=QLineF(i, 0, i, 1), arrows=AxisStart | AxisEnd, zoomable=True) a.always_horizontal_text = True a.max_text_width = 100 a.title_margin = -10 a.text_margin = 0 a.setZValue(5) self.set_axis_title(axis_id, self.data_domain[self.attributes[i]].name) self.set_show_axis_title(axis_id, self.show_attr_values) if self.show_attr_values: attr = self.data_domain[self.attributes[i]] if attr.is_continuous: self.set_axis_scale(axis_id, self.attr_values[attr.name][0], self.attr_values[attr.name][1]) elif attr.is_discrete: attribute_values = get_variable_values_sorted(self.data_domain[self.attributes[i]]) attr_len = len(attribute_values) values = [float(1.0 + 2.0 * j) / float(2 * attr_len) for j in range(len(attribute_values))] a.set_bounds((0, 1)) self.set_axis_labels(axis_id, labels=attribute_values, values=values) def draw_curves(self): conditions = {name: self.attributes.index(name) for name in self.selection_conditions.keys()} def is_selected(example): return all(self.selection_conditions[name][0] <= example[index] <= self.selection_conditions[name][1] for (name, index) in list(conditions.items())) selected_curves = defaultdict(list) background_curves = defaultdict(list) diff, mins = [], [] for i in self.attribute_indices: var = self.data_domain[i] if var.is_discrete: diff.append(len(var.values)) mins.append(-0.5) else: diff.append(self.domain_data_stat[i].max - self.domain_data_stat[i].min or 1) mins.append(self.domain_data_stat[i].min) def scale_row(row): return [(x - m) / d for x, m, d in zip(row, mins, diff)] for row_idx, row in enumerate(self.data[:, self.attribute_indices]): if any(np.isnan(v) for v in row.x): continue color = tuple(self.select_color(row_idx)) if is_selected(row): color += (self.alpha_value,) selected_curves[color].extend(scale_row(row)) self.selected_examples.append(row_idx) else: color += (self.alpha_value_2,) background_curves[color].extend(row) self.unselected_examples.append(row_idx) self._draw_curves(selected_curves) self._draw_curves(background_curves) def select_color(self, row_index): if self.data_has_class: if self.data_has_continuous_class: return self.continuous_palette.getRGB( self.data[row_index, self.data_class_index]) else: return self.colors[ int(self.data[row_index, self.data_class_index])] else: return 0, 0, 0 def _draw_curves(self, selected_curves): n_attr = len(self.attributes) for color, y_values in sorted(selected_curves.items()): n_rows = int(len(y_values) / n_attr) x_values = list(range(n_attr)) * n_rows curve = OWCurve() curve.set_style(OWCurve.Lines) curve.set_color(QColor(*color)) curve.set_segment_length(n_attr) curve.set_data(x_values, y_values) curve.attach(self) def draw_groups(self): phis, mus, sigmas = self.compute_groups() diff, mins = [], [] for i in self.attribute_indices: var = self.data_domain[i] if var.is_discrete: diff.append(len(var.values)) mins.append(-0.5) else: diff.append(self.domain_data_stat[i].max - self.domain_data_stat[i].min or 1) mins.append(self.domain_data_stat[i].min) for j, (phi, cluster_mus, cluster_sigma) in enumerate(zip(phis, mus, sigmas)): for i, (mu1, sigma1, mu2, sigma2), in enumerate( zip(cluster_mus, cluster_sigma, cluster_mus[1:], cluster_sigma[1:])): nmu1 = (mu1 - mins[i]) / diff[i] nmu2 = (mu2 - mins[i + 1]) / diff[i + 1] nsigma1 = math.sqrt(sigma1) / diff[i] nsigma2 = 
math.sqrt(sigma2) / diff[i + 1] polygon = ParallelCoordinatePolygon(i, nmu1, nmu2, nsigma1, nsigma2, phi, tuple(self.colors[j]) if self.colors else (0, 0, 0)) polygon.attach(self) self.replot() def compute_groups(self): key = (tuple(self.attributes), self.number_of_groups, self.number_of_steps) if key not in self.groups: def callback(i, n): self.set_progress(i, 2*n) conts = create_contingencies(self.data[:, self.attribute_indices], callback=callback) self.set_progress(50, 100) w, mu, sigma, phi = lac(conts, self.number_of_groups, self.number_of_steps) self.set_progress(100, 100) self.groups[key] = list(map(np.nan_to_num, (phi, mu, sigma))) return self.groups[key] def draw_legend(self): if self.data_has_class: if self.data_domain.has_discrete_class: self.legend().clear() values = get_variable_values_sorted(self.data_domain.class_var) for i, value in enumerate(values): self.legend().add_item( self.data_domain.class_var.name, value, OWPoint(OWPoint.Rect, QColor(*self.colors[i]), 10)) else: values = self.attr_values[self.data_domain.class_var.name] decimals = self.data_domain.class_var.number_of_decimals self.legend().add_color_gradient(self.data_domain.class_var.name, ["%%.%df" % decimals % v for v in values]) else: self.legend().clear() self.old_legend_keys = [] def draw_mid_labels(self, mid_labels): if mid_labels: for j in range(len(mid_labels)): self.addMarker(mid_labels[j], j + 0.5, 1.0, alignment=Qt.AlignCenter | Qt.AlignTop) def draw_statistics(self): """Draw lines that represent standard deviation or quartiles""" return # TODO: Implement using BasicStats if self.show_statistics and self.have_data: data = [] for attr_idx in self.attribute_indices: if not self.data_domain[attr_idx].is_continuous: data.append([()]) continue # only for continuous attributes if not self.data_has_class or self.data_has_continuous_class: # no class if self.show_statistics == MEANS: m = self.domain_data_stat[attr_idx].mean dev = self.domain_data_stat[attr_idx].var data.append([(m - dev, m, m + dev)]) elif self.show_statistics == MEDIAN: data.append([(0, 0, 0)]); continue sorted_array = np.sort(attr_values) if len(sorted_array) > 0: data.append([(sorted_array[int(len(sorted_array) / 4.0)], sorted_array[int(len(sorted_array) / 2.0)], sorted_array[int(len(sorted_array) * 0.75)])]) else: data.append([(0, 0, 0)]) else: curr = [] class_values = get_variable_values_sorted(self.data_domain.class_var) for c in range(len(class_values)): attr_values = self.data[attr_idx, self.data[self.data_class_index] == c] attr_values = attr_values[~np.isnan(attr_values)] if len(attr_values) == 0: curr.append((0, 0, 0)) continue if self.show_statistics == MEANS: m = attr_values.mean() dev = attr_values.std() curr.append((m - dev, m, m + dev)) elif self.show_statistics == MEDIAN: sorted_array = np.sort(attr_values) curr.append((sorted_array[int(len(attr_values) / 4.0)], sorted_array[int(len(attr_values) / 2.0)], sorted_array[int(len(attr_values) * 0.75)])) data.append(curr) # draw vertical lines for i in range(len(data)): for c in range(len(data[i])): if data[i][c] == (): continue x = i - 0.03 * (len(data[i]) - 1) / 2.0 + c * 0.03 col = QColor(self.discrete_palette[c]) col.setAlpha(self.alpha_value_2) self.add_curve("", col, col, 3, OWCurve.Lines, OWPoint.NoSymbol, xData=[x, x, x], yData=[data[i][c][0], data[i][c][1], data[i][c][2]], lineWidth=4) self.add_curve("", col, col, 1, OWCurve.Lines, OWPoint.NoSymbol, xData=[x - 0.03, x + 0.03], yData=[data[i][c][0], data[i][c][0]], lineWidth=4) self.add_curve("", col, col, 1, OWCurve.Lines, 
OWPoint.NoSymbol, xData=[x - 0.03, x + 0.03], yData=[data[i][c][1], data[i][c][1]], lineWidth=4) self.add_curve("", col, col, 1, OWCurve.Lines, OWPoint.NoSymbol, xData=[x - 0.03, x + 0.03], yData=[data[i][c][2], data[i][c][2]], lineWidth=4) # draw lines with mean/median values if not self.data_has_class or self.data_has_continuous_class: class_count = 1 else: class_count = len(self.data_domain.class_var.values) for c in range(class_count): diff = - 0.03 * (class_count - 1) / 2.0 + c * 0.03 ys = [] xs = [] for i in range(len(data)): if data[i] != [()]: ys.append(data[i][c][1]) xs.append(i + diff) else: if len(xs) > 1: col = QColor(self.discrete_palette[c]) col.setAlpha(self.alpha_value_2) self.add_curve("", col, col, 1, OWCurve.Lines, OWPoint.NoSymbol, xData=xs, yData=ys, lineWidth=4) xs = [] ys = [] col = QColor(self.discrete_palette[c]) col.setAlpha(self.alpha_value_2) self.add_curve("", col, col, 1, OWCurve.Lines, OWPoint.NoSymbol, xData=xs, yData=ys, lineWidth=4) def draw_distributions(self): """Draw distributions with discrete attributes""" if not (self.show_distributions and self.have_data and self.data_has_discrete_class): return class_count = len(self.data_domain.class_var.values) class_ = self.data_domain.class_var # we create a hash table of possible class values (happens only if we have a discrete class) if self.domain_contingencies is None: self.domain_contingencies = dict( zip([attr for attr in self.data_domain if attr.is_discrete], get_contingencies(self.raw_data, skipContinuous=True))) self.domain_contingencies[class_] = get_contingency(self.raw_data, class_, class_) max_count = max([contingency.max() for contingency in self.domain_contingencies.values()] or [1]) sorted_class_values = get_variable_values_sorted(self.data_domain.class_var) for axis_idx, attr_idx in enumerate(self.attribute_indices): attr = self.data_domain[attr_idx] if attr.is_discrete: continue contingency = self.domain_contingencies[attr] attr_len = len(attr.values) # we create a hash table of variable values and their indices sorted_variable_values = get_variable_values_sorted(attr) # create bar curve for j in range(attr_len): attribute_value = sorted_variable_values[j] value_count = contingency[:, attribute_value] for i in range(class_count): class_value = sorted_class_values[i] color = QColor(*self.colors[i]) color.setAlpha(self.alpha_value) width = float(value_count[class_value] * 0.5) / float(max_count) y_off = float(1.0 + 2.0 * j) / float(2 * attr_len) height = 0.7 / float(class_count * attr_len) y_low_bottom = y_off + float(class_count * height) / 2.0 - i * height curve = PolygonCurve(QPen(color), QBrush(color), xData=[axis_idx, axis_idx + width, axis_idx + width, axis_idx], yData=[y_low_bottom, y_low_bottom, y_low_bottom - height, y_low_bottom - height], tooltip=attr.name) curve.attach(self) # handle tooltip events def event(self, ev): if ev.type() == QEvent.ToolTip: x = self.inv_transform(xBottom, ev.pos().x()) y = self.inv_transform(yLeft, ev.pos().y()) canvas_position = self.mapToScene(ev.pos()) x_float = self.inv_transform(xBottom, canvas_position.x()) contact, (index, pos) = self.testArrowContact(int(round(x_float)), canvas_position.x(), canvas_position.y()) if contact: attr = self.data_domain[self.attributes[index]] if attr.is_continuous: condition = self.selection_conditions.get(attr.name, [0, 1]) val = self.attr_values[attr.name][0] + condition[pos] * ( self.attr_values[attr.name][1] - self.attr_values[attr.name][0]) str_val = attr.name + "= %%.%df" % attr.number_of_decimals % val 
QToolTip.showText(ev.globalPos(), str_val) else: for curve in self.items(): if type(curve) == PolygonCurve and \ curve.boundingRect().contains(x, y) and \ getattr(curve, "tooltip", None): (name, value, total, dist) = curve.tooltip count = sum([v[1] for v in dist]) if count == 0: continue tooltip_text = "Attribute: <b>%s</b><br>Value: <b>%s</b><br>" \ "Total instances: <b>%i</b> (%.1f%%)<br>" \ "Class distribution:<br>" % ( name, value, count, 100.0 * count / float(total)) for (val, n) in dist: tooltip_text += "&nbsp; &nbsp; <b>%s</b> : <b>%i</b> (%.1f%%)<br>" % ( val, n, 100.0 * float(n) / float(count)) QToolTip.showText(ev.globalPos(), tooltip_text[:-4]) elif ev.type() == QEvent.MouseMove: QToolTip.hideText() return OWPlot.event(self, ev) def testArrowContact(self, indices, x, y): if type(indices) != list: indices = [indices] for index in indices: if index >= len(self.attributes) or index < 0: continue int_x = self.transform(xBottom, index) bottom = self.transform(yLeft, self.selection_conditions.get(self.attributes[index], [0, 1])[0]) bottom_rect = QRect(int_x - self.bottom_pixmap.width() / 2, bottom, self.bottom_pixmap.width(), self.bottom_pixmap.height()) if bottom_rect.contains(QPoint(x, y)): return 1, (index, 0) top = self.transform(yLeft, self.selection_conditions.get(self.attributes[index], [0, 1])[1]) top_rect = QRect(int_x - self.top_pixmap.width() / 2, top - self.top_pixmap.height(), self.top_pixmap.width(), self.top_pixmap.height()) if top_rect.contains(QPoint(x, y)): return 1, (index, 1) return 0, (0, 0) def mousePressEvent(self, e): canvas_position = self.mapToScene(e.pos()) x = self.inv_transform(xBottom, canvas_position.x()) contact, info = self.testArrowContact(int(round(x)), canvas_position.x(), canvas_position.y()) if contact: self.pressed_arrow = info else: OWPlot.mousePressEvent(self, e) def mouseMoveEvent(self, e): if hasattr(self, "pressed_arrow"): canvas_position = self.mapToScene(e.pos()) y = min(1, max(0, self.inv_transform(yLeft, canvas_position.y()))) index, pos = self.pressed_arrow attr = self.data_domain[self.attributes[index]] old_condition = self.selection_conditions.get(attr.name, [0, 1]) old_condition[pos] = y self.selection_conditions[attr.name] = old_condition self.update_data(self.attributes, self.visualized_mid_labels) if attr.is_continuous: val = self.attr_values[attr.name][0] + old_condition[pos] * ( self.attr_values[attr.name][1] - self.attr_values[attr.name][0]) strVal = attr.name + "= %.2f" % val QToolTip.showText(e.globalPos(), strVal) if self.sendSelectionOnUpdate and self.auto_send_selection_callback: self.auto_send_selection_callback() else: OWPlot.mouseMoveEvent(self, e) def mouseReleaseEvent(self, e): if hasattr(self, "pressed_arrow"): del self.pressed_arrow else: OWPlot.mouseReleaseEvent(self, e) def zoom_to_rect(self, r): r.setTop(self.graph_area.top()) r.setBottom(self.graph_area.bottom()) super().zoom_to_rect(r) def removeAllSelections(self, send=1): self.selection_conditions = {} self.update_data(self.attributes, self.visualized_mid_labels) # draw the curves and the selection conditions def drawCanvas(self, painter): OWPlot.drawCanvas(self, painter) for i in range( int(max(0, math.floor(self.axisScaleDiv(xBottom).interval().minValue()))), int(min(len(self.attributes), math.ceil(self.axisScaleDiv(xBottom).interval().maxValue()) + 1))): bottom, top = self.selection_conditions.get(self.attributes[i], (0, 1)) painter.drawPixmap(self.transform(xBottom, i) - self.bottom_pixmap.width() / 2, self.transform(yLeft, bottom), self.bottom_pixmap) 
painter.drawPixmap(self.transform(xBottom, i) - self.top_pixmap.width() / 2, self.transform(yLeft, top) - self.top_pixmap.height(), self.top_pixmap) def auto_send_selection_callback(self): pass def clear(self): super().clear() self.attributes = [] self.visualized_mid_labels = [] self.selected_examples = [] self.unselected_examples = [] self.selection_conditions = {} # #################################################################### # a curve that is able to draw several series of lines class ParallelCoordinatesCurve(OWCurve): def __init__(self, n_attributes, y_values, color, name=""): OWCurve.__init__(self, tooltip=name) self._item = QGraphicsPathItem(self) self.path = QPainterPath() self.fitted = False self.n_attributes = n_attributes self.n_rows = int(len(y_values) / n_attributes) self.set_style(OWCurve.Lines) if isinstance(color, tuple): self.set_pen(QPen(QColor(*color))) else: self.set_pen(QPen(QColor(color))) x_values = list(range(n_attributes)) * self.n_rows self.set_data(x_values, y_values) def update_properties(self): self.redraw_path() def redraw_path(self): self.path = QPainterPath() for segment in self.segment(self.data()): if self.fitted: self.draw_cubic_path(segment) else: self.draw_normal_path(segment) self._item.setPath(self.graph_transform().map(self.path)) self._item.setPen(self.pen()) def segment(self, data): for i in range(self.n_rows): yield data[i * self.n_attributes:(i + 1) * self.n_attributes] def draw_cubic_path(self, segment): for (x1, y1), (x2, y2) in zip(segment, segment[1:]): self.path.moveTo(x1, y1) self.path.cubicTo(QPointF(x1 + 0.5, y1), QPointF(x2 - 0.5, y2), QPointF(x2, y2)) def draw_normal_path(self, segment): if not segment: return x, y = segment[0] self.path.moveTo(x, y) for x, y in segment[1:]: self.path.lineTo(x, y) class ParallelCoordinatePolygon(OWPlotItem): def __init__(self, i, mu1, mu2, sigma1, sigma2, phi, color): OWPlotItem.__init__(self) self.outer_box = QGraphicsPolygonItem(self) self.inner_box = QGraphicsPolygonItem(self) self.i = i self.mu1 = mu1 self.mu2 = mu2 self.sigma1 = sigma1 self.sigma2 = sigma2 self.phi = phi self.twosigmapolygon = QPolygonF([ QPointF(i, mu1 - sigma1), QPointF(i, mu1 + sigma1), QPointF(i + 1, mu2 + sigma2), QPointF(i + 1, mu2 - sigma2), QPointF(i, mu1 - sigma1) ]) self.sigmapolygon = QPolygonF([ QPointF(i, mu1 - .5 * sigma1), QPointF(i, mu1 + .5 * sigma1), QPointF(i + 1, mu2 + .5 * sigma2), QPointF(i + 1, mu2 - .5 * sigma2), QPointF(i, mu1 - .5 * sigma1) ]) if isinstance(color, tuple): color = QColor(*color) color.setAlphaF(.3) self.outer_box.setBrush(color) self.outer_box.setPen(QColor(0, 0, 0, 0)) self.inner_box.setBrush(color) self.inner_box.setPen(color) def update_properties(self): self.outer_box.setPolygon(self.graph_transform().map(self.twosigmapolygon)) self.inner_box.setPolygon(self.graph_transform().map(self.sigmapolygon)) def initialize_random(conts, k): mu = np.zeros((k, len(conts))) sigma = np.zeros((k, len(conts))) for i, (c, cw) in enumerate(conts): w = np.random.random((len(c), k)) w /= w.sum(axis=1)[:, None] c = c[:, 0] if i == 0 else c[:, 1] for j in range(k): mu1 = np.dot(w[:, j] * cw, c) / (w[:, j] * cw).sum() cn = c - mu1 sigma1 = np.sum(cn ** 2 * w[:, j] * cw, axis=0) / (w[:, j] * cw).sum() mu[j, i] = mu1 sigma[j, i] = sigma1 return mu, sigma def initialize_kmeans(conts, k): x = [] xm = {} for i, (c, cw) in enumerate(conts[1:-1]): oldx, oldxm, x, xm = x, xm, [], {} if i == 0: for a, w in zip(c, cw): x.append((tuple(a), w)) xm.setdefault(tuple(a)[1:], []).append(len(x) - 1) else: for a, w in 
zip(c, cw): for l in oldxm[tuple(a[:2])]: olda, oldw = oldx[l] x.append((olda + (a[2],), oldw+w)) xm.setdefault(tuple(a)[1:], []).append(len(x) - 1) X = np.array([y[0] for y in x]) import sklearn.cluster as skl_cluster kmeans = skl_cluster.KMeans(n_clusters=k) Y = kmeans.fit_predict(X) means = kmeans.cluster_centers_ covars = np.zeros((k, len(conts))) for j in range(k): xn = X[Y == j, :] - means[j] covars[j] = np.sum(xn ** 2, axis=0) / len(xn) return means, covars def lac(conts, k, nsteps=30, window_size=1): """ k expected classes, m data points, each with dim dimensions """ dim = len(conts) np.random.seed(42) # Initialize parameters priors = np.ones(k) / k print("Initializing") import sys; sys.stdout.flush() means, covars = initialize_random(conts, k) #means, covars = initialize_kmeans(conts, k) print("Done") w = [np.empty((k, len(c[0]),)) for c in conts] active = np.ones(k, dtype=np.bool) for i in range(1, nsteps + 1): for l, (c, cw) in enumerate(conts): lower = l - window_size if l - window_size >= 0 else None upper = l + window_size + 1 if l + window_size + 1 <= dim else None dims = slice(lower, upper) active_dim = min(l, window_size) x = c # E step for j in range(k): if any(np.abs(covars[j, dims]) < 1e-15): active[j] = 0 if active[j]: det = covars[j, dims].prod() inv_covars = 1. / covars[j, dims] xn = x - means[j, dims] factor = (2.0 * np.pi) ** (x.shape[1]/ 2.0) * det ** 0.5 w[l][j] = priors[j] * np.exp(np.sum(xn * inv_covars * xn, axis=1) * -.5) / factor else: w[l][j] = 0 w[l][active] /= w[l][active].sum(axis=0) # M step n = np.sum(w[l], axis=1) priors = n / np.sum(n) for j in range(k): if n[j]: mu = np.dot(w[l][j, :] * cw, x[:, active_dim]) / (w[l][j, :] * cw).sum() xn = x[:, active_dim] - mu sigma = np.sum(xn ** 2 * w[l][j] * cw, axis=0) / (w[l][j, :] * cw).sum() if np.isnan(mu).any() or np.isnan(sigma).any(): return w, means, covars, priors else: active[j] = 0 mu = 0. sigma = 0. means[j, l] = mu covars[j, l] = sigma # w = np.zeros((k, m)) # for j in range(k): # if active[j]: # det = covars[j].prod() # inv_covars = 1. 
/ covars[j] # xn = X - means[j] # factor = (2.0 * np.pi) ** (xn.shape[1] / 2.0) * det ** 0.5 # w[j] = priors[j] * exp(-.5 * np.sum(xn * inv_covars * xn, axis=1)) / factor # w[active] /= w[active].sum(axis=0) return w, means, covars, priors def create_contingencies(X, callback=None): window_size = 1 dim = len(X.domain) X_ = Discretize(method=EqualFreq(n=10))(X) m = [] for i, var in enumerate(X_.domain): cleaned_values = [tuple(map(str.strip, v.strip('[]()<>=≥').split('-'))) for v in var.values] try: float_values = [[float(v) for v in vals] for vals in cleaned_values] bin_centers = { i: v[0] if len(v) == 1 else v[0] + (v[1] - v[0]) for i, v in enumerate(float_values) } except ValueError: bin_centers = { i: i for i, v in enumerate(cleaned_values) } m.append(bin_centers) from Orange.data.sql.table import SqlTable if isinstance(X, SqlTable): conts = [] al = len(X.domain) if al > 1: conts.append(create_sql_contingency(X_, [0, 1], m)) if callback: callback(1, al) for a1, a2, a3 in zip(range(al), range(1, al), range(2, al)): conts.append(create_sql_contingency(X_, [a1, a2, a3], m)) if callback: callback(a3, al) if al > 2: conts.append(create_sql_contingency(X_, [al-2, al-1], m)) if callback: callback(al, al) else: conts = [defaultdict(float) for i in range(len(X_.domain))] for i, r in enumerate(X_): if any(np.isnan(r)): continue row = tuple(m[vi].get(v) for vi, v in enumerate(r)) for l in range(len(X_.domain)): lower = l - window_size if l - window_size >= 0 else None upper = l + window_size + 1 if l + window_size + 1 <= dim else None dims = slice(lower, upper) conts[l][row[dims]] += 1 conts = [zip(*x.items()) for x in conts] conts = [(np.array(c), np.array(cw)) for c, cw in conts] # for i, ((c1, cw1), (c2, cw2)) in enumerate(zip(contss, conts)): # a = np.sort(np.hstack((c1, cw1[:, None])), axis=0) # b = np.sort(np.hstack((c2, cw2[:, None])), axis=0) # assert_almost_equal(a, b) return conts def create_sql_contingency(X, columns, m): def convert(row): c = len(row) - 1 return [m[columns[i]].get(v) if i != c else v for i, v in enumerate(row)] group_by = [a.to_sql() for a in (X.domain[c] for c in columns)] filters = ['%s IS NOT NULL' % a for a in group_by] fields = group_by + ['COUNT(%s)' % group_by[0]] query = X._sql_query(fields, group_by=group_by, filters=filters) with X._execute_sql_query(query) as cur: cont = np.array(list(map(convert, cur.fetchall())), dtype='float') return cont[:, :-1], cont[:, -1:].flatten()
bsd-2-clause
rahulk90/inference_introspection
optvaemodels/vae_evaluate.py
2
2979
import numpy as np
from theano import config
import theano.tensor as T
from sklearn.decomposition import PCA
""" External function to deal with evaluation of model """

def getPrior(vae, nsamples=100):
    """ Sample from Prior """
    z = np.random.randn(nsamples,vae.params['dim_stochastic'])
    return z

def sample(vae, nsamples=100):
    z = getPrior(vae,nsamples)
    return vae.reconstruct(z.astype(config.floatX))

def infer(vae, data):
    """ Posterior Inference using recognition network """
    assert len(data.shape)==2,'Expecting 2D data matrix'
    assert data.shape[1]==vae.params['dim_observations'],'Wrong dimensions for observations'
    return vae.inference(X=data.astype(config.floatX))

def getInitFinal(vae, dataset, batch_size):
    N = dataset.shape[0]
    init_mulist, final_mulist = [], []
    init_logcovlist, final_logcovlist = [], []
    for bnum,st_idx in enumerate(range(0,N,batch_size)):
        if bnum%1000==0:
            print bnum,
        end_idx = min(st_idx+batch_size, N)
        X = dataset[st_idx:end_idx]
        if X.__class__.__name__=='csr_matrix' or X.__class__.__name__=='csc_matrix':
            X = X.toarray()
        X = X.astype(config.floatX)
        mu_0, logcov_0, mu_f, logcov_f = vae.init_final_params(X=X)
        init_mulist.append(mu_0)
        init_logcovlist.append(logcov_0)
        final_mulist.append(mu_f)
        final_logcovlist.append(logcov_f)
    print '... done init_final'
    retVals = {}
    retVals['mu_0'] = np.concatenate(init_mulist, axis=0)
    retVals['logcov_0'] = np.concatenate(init_logcovlist, axis=0)
    retVals['mu_f'] = np.concatenate(final_mulist, axis=0)
    retVals['logcov_f'] = np.concatenate(final_logcovlist, axis=0)
    return retVals

def evaluateBound(vae, dataset, batch_size):
    """ Evaluate bound on dataset """
    N = dataset.shape[0]
    bd_0,bd_f = 0,0
    perp0,perpf = 0,0
    diff_elbo = 0
    for bnum,st_idx in enumerate(range(0,N,batch_size)):
        if bnum%1000==0:
            print bnum,
        end_idx = min(st_idx+batch_size, N)
        X = dataset[st_idx:end_idx]
        if X.__class__.__name__=='csr_matrix' or X.__class__.__name__=='csc_matrix':
            X = X.toarray()
        X = X.astype(config.floatX)
        if vae.params['data_type']=='bow':
            perp_0, perp_f, n_steps, d_elbo = vae.evaluatePerp(X=X)
            bd_0 += perp_0
            bd_f += perp_f
        else:
            elbo_0, elbo_f, n_steps, d_elbo = vae.evaluate(X=X)
            bd_0 += elbo_0
            bd_f += elbo_f
        diff_elbo+= d_elbo
    print '.... done evaluation'
    bd_0 /= float(N)
    bd_f /= float(N)
    diff_elbo /= float(N)
    retVals = {}
    if vae.params['data_type']=='bow':
        retVals['perp_0']= np.exp(bd_0)
        retVals['perp_f']= np.exp(bd_f)
    else:
        retVals['elbo_0']= bd_0
        retVals['elbo_f']= bd_f
    retVals['diff_elbo'] = diff_elbo
    return retVals
mit
sonnyhu/scikit-learn
examples/decomposition/plot_pca_iris.py
65
1485
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
=========================================================
PCA example with Iris Data-set
=========================================================

Principal Component Analysis applied to the Iris dataset.

See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.

"""
print(__doc__)


# Code source: Gaël Varoquaux
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D


from sklearn import decomposition
from sklearn import datasets

np.random.seed(5)

centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target

fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)

for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
    ax.text3D(X[y == label, 0].mean(),
              X[y == label, 1].mean() + 1.5,
              X[y == label, 2].mean(), name,
              horizontalalignment='center',
              bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)

ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])

plt.show()
bsd-3-clause
TUW-GEO/rt1
tests/test_rtmetrics.py
1
4649
import unittest
from itertools import combinations

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

from rt1.rtmetrics import RTmetrics
from rt1 import rtfits


class TestRTMetrics(unittest.TestCase):
    @staticmethod
    def mock_series():
        d1 = pd.Series([43.04, 20.55, 8.98, -15.27, 29.18, -12.48, 78.35,
                        92.73, -23.31, 14.12])
        d2 = pd.Series([11.52, 116.34, 60.88, 9.73, 35.51, 26.69, 119.53,
                        -16.41, 25.05, -68.33])
        results = {
            'pearson': 0.13745925602259895,
            'spearman': 0.10303030303030303,
            'linregress': {
                'slope': 0.2014594067161711,
                'intercept': 27.29877405497224,
                'pearson': 0.13745925602259895,
                'pvalue': 0.7049253466715144,
                'stderr': 0.513246861349088},
            'rmsd': 61.48676800743392,
            'ub_rmsd': 60.901701092826634,
            'bias': -8.462000000000003,
            'mae': 53.084,
            'mape': 2.578235110085792,
            'std_ratio': 0.682317387225608}
        return (d1, d2, results)

    @staticmethod
    def mock_fit():
        return rtfits.load('./tests/sig0_dB.dump')

    def test_metrics(self):
        d1, d2, expected_values = self.mock_series()
        # loop through all possible metrics
        for metric in RTmetrics.metrics_registry:
            if metric == 'linregress':
                continue
            res = getattr(RTmetrics, metric)(d1, d2)
            self.assertAlmostEqual(res, expected_values[metric])

    def test_linregress(self):
        d1, d2, expected_values = self.mock_series()
        linregress = RTmetrics.linregress(d1, d2)
        linregress_expected = expected_values['linregress']
        self.assertAlmostEqual(linregress['slope'],
                               linregress_expected['slope'])
        self.assertAlmostEqual(linregress['intercept'],
                               linregress_expected['intercept'])
        self.assertAlmostEqual(linregress['pearson'],
                               linregress_expected['pearson'])
        self.assertAlmostEqual(linregress['pvalue'],
                               linregress_expected['pvalue'])
        self.assertAlmostEqual(linregress['stderr'],
                               linregress_expected['stderr'])

    def test_fit_metric(self):
        fit = self.mock_fit()
        # loop through all possible parameter combinations
        for [p1, s1], [p2, s2] in combinations(
                fit.metric._all_keys.items(), 2):
            metric_fit_params = getattr(getattr(fit.metric, p1), p2)
            # loop through all possible metrics
            for metric in RTmetrics.metrics_registry:
                fit_metric = getattr(metric_fit_params, metric)

                # remove suffix if present
                if p1.endswith(f'__{s1}'):
                    p1 = p1[:-len(f'__{s1}')]
                if p2.endswith(f'__{s2}'):
                    p2 = p2[:-len(f'__{s2}')]

                d1 = getattr(fit, s1)
                if s1 == 'calc_model':
                    d1 = getattr(d1(return_components=True), p1)
                else:
                    d1 = getattr(d1, p1)

                d2 = getattr(fit, s2)
                if s2 == 'calc_model':
                    d2 = getattr(d2(return_components=True), p2)
                else:
                    d2 = getattr(d2, p2)

                df = pd.concat([d1, d2], axis=1)
                fit_func = getattr(RTmetrics, metric)(df[p1], df[p2])

                assertmsg = (
                    "there's been something wrong during metric-evaluation!\n"
                    f' p1="{p1}", p2="{p2}", metric="{metric}"\n' +
                    f' fit_metric={fit_metric}\n' +
                    f' fit_func={fit_func}\n')

                if metric == 'linregress':
                    for key, val in fit_metric.items():
                        if np.isnan(val):
                            assert np.isnan(fit_func[key]), assertmsg
                        else:
                            assert val == fit_func[key], assertmsg
                else:
                    if np.isnan(fit_metric):
                        assert np.isnan(fit_func), assertmsg
                    else:
                        assert fit_metric == fit_func, assertmsg

    def test_scatterplot(self):
        plt.ion()
        fit = self.mock_fit()
        _ = RTmetrics.scatterplot(fit.res_df.SM, fit.res_df.omega,
                                  "SM", "omega")


if __name__ == "__main__":
    unittest.main()
apache-2.0