repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
morrellk/openelections-data-or | src/progress.py | 1 | 3077 | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2017 Nick Kocharhook
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import csv
import os
import argparse
import collections
import pandas
def main():
args = parseArguments()
progress = OEProgress(args.path)
progress.printProgress()
class OEProgress(object):
def __init__(self, path):
self.path = path
self.counts = collections.defaultdict(int)
self.weightedCounts = collections.defaultdict(int)
self.statuses = {'': 'Incomplete', 'done': 'Complete', 'n/a': 'Source missing'}
self.populateResults()
def populateResults(self):
self.elections = pandas.read_csv(self.path).fillna('')
def printProgress(self):
for index, series in self.elections.iterrows():
precinctCount = series['precinct count']
primaries = series.filter(regex='primary$')
generals = series.filter(regex='general$')
for elections in [primaries, generals]:
gb = elections.groupby(elections)
# Because primaries involve two complete elections, the number of results is doubled
multiplier = 2 if elections.equals(primaries) else 1
for group, values in gb:
self.counts[group] += len(values.index)
self.weightedCounts[group] += len(values.index) * precinctCount * multiplier
def printCount(counts, name):
countSum = sum(counts.values())
print(f"By {name}:")
for status, count in counts.items():
print("{:>15} {:>5} {:>6.1%}".format(self.statuses[status], count, count/countSum))
print("{:>15} {:>5}".format("Total", countSum))
printCount(self.counts, 'election')
printCount(self.weightedCounts, 'precinct')
def parseArguments():
parser = argparse.ArgumentParser(description='Verify votes are correct using a simple checksum')
# parser.add_argument('--verbose', '-v', dest='verbose', action='store_true')
parser.add_argument('path', type=str, help='Path to county_matrix CSV file.')
parser.set_defaults(verbose=False)
return parser.parse_args()
# Default function is main()
if __name__ == '__main__':
main() | mit |
indhub/mxnet | example/named_entity_recognition/src/preprocess.py | 10 | 2002 | # !/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
#read in csv of NER training data
df = pd.read_csv("../data/ner_dataset.csv", encoding="ISO-8859-1")
#rename columns
df = df.rename(columns = {"Sentence #" : "utterance_id",
"Word" : "token",
"POS" : "POS_tag",
"Tag" : "BILOU_tag"})
#clean utterance_id column
df.loc[:, "utterance_id"] = df["utterance_id"].str.replace('Sentence: ', '')
#fill np.nan utterance ID's with the last valid entry
df = df.fillna(method='ffill')
df.loc[:, "utterance_id"] = df["utterance_id"].apply(int)
#melt BILOU tags and tokens into an array per utterance
df1 = df.groupby("utterance_id")["BILOU_tag"].apply(lambda x: np.array(x)).to_frame().reset_index()
df2 = df.groupby("utterance_id")["token"].apply(lambda x: np.array(x)).to_frame().reset_index()
df3 = df.groupby("utterance_id")["POS_tag"].apply(lambda x: np.array(x)).to_frame().reset_index()
#join the results on utterance id
df = df1.merge(df2.merge(df3, how = "left", on = "utterance_id"), how = "left", on = "utterance_id")
#save the dataframe to a csv file
df.to_pickle("../data/ner_data.pkl") | apache-2.0 |
clemkoa/scikit-learn | benchmarks/bench_mnist.py | 45 | 6977 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogenous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
MLP_adam 53.46s 0.11s 0.0224
Nystroem-SVM 112.97s 0.92s 0.0228
MultilayerPerceptron 24.33s 0.14s 0.0287
ExtraTrees 42.99s 0.57s 0.0294
RandomForest 42.70s 0.49s 0.0318
SampledRBF-SVM 135.81s 0.56s 0.0486
LinearRegression-SAG 16.67s 0.06s 0.0824
CART 20.69s 0.02s 0.1219
dummy 0.00s 0.01s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
# Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
# Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM': make_pipeline(
Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM': make_pipeline(
RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LogisticRegression-SAG': LogisticRegression(solver='sag', tol=1e-1,
C=1e4),
'LogisticRegression-SAGA': LogisticRegression(solver='saga', tol=1e-1,
C=1e4),
'MultilayerPerceptron': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
solver='sgd', learning_rate_init=0.2, momentum=0.9, verbose=1,
tol=1e-4, random_state=1),
'MLP-adam': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
solver='adam', learning_rate_init=0.001, verbose=1,
tol=1e-4, random_state=1)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
marcusmueller/gnuradio | gnuradio-runtime/examples/volk_benchmark/volk_plot.py | 6 | 6198 | #!/usr/bin/env python
from __future__ import division
from __future__ import unicode_literals
import sys, math
import argparse
from volk_test_funcs import *
try:
import matplotlib
import matplotlib.pyplot as plt
except ImportError:
sys.stderr.write("Could not import Matplotlib (http://matplotlib.sourceforge.net/)\n")
sys.exit(1)
def main():
desc='Plot Volk performance results from a SQLite database. ' + \
'Run one of the volk tests first (e.g, volk_math.py)'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-D', '--database', type=str,
default='volk_results.db',
help='Database file to read data from [default: %(default)s]')
parser.add_argument('-E', '--errorbars',
action='store_true', default=False,
help='Show error bars (1 standard dev.)')
parser.add_argument('-P', '--plot', type=str,
choices=['mean', 'min', 'max'],
default='mean',
help='Set the type of plot to produce [default: %(default)s]')
parser.add_argument('-%', '--percent', type=str,
default=None, metavar="table",
help='Show percent difference to the given type [default: %(default)s]')
args = parser.parse_args()
# Set up global plotting properties
matplotlib.rcParams['figure.subplot.bottom'] = 0.2
matplotlib.rcParams['figure.subplot.top'] = 0.95
matplotlib.rcParams['figure.subplot.right'] = 0.98
matplotlib.rcParams['ytick.labelsize'] = 16
matplotlib.rcParams['xtick.labelsize'] = 16
matplotlib.rcParams['legend.fontsize'] = 18
# Get list of tables to compare
conn = create_connection(args.database)
tables = list_tables(conn)
M = len(tables)
# Colors to distinguish each table in the bar graph
# More than 5 tables will wrap around to the start.
colors = ['b', 'r', 'g', 'm', 'k']
# Set up figure for plotting
f0 = plt.figure(0, facecolor='w', figsize=(14,10))
s0 = f0.add_subplot(1,1,1)
# Create a register of names that exist in all tables
tmp_regs = []
for table in tables:
# Get results from the next table
res = get_results(conn, table[0])
tmp_regs.append(list())
for r in res:
try:
tmp_regs[-1].index(r['kernel'])
except ValueError:
tmp_regs[-1].append(r['kernel'])
# Get only those names that are common in all tables
name_reg = tmp_regs[0]
for t in tmp_regs[1:]:
name_reg = list(set(name_reg) & set(t))
name_reg.sort()
# Pull the data out for each table into a dictionary
# we can ref the table by it's name and the data associated
# with a given kernel in name_reg by it's name.
# This ensures there is no sorting issue with the data in the
# dictionary, so the kernels are plotted against each other.
table_data = dict()
for i,table in enumerate(tables):
# Get results from the next table
res = get_results(conn, table[0])
data = dict()
for r in res:
data[r['kernel']] = r
table_data[table[0]] = data
if args.percent is not None:
for i,t in enumerate(table_data):
if args.percent == t:
norm_data = []
for name in name_reg:
if(args.plot == 'max'):
norm_data.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
norm_data.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
norm_data.append(table_data[t][name]['avg'])
# Plot the results
x0 = list(range(len(name_reg)))
i = 0
for t in (table_data):
ydata = []
stds = []
for name in name_reg:
stds.append(math.sqrt(table_data[t][name]['var']))
if(args.plot == 'max'):
ydata.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
ydata.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
ydata.append(table_data[t][name]['avg'])
if args.percent is not None:
ydata = [-100*(y-n)/y for y,n in zip(ydata,norm_data)]
if(args.percent != t):
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80 / (M-1)
x1 = [x + i*wdth for x in x0]
i += 1
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80 / M
x1 = [x + i*wdth for x in x0]
i += 1
if(args.errorbars is False):
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
s0.bar(x1, ydata, width=wdth,
yerr=stds,
color=colors[i%M], label=t,
edgecolor='k', linewidth=2,
error_kw={"ecolor": 'k', "capsize":5,
"linewidth":2})
nitems = res[0]['nitems']
if args.percent is None:
s0.set_ylabel("Processing time (sec) [{0:G} items]".format(nitems),
fontsize=22, fontweight='bold',
horizontalalignment='center')
else:
s0.set_ylabel("% Improvement over {0} [{1:G} items]".format(
args.percent, nitems),
fontsize=22, fontweight='bold')
s0.legend()
s0.set_xticks(x0)
s0.set_xticklabels(name_reg)
for label in s0.xaxis.get_ticklabels():
label.set_rotation(45)
label.set_fontsize(16)
plt.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
krez13/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 127 | 7477 | r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantees the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increased logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows to reduce drastically the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``
Empirical validation
====================
We validate the above bounds on the the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
lazywei/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instanciate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination using
# the cross_validation module using all CPUs available on the machine
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
466152112/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, m_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
NYU-CAL/Disco | Python/floopAnalysis.py | 1 | 1101 | import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import discopy.util as util
import discopy.geom as geom
def analyzeSingle(filename):
opts = util.loadOpts(filename)
pars = util.loadPars(filename)
print("Loading " + filename)
t, x1, x2, x3, prim, dat = util.loadCheckpoint(filename)
B1 = prim[:,5]
B2 = prim[:,6]
B3 = prim[:,7]
b2 = B1*B1 + B2*B2 + B3*B3
eB = 0.5 * geom.integrate(b2, dat, opts, pars)
return t, eB
def analyze(filenames):
N = len(filenames)
eB = np.empty(N)
t = np.empty(N)
for i,f in enumerate(filenames):
t[i], eB[i] = analyzeSingle(f)
figname = "floopMagneticEnergy.png"
fig, ax = plt.subplots(1,1)
ax.plot(t, eB/eB[0], 'k+')
ax.set_xlabel("t")
ax.set_ylabel(r"$e_B(t)/e_B(t=0)$")
ax.set_ylim(0.85,1.0)
print("Saving " + figname)
fig.savefig("floopMagneticEnergy.png")
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Need a checkpoint dude!")
sys.exit()
filenames = sys.argv[1:]
analyze(filenames)
| gpl-3.0 |
yavalvas/yav_com | build/matplotlib/lib/matplotlib/artist.py | 11 | 41588 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import re
import warnings
import inspect
import matplotlib
import matplotlib.cbook as cbook
from matplotlib import docstring, rcParams
from .transforms import Bbox, IdentityTransform, TransformedBbox, \
TransformedPath, Transform
from .path import Path
## Note, matplotlib artists use the doc strings for set and get
# methods to enable the introspection methods of setp and getp. Every
# set_* method should have a docstring containing the line
#
# ACCEPTS: [ legal | values ]
#
# and aliases for setters and getters should have a docstring that
# starts with 'alias for ', as in 'alias for set_somemethod'
#
# You may wonder why we use so much boiler-plate manually defining the
# set_alias and get_alias functions, rather than using some clever
# python trick. The answer is that I need to be able to manipulate
# the docstring, and there is no clever way to do that in python 2.2,
# as far as I can see - see
# http://groups.google.com/groups?hl=en&lr=&threadm=mailman.5090.1098044946.5135.python-list%40python.org&rnum=1&prev=/groups%3Fq%3D__doc__%2Bauthor%253Ajdhunter%2540ace.bsd.uchicago.edu%26hl%3Den%26btnG%3DGoogle%2BSearch
def allow_rasterization(draw):
    """
    Decorator for Artist.draw method. Provides routines
    that run before and after the draw call. The before and after functions
    are useful for changing artist-dependant renderer attributes or making
    other setup function calls, such as starting and flushing a mixed-mode
    renderer.
    """
    def before(artist, renderer):
        # Switch the renderer into rasterizing / filtering mode before
        # the wrapped draw runs, if this artist asked for it.
        if artist.get_rasterized():
            renderer.start_rasterizing()

        if artist.get_agg_filter() is not None:
            renderer.start_filter()

    def after(artist, renderer):
        # Tear down in reverse order of `before`: filter first, then
        # rasterization.
        if artist.get_agg_filter() is not None:
            renderer.stop_filter(artist.get_agg_filter())

        if artist.get_rasterized():
            renderer.stop_rasterizing()

    # the axes class has a second argument inframe for its draw method.
    def draw_wrapper(artist, renderer, *args, **kwargs):
        before(artist, renderer)
        draw(artist, renderer, *args, **kwargs)
        after(artist, renderer)

    # "safe wrapping" to exactly replicate anything we haven't overridden above
    # (a hand-rolled functools.wraps; kept manual so only these three
    # attributes are copied).
    draw_wrapper.__name__ = draw.__name__
    draw_wrapper.__dict__ = draw.__dict__
    draw_wrapper.__doc__ = draw.__doc__
    # Flag checked by Artist.set_rasterized() to warn when a draw method
    # cannot honor rasterization.
    draw_wrapper._supports_rasterization = True
    return draw_wrapper
class Artist(object):
"""
Abstract base class for someone who renders into a
:class:`FigureCanvas`.
"""
aname = 'Artist'
zorder = 0
    def __init__(self):
        # Figure this artist belongs to; installed via set_figure().
        self.figure = None

        self._transform = None       # lazily created in get_transform()
        self._transformSet = False   # True once set_transform() was called
        self._visible = True
        self._animated = False
        self._alpha = None           # None -> backend default blending
        self.clipbox = None          # clipping Bbox, see set_clip_box()
        self._clippath = None        # clipping path, see set_clip_path()
        self._clipon = True
        self._lod = False            # level-of-detail flag, see set_lod()
        self._label = ''
        self._picker = None          # see set_picker() for accepted values
        self._contains = None        # custom hit test, see set_contains()
        self._rasterized = None
        self._agg_filter = None
        self.eventson = False  # fire events only if eventson
        self._oid = 0  # an observer id
        self._propobservers = {}  # a dict from oids to funcs
        try:
            self.axes = None
        except AttributeError:
            # Handle self.axes as a read-only property, as in Figure.
            pass
        self._remove_method = None   # callback installed by parent container
        self._url = None
        self._gid = None             # group id (e.g. for SVG output)
        self._snap = None
        self._sketch = rcParams['path.sketch']
        self._path_effects = rcParams['path.effects']
def __getstate__(self):
d = self.__dict__.copy()
# remove the unpicklable remove method, this will get re-added on load
# (by the axes) if the artist lives on an axes.
d['_remove_method'] = None
return d
def remove(self):
"""
Remove the artist from the figure if possible. The effect
will not be visible until the figure is redrawn, e.g., with
:meth:`matplotlib.axes.Axes.draw_idle`. Call
:meth:`matplotlib.axes.Axes.relim` to update the axes limits
if desired.
Note: :meth:`~matplotlib.axes.Axes.relim` will not see
collections even if the collection was added to axes with
*autolim* = True.
Note: there is no support for removing the artist's legend entry.
"""
# There is no method to set the callback. Instead the parent should
# set the _remove_method attribute directly. This would be a
# protected attribute if Python supported that sort of thing. The
# callback has one parameter, which is the child to be removed.
if self._remove_method is not None:
self._remove_method(self)
else:
raise NotImplementedError('cannot remove artist')
# TODO: the fix for the collections relim problem is to move the
# limits calculation into the artist itself, including the property of
# whether or not the artist should affect the limits. Then there will
# be no distinction between axes.add_line, axes.add_patch, etc.
# TODO: add legend support
def have_units(self):
'Return *True* if units are set on the *x* or *y* axes'
ax = self.axes
if ax is None or ax.xaxis is None:
return False
return ax.xaxis.have_units() or ax.yaxis.have_units()
def convert_xunits(self, x):
"""For artists in an axes, if the xaxis has units support,
convert *x* using xaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.xaxis is None:
#print 'artist.convert_xunits no conversion: ax=%s'%ax
return x
return ax.xaxis.convert_units(x)
def convert_yunits(self, y):
"""For artists in an axes, if the yaxis has units support,
convert *y* using yaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.yaxis is None:
return y
return ax.yaxis.convert_units(y)
def set_axes(self, axes):
"""
Set the :class:`~matplotlib.axes.Axes` instance in which the
artist resides, if any.
ACCEPTS: an :class:`~matplotlib.axes.Axes` instance
"""
self.axes = axes
def get_axes(self):
"""
Return the :class:`~matplotlib.axes.Axes` instance the artist
resides in, or *None*
"""
return self.axes
def get_window_extent(self, renderer):
"""
Get the axes bounding box in display space.
Subclasses should override for inclusion in the bounding box
"tight" calculation. Default is to return an empty bounding
box at 0, 0.
Be careful when using this function, the results will not update
if the artist window extent of the artist changes. The extent
can change due to any changes in the transform stack, such as
changing the axes limits, the figure size, or the canvas used
(as is done when saving a figure). This can lead to unexpected
behavior where interactive figures will look fine on the screen,
but will save incorrectly.
"""
return Bbox([[0, 0], [0, 0]])
def add_callback(self, func):
"""
Adds a callback function that will be called whenever one of
the :class:`Artist`'s properties changes.
Returns an *id* that is useful for removing the callback with
:meth:`remove_callback` later.
"""
oid = self._oid
self._propobservers[oid] = func
self._oid += 1
return oid
def remove_callback(self, oid):
"""
Remove a callback based on its *id*.
.. seealso::
:meth:`add_callback`
For adding callbacks
"""
try:
del self._propobservers[oid]
except KeyError:
pass
def pchanged(self):
"""
Fire an event when property changed, calling all of the
registered callbacks.
"""
for oid, func in six.iteritems(self._propobservers):
func(self)
def is_transform_set(self):
"""
Returns *True* if :class:`Artist` has a transform explicitly
set.
"""
return self._transformSet
def set_transform(self, t):
"""
Set the :class:`~matplotlib.transforms.Transform` instance
used by this artist.
ACCEPTS: :class:`~matplotlib.transforms.Transform` instance
"""
self._transform = t
self._transformSet = True
self.pchanged()
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform`
instance used by this artist.
"""
if self._transform is None:
self._transform = IdentityTransform()
elif (not isinstance(self._transform, Transform)
and hasattr(self._transform, '_as_mpl_transform')):
self._transform = self._transform._as_mpl_transform(self.axes)
return self._transform
def hitlist(self, event):
"""
List the children of the artist which contain the mouse event *event*.
"""
L = []
try:
hascursor, info = self.contains(event)
if hascursor:
L.append(self)
except:
import traceback
traceback.print_exc()
print("while checking", self.__class__)
for a in self.get_children():
L.extend(a.hitlist(event))
return L
def get_children(self):
"""
Return a list of the child :class:`Artist`s this
:class:`Artist` contains.
"""
return []
def contains(self, mouseevent):
"""Test whether the artist contains the mouse event.
Returns the truth value and a dictionary of artist specific details of
selection, such as which points are contained in the pick radius. See
individual artists for details.
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
warnings.warn("'%s' needs 'contains' method" % self.__class__.__name__)
return False, {}
def set_contains(self, picker):
"""
Replace the contains test used by this artist. The new picker
should be a callable function which determines whether the
artist is hit by the mouse event::
hit, props = picker(artist, mouseevent)
If the mouse event is over the artist, return *hit* = *True*
and *props* is a dictionary of properties you want returned
with the contains test.
ACCEPTS: a callable function
"""
self._contains = picker
def get_contains(self):
"""
Return the _contains test used by the artist, or *None* for default.
"""
return self._contains
def pickable(self):
'Return *True* if :class:`Artist` is pickable.'
return (self.figure is not None and
self.figure.canvas is not None and
self._picker is not None)
def pick(self, mouseevent):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if *mouseevent* is over
the artist and the artist has picker set
"""
# Pick self
if self.pickable():
picker = self.get_picker()
if six.callable(picker):
inside, prop = picker(self, mouseevent)
else:
inside, prop = self.contains(mouseevent)
if inside:
self.figure.canvas.pick_event(mouseevent, self, **prop)
# Pick children
for a in self.get_children():
# make sure the event happened in the same axes
ax = getattr(a, 'axes', None)
if mouseevent.inaxes is None or ax is None or \
mouseevent.inaxes == ax:
# we need to check if mouseevent.inaxes is None
# because some objects associated with an axes (e.g., a
# tick label) can be outside the bounding box of the
# axes and inaxes will be None
# also check that ax is None so that it traverse objects
# which do no have an axes property but children might
a.pick(mouseevent)
def set_picker(self, picker):
"""
Set the epsilon for picking used by this artist
*picker* can be one of the following:
* *None*: picking is disabled for this artist (default)
* A boolean: if *True* then picking will be enabled and the
artist will fire a pick event if the mouse event is over
the artist
* A float: if picker is a number it is interpreted as an
epsilon tolerance in points and the artist will fire
off an event if it's data is within epsilon of the mouse
event. For some artists like lines and patch collections,
the artist may provide additional data to the pick event
that is generated, e.g., the indices of the data within
epsilon of the pick event
* A function: if picker is callable, it is a user supplied
function which determines whether the artist is hit by the
mouse event::
hit, props = picker(artist, mouseevent)
to determine the hit test. if the mouse event is over the
artist, return *hit=True* and props is a dictionary of
properties you want added to the PickEvent attributes.
ACCEPTS: [None|float|boolean|callable]
"""
self._picker = picker
def get_picker(self):
'Return the picker object used by this artist'
return self._picker
def is_figure_set(self):
"""
Returns True if the artist is assigned to a
:class:`~matplotlib.figure.Figure`.
"""
return self.figure is not None
def get_url(self):
"""
Returns the url
"""
return self._url
def set_url(self, url):
"""
Sets the url for the artist
ACCEPTS: a url string
"""
self._url = url
def get_gid(self):
"""
Returns the group id
"""
return self._gid
def set_gid(self, gid):
"""
Sets the (group) id for the artist
ACCEPTS: an id string
"""
self._gid = gid
def get_snap(self):
"""
Returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg and MacOSX backends.
"""
if rcParams['path.snap']:
return self._snap
else:
return False
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg and MacOSX backends.
"""
self._snap = snap
def get_sketch_params(self):
"""
Returns the sketch parameters for the artist.
Returns
-------
sketch_params : tuple or `None`
A 3-tuple with the following elements:
* `scale`: The amplitude of the wiggle perpendicular to the
source line.
* `length`: The length of the wiggle along the line.
* `randomness`: The scale factor by which the length is
shrunken or expanded.
May return `None` if no sketch parameters were set.
"""
return self._sketch
def set_sketch_params(self, scale=None, length=None, randomness=None):
"""
Sets the the sketch parameters.
Parameters
----------
scale : float, optional
The amplitude of the wiggle perpendicular to the source
line, in pixels. If scale is `None`, or not provided, no
sketch filter will be provided.
length : float, optional
The length of the wiggle along the line, in pixels
(default 128.0)
randomness : float, optional
The scale factor by which the length is shrunken or
expanded (default 16.0)
"""
if scale is None:
self._sketch = None
else:
self._sketch = (scale, length or 128.0, randomness or 16.0)
def set_path_effects(self, path_effects):
"""
set path_effects, which should be a list of instances of
matplotlib.patheffect._Base class or its derivatives.
"""
self._path_effects = path_effects
def get_path_effects(self):
return self._path_effects
def get_figure(self):
"""
Return the :class:`~matplotlib.figure.Figure` instance the
artist belongs to.
"""
return self.figure
def set_figure(self, fig):
"""
Set the :class:`~matplotlib.figure.Figure` instance the artist
belongs to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
self.figure = fig
self.pchanged()
def set_clip_box(self, clipbox):
"""
Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
"""
self.clipbox = clipbox
self.pchanged()
    def set_clip_path(self, path, transform=None):
        """
        Set the artist's clip path, which may be:

          * a :class:`~matplotlib.patches.Patch` (or subclass) instance

          * a :class:`~matplotlib.path.Path` instance, in which case
            an optional :class:`~matplotlib.transforms.Transform`
            instance may be provided, which will be applied to the
            path before using it for clipping.

          * *None*, to remove the clipping path

        For efficiency, if the path happens to be an axis-aligned
        rectangle, this method will set the clipping box to the
        corresponding rectangle and set the clipping path to *None*.

        ACCEPTS: [ (:class:`~matplotlib.path.Path`,
        :class:`~matplotlib.transforms.Transform`) |
        :class:`~matplotlib.patches.Patch` | None ]
        """
        # Imported here (not at module level) to avoid a circular import.
        from matplotlib.patches import Patch, Rectangle

        success = False
        if transform is None:
            if isinstance(path, Rectangle):
                # Fast path: an axis-aligned rectangle clips via the bbox
                # alone; the clip path is cleared.
                self.clipbox = TransformedBbox(Bbox.unit(),
                                               path.get_transform())
                self._clippath = None
                success = True
            elif isinstance(path, Patch):
                self._clippath = TransformedPath(
                    path.get_path(),
                    path.get_transform())
                success = True
            elif isinstance(path, tuple):
                # A (path, transform) pair; unpack and fall through to the
                # generic handling below.
                path, transform = path

        if path is None:
            self._clippath = None
            success = True
        elif isinstance(path, Path):
            self._clippath = TransformedPath(path, transform)
            success = True
        elif isinstance(path, TransformedPath):
            self._clippath = path
            success = True

        if not success:
            print(type(path), type(transform))
            raise TypeError("Invalid arguments to set_clip_path")

        self.pchanged()
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on all
backends
"""
return self._alpha
def get_visible(self):
"Return the artist's visiblity"
return self._visible
def get_animated(self):
"Return the artist's animated state"
return self._animated
def get_clip_on(self):
'Return whether artist uses clipping'
return self._clipon
def get_clip_box(self):
'Return artist clipbox'
return self.clipbox
def get_clip_path(self):
'Return artist clip path'
return self._clippath
def get_transformed_clip_path_and_affine(self):
'''
Return the clip path with the non-affine part of its
transformation applied, and the remaining affine part of its
transformation.
'''
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def set_clip_on(self, b):
"""
Set whether artist uses clipping.
When False artists will be visible out side of the axes which
can lead to unexpected results.
ACCEPTS: [True | False]
"""
self._clipon = b
self.pchanged()
def _set_gc_clip(self, gc):
'Set the clip properly for the gc'
if self._clipon:
if self.clipbox is not None:
gc.set_clip_rectangle(self.clipbox)
gc.set_clip_path(self._clippath)
else:
gc.set_clip_rectangle(None)
gc.set_clip_path(None)
def get_rasterized(self):
"return True if the artist is to be rasterized"
return self._rasterized
def set_rasterized(self, rasterized):
"""
Force rasterized (bitmap) drawing in vector backend output.
Defaults to None, which implies the backend's default behavior
ACCEPTS: [True | False | None]
"""
if rasterized and not hasattr(self.draw, "_supports_rasterization"):
warnings.warn("Rasterization of '%s' will be ignored" % self)
self._rasterized = rasterized
def get_agg_filter(self):
"return filter function to be used for agg filter"
return self._agg_filter
def set_agg_filter(self, filter_func):
"""
set agg_filter fuction.
"""
self._agg_filter = filter_func
def draw(self, renderer, *args, **kwargs):
'Derived classes drawing method'
if not self.get_visible():
return
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends.
ACCEPTS: float (0.0 transparent through 1.0 opaque)
"""
self._alpha = alpha
self.pchanged()
def set_lod(self, on):
"""
Set Level of Detail on or off. If on, the artists may examine
things like the pixel width of the axes and draw a subset of
their contents accordingly
ACCEPTS: [True | False]
"""
self._lod = on
self.pchanged()
def set_visible(self, b):
"""
Set the artist's visiblity.
ACCEPTS: [True | False]
"""
self._visible = b
self.pchanged()
def set_animated(self, b):
"""
Set the artist's animation state.
ACCEPTS: [True | False]
"""
self._animated = b
self.pchanged()
    def update(self, props):
        """
        Update the properties of this :class:`Artist` from the
        dictionary *props*.

        Each key ``k`` is dispatched to ``self.set_k(v)``; an
        :exc:`AttributeError` is raised for a key with no matching setter.
        Property-change callbacks are suppressed while the individual
        setters run and fired once at the end.
        """
        store = self.eventson
        self.eventson = False
        changed = False

        for k, v in six.iteritems(props):
            func = getattr(self, 'set_' + k, None)
            if func is None or not six.callable(func):
                raise AttributeError('Unknown property %s' % k)
            func(v)
            changed = True

        self.eventson = store
        if changed:
            # Single batched notification for all properties set above.
            self.pchanged()
def get_label(self):
"""
Get the label used for this artist in the legend.
"""
return self._label
def set_label(self, s):
"""
Set the label to *s* for auto legend.
ACCEPTS: string or anything printable with '%s' conversion.
"""
if s is not None:
self._label = '%s' % (s, )
else:
self._label = None
self.pchanged()
def get_zorder(self):
"""
Return the :class:`Artist`'s zorder.
"""
return self.zorder
def set_zorder(self, level):
"""
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
ACCEPTS: any number
"""
self.zorder = level
self.pchanged()
def update_from(self, other):
'Copy properties from *other* to *self*.'
self._transform = other._transform
self._transformSet = other._transformSet
self._visible = other._visible
self._alpha = other._alpha
self.clipbox = other.clipbox
self._clipon = other._clipon
self._clippath = other._clippath
self._lod = other._lod
self._label = other._label
self._sketch = other._sketch
self._path_effects = other._path_effects
self.pchanged()
def properties(self):
"""
return a dictionary mapping property name -> value for all Artist props
"""
return ArtistInspector(self).properties()
def set(self, **kwargs):
"""
A tkstyle set command, pass *kwargs* to set properties
"""
ret = []
for k, v in six.iteritems(kwargs):
k = k.lower()
funcName = "set_%s" % k
func = getattr(self, funcName)
ret.extend([func(v)])
return ret
    def findobj(self, match=None, include_self=True):
        """
        Find artist objects.

        Recursively find all :class:`~matplotlib.artist.Artist` instances
        contained in self.

        *match* can be

          - None: return all objects contained in artist.

          - function with signature ``boolean = match(artist)``
            used to filter matches

          - class instance: e.g., Line2D.  Only return artists of class type.

        If *include_self* is True (default), include self in the list to be
        checked for a match.
        """
        if match is None:  # always return True
            def matchfunc(x):
                return True
        elif cbook.issubclass_safe(match, Artist):
            # A class was given: match by isinstance.
            def matchfunc(x):
                return isinstance(x, match)
        elif six.callable(match):
            matchfunc = match
        else:
            raise ValueError('match must be None, a matplotlib.artist.Artist '
                             'subclass, or a callable')

        artists = []

        for c in self.get_children():
            if matchfunc(c):
                artists.append(c)
            # Descend with include_self=False: each child was already
            # tested just above, so don't test it twice.
            artists.extend([thisc for thisc in
                            c.findobj(matchfunc, include_self=False)
                            if matchfunc(thisc)])

        if include_self and matchfunc(self):
            artists.append(self)
        return artists
class ArtistInspector:
"""
A helper class to inspect an :class:`~matplotlib.artist.Artist`
and return information about it's settable properties and their
current values.
"""
def __init__(self, o):
"""
Initialize the artist inspector with an
:class:`~matplotlib.artist.Artist` or sequence of :class:`Artists`.
If a sequence is used, we assume it is a homogeneous sequence (all
:class:`Artists` are of the same type) and it is your responsibility
to make sure this is so.
"""
if cbook.iterable(o) and len(o):
o = o[0]
self.oorig = o
if not isinstance(o, type):
o = type(o)
self.o = o
self.aliasd = self.get_aliases()
def get_aliases(self):
"""
Get a dict mapping *fullname* -> *alias* for each *alias* in
the :class:`~matplotlib.artist.ArtistInspector`.
e.g., for lines::
{'markerfacecolor': 'mfc',
'linewidth' : 'lw',
}
"""
names = [name for name in dir(self.o) if
(name.startswith('set_') or name.startswith('get_'))
and six.callable(getattr(self.o, name))]
aliases = {}
for name in names:
func = getattr(self.o, name)
if not self.is_alias(func):
continue
docstring = func.__doc__
fullname = docstring[10:]
aliases.setdefault(fullname[4:], {})[name[4:]] = None
return aliases
_get_valid_values_regex = re.compile(
r"\n\s*ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))")
def get_valid_values(self, attr):
"""
Get the legal arguments for the setter associated with *attr*.
This is done by querying the docstring of the function *set_attr*
for a line that begins with ACCEPTS:
e.g., for a line linestyle, return
"[ ``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'steps'`` | ``'None'`` ]"
"""
name = 'set_%s' % attr
if not hasattr(self.o, name):
raise AttributeError('%s has no function %s' % (self.o, name))
func = getattr(self.o, name)
docstring = func.__doc__
if docstring is None:
return 'unknown'
if docstring.startswith('alias for '):
return None
match = self._get_valid_values_regex.search(docstring)
if match is not None:
return match.group(1).replace('\n', ' ')
return 'unknown'
def _get_setters_and_targets(self):
"""
Get the attribute strings and a full path to where the setter
is defined for all setters in an object.
"""
setters = []
for name in dir(self.o):
if not name.startswith('set_'):
continue
o = getattr(self.o, name)
if not six.callable(o):
continue
if len(inspect.getargspec(o)[0]) < 2:
continue
func = o
if self.is_alias(func):
continue
source_class = self.o.__module__ + "." + self.o.__name__
for cls in self.o.mro():
if name in cls.__dict__:
source_class = cls.__module__ + "." + cls.__name__
break
setters.append((name[4:], source_class + "." + name))
return setters
def get_setters(self):
"""
Get the attribute strings with setters for object. e.g., for a line,
return ``['markerfacecolor', 'linewidth', ....]``.
"""
return [prop for prop, target in self._get_setters_and_targets()]
def is_alias(self, o):
"""
Return *True* if method object *o* is an alias for another
function.
"""
ds = o.__doc__
if ds is None:
return False
return ds.startswith('alias for ')
def aliased_name(self, s):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME.
e.g., for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
return s + ''.join([' or %s' % x
for x
in six.iterkeys(self.aliasd[s])])
else:
return s
def aliased_name_rest(self, s, target):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME formatted for ReST
e.g., for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
aliases = ''.join([' or %s' % x
for x
in six.iterkeys(self.aliasd[s])])
else:
aliases = ''
return ':meth:`%s <%s>`%s' % (s, target, aliases)
def pprint_setters(self, prop=None, leadingspace=2):
"""
If *prop* is *None*, return a list of strings of all settable properies
and their valid values.
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' % (pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name(prop)
lines.append('%s%s: %s' % (pad, name, accepts))
return lines
def pprint_setters_rest(self, prop=None, leadingspace=2):
"""
If *prop* is *None*, return a list of strings of all settable properies
and their valid values. Format the output for ReST
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' % (pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
########
names = [self.aliased_name_rest(prop, target)
for prop, target
in attrs]
accepts = [self.get_valid_values(prop) for prop, target in attrs]
col0_len = max([len(n) for n in names])
col1_len = max([len(a) for a in accepts])
table_formatstr = pad + '=' * col0_len + ' ' + '=' * col1_len
lines.append('')
lines.append(table_formatstr)
lines.append(pad + 'Property'.ljust(col0_len + 3) + \
'Description'.ljust(col1_len))
lines.append(table_formatstr)
lines.extend([pad + n.ljust(col0_len + 3) + a.ljust(col1_len)
for n, a in zip(names, accepts)])
lines.append(table_formatstr)
lines.append('')
return lines
########
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name_rest(prop, path)
lines.append('%s%s: %s' % (pad, name, accepts))
return lines
def properties(self):
"""
return a dictionary mapping property name -> value
"""
o = self.oorig
getters = [name for name in dir(o)
if name.startswith('get_')
and six.callable(getattr(o, name))]
#print getters
getters.sort()
d = dict()
for name in getters:
func = getattr(o, name)
if self.is_alias(func):
continue
try:
val = func()
except:
continue
else:
d[name[4:]] = val
return d
def pprint_getters(self):
"""
Return the getters and actual values as list of strings.
"""
d = self.properties()
names = list(six.iterkeys(d))
names.sort()
lines = []
for name in names:
val = d[name]
if getattr(val, 'shape', ()) != () and len(val) > 6:
s = str(val[:6]) + '...'
else:
s = str(val)
s = s.replace('\n', ' ')
if len(s) > 50:
s = s[:50] + '...'
name = self.aliased_name(name)
lines.append(' %s = %s' % (name, s))
return lines
def findobj(self, match=None):
"""
Recursively find all :class:`matplotlib.artist.Artist`
instances contained in *self*.
If *match* is not None, it can be
- function with signature ``boolean = match(artist)``
- class instance: e.g., :class:`~matplotlib.lines.Line2D`
used to filter matches.
"""
if match is None: # always return True
def matchfunc(x):
return True
elif issubclass(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif six.callable(match):
matchfunc = func
else:
raise ValueError('match must be None, an '
'matplotlib.artist.Artist '
'subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc
for thisc
in c.findobj(matchfunc)
if matchfunc(thisc)])
if matchfunc(self):
artists.append(self)
return artists
def getp(obj, property=None):
    """
    Return the value of object's property.  *property* is an optional string
    for the property you want to return

    Example usage::

        getp(obj)  # get all the object properties
        getp(obj, 'linestyle')  # get the linestyle property

    *obj* is a :class:`Artist` instance, e.g.,
    :class:`~matplotllib.lines.Line2D` or an instance of a
    :class:`~matplotlib.axes.Axes` or :class:`matplotlib.text.Text`.
    If the *property* is 'somename', this function returns

      obj.get_somename()

    :func:`getp` can be used to query all the gettable properties with
    ``getp(obj)``. Many properties have aliases for shorter typing, e.g.
    'lw' is an alias for 'linewidth'.  In the output, aliases and full
    property names will be listed as:

      property or alias = value

    e.g.:

      linewidth or lw = 2
    """
    if property is None:
        # Query mode: pretty-print every gettable property; returns None.
        insp = ArtistInspector(obj)
        ret = insp.pprint_getters()
        print('\n'.join(ret))
        return

    # Dispatch 'somename' to the conventional getter obj.get_somename().
    func = getattr(obj, 'get_' + property)
    return func()
# alias
get = getp
def setp(obj, *args, **kwargs):
    """
    Set a property on an artist object.

    matplotlib supports the use of :func:`setp` ("set property") and
    :func:`getp` to set and get object properties, as well as to do
    introspection on the object.  For example, to set the linestyle of a
    line to be dashed, you can do::

      >>> line, = plot([1,2,3])
      >>> setp(line, linestyle='--')

    If you want to know the valid types of arguments, you can provide the
    name of the property you want to set without a value::

      >>> setp(line, 'linestyle')
          linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]

    If you want to see all the properties that can be set, and their
    possible values, you can do::

      >>> setp(line)
          ... long output listing omitted

    :func:`setp` operates on a single instance or a list of instances.
    If you are in query mode introspecting the possible values, only
    the first instance in the sequence is used.  When actually setting
    values, all the instances will be set.  e.g., suppose you have a
    list of two lines, the following will make both lines thicker and
    red::

      >>> x = arange(0,1.0,0.01)
      >>> y1 = sin(2*pi*x)
      >>> y2 = sin(4*pi*x)
      >>> lines = plot(x, y1, x, y2)
      >>> setp(lines, linewidth=2, color='r')

    :func:`setp` works with the MATLAB style string/value pairs or
    with python kwargs.  For example, the following are equivalent::

      >>> setp(lines, 'linewidth', 2, 'color', 'r')  # MATLAB style
      >>> setp(lines, linewidth=2, color='r')        # python style
    """
    insp = ArtistInspector(obj)

    # Query mode: no values supplied -> print documentation and return.
    if len(kwargs) == 0 and len(args) == 0:
        print('\n'.join(insp.pprint_setters()))
        return

    if len(kwargs) == 0 and len(args) == 1:
        print(insp.pprint_setters(prop=args[0]))
        return

    # Normalize *obj* to an iterable of target artists.
    if not cbook.iterable(obj):
        objs = [obj]
    else:
        objs = cbook.flatten(obj)

    # Positional args must come in (name, value) pairs.
    if len(args) % 2:
        raise ValueError('The set args must be string, value pairs')

    # Collect (property, value) pairs from positional args and kwargs.
    funcvals = []
    for i in range(0, len(args) - 1, 2):
        funcvals.append((args[i], args[i + 1]))
    funcvals.extend(kwargs.items())

    ret = []
    for o in objs:
        for s, val in funcvals:
            s = s.lower()
            funcName = "set_%s" % s
            func = getattr(o, funcName)
            ret.extend([func(val)])

    return [x for x in cbook.flatten(ret)]
def kwdoc(a):
    """Return a newline-joined listing of the settable properties of *a*.

    Output is formatted as ReST when the ``docstring.hardcopy`` rc setting
    is on (used when building hardcopy docs), plain text otherwise.
    """
    inspector = ArtistInspector(a)
    if matplotlib.rcParams['docstring.hardcopy']:
        lines = inspector.pprint_setters_rest(leadingspace=2)
    else:
        lines = inspector.pprint_setters(leadingspace=2)
    return '\n'.join(lines)
docstring.interpd.update(Artist=kwdoc(Artist))
| mit |
dkainer/pyms | Display/Class.py | 7 | 10046 | """
Class to Display Ion Chromatograms and TIC
"""
#############################################################################
# #
# PyMS software for processing of metabolomic mass-spectrometry data #
# Copyright (C) 2005-2012 Vladimir Likic #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. #
# #
#############################################################################
import matplotlib.pyplot as plt
import numpy
import sys
sys.path.append('/x/PyMS/')
from pyms.GCMS.Class import IonChromatogram
from pyms.Utils.Error import error
class Display(object):
"""
@summary: Class to display Ion Chromatograms and Total
Ion Chromatograms from GCMS.Class.IonChromatogram
Uses matplotlib module pyplot to do plotting
@author: Sean O'Callaghan
@author: Vladimir Likic
"""
    def __init__(self):
        """
        @summary: Initialises an instance of Display class

        Note: creates a new matplotlib figure and axes as a side effect.
        """
        # Container to store plots
        self.__tic_ic_plots = []

        # color dictionary for plotting of ics; blue reserved
        # for TIC
        self.__col_ic = {0:'r', 1:'g', 2:'k', 3:'y', 4:'m', 5:'c'}
        self.__col_count = 0  # counter to keep track of colors

        # Peak list container
        self.__peak_list = []

        # Plotting Variables
        self.__fig = plt.figure()
        self.__ax = self.__fig.add_subplot(111)
def plot_ics(self, ics, labels = None):
"""
@summary: Adds an Ion Chromatogram or a
list of Ion Chromatograms to plot list
@param ics: List of Ion Chromatograms m/z channels
for plotting
@type ics: list of pyms.GCMS.Class.IonChromatogram
@param labels: Labels for plot legend
@type labels: list of StringType
"""
if not isinstance(ics, list):
if isinstance(ics, IonChromatogram):
ics = [ics]
else:
error("ics argument must be an IonChromatogram\
or a list of Ion Chromatograms")
if not isinstance(labels, list) and labels != None:
labels = [labels]
# TODO: take care of case where one element of ics is
# not an IonChromatogram
intensity_list = []
time_list = ics[0].get_time_list()
for i in range(len(ics)):
intensity_list.append(ics[i].get_intensity_array())
# Case for labels not present
if labels == None:
for i in range(len(ics)):
self.__tic_ic_plots.append(plt.plot(time_list, \
intensity_list[i], self.__col_ic[self.__col_count]))
if self.__col_count == 5:
self.__col_count = 0
else:
self.__col_count += 1
# Case for labels present
else:
for i in range(len(ics)):
self.__tic_ic_plots.append(plt.plot(time_list, \
intensity_list[i], self.__col_ic[self.__col_count]\
, label = labels[i]))
if self.__col_count == 5:
self.__col_count = 0
else:
self.__col_count += 1
def plot_tic(self, tic, label=None):
"""
@summary: Adds Total Ion Chromatogram to plot list
@param tic: Total Ion Chromatogram
@type tic: pyms.GCMS.Class.IonChromatogram
@param label: label for plot legend
@type label: StringType
"""
if not isinstance(tic, IonChromatogram):
error("TIC is not an Ion Chromatogram object")
intensity_list = tic.get_intensity_array()
time_list = tic.get_time_list()
self.__tic_ic_plots.append(plt.plot(time_list, intensity_list,\
label=label))
def plot_peaks(self, peak_list, label = "Peaks"):
"""
@summary: Plots the locations of peaks as found
by PyMS.
@param peak_list: List of peaks
@type peak_list: list of pyms.Peak.Class.Peak
@param label: label for plot legend
@type label: StringType
"""
if not isinstance(peak_list, list):
error("peak_list is not a list")
time_list = []
height_list=[]
# Copy to self.__peak_list for onclick event handling
self.__peak_list = peak_list
for peak in peak_list:
time_list.append(peak.get_rt())
height_list.append(sum(peak.get_mass_spectrum().mass_spec))
self.__tic_ic_plots.append(plt.plot(time_list, height_list, 'o',\
label = label))
def get_5_largest(self, intensity_list):
"""
@summary: Computes the indices of the largest 5 ion intensities
for writing to console
@param intensity_list: List of Ion intensities
@type intensity_list: listType
"""
largest = [0,0,0,0,0,0,0,0,0,0]
# Find out largest value
for i in range(len(intensity_list)):
if intensity_list[i] > intensity_list[largest[0]]:
largest[0] = i
# Now find next four largest values
for j in [1,2,3,4,5,6,7,8,9]:
for i in range(len(intensity_list)):
if intensity_list[i] > intensity_list[largest[j]] and \
intensity_list[i] < intensity_list[largest[j-1]]:
largest[j] = i
return largest
def plot_mass_spec(self, rt, mass_list, intensity_list):
"""
@summary: Plots the mass spec given a list of masses and intensities
@param rt: The retention time for labelling of the plot
@type rt: floatType
@param mass_list: list of masses of the MassSpectrum object
@type mass_list: listType
@param intensity_list: List of intensities of the MassSpectrum object
@type intensity_list: listType
"""
new_fig = plt.figure()
new_ax = new_fig.add_subplot(111)
# to set x axis range find minimum and maximum m/z channels
max_mz = mass_list[0]
min_mz = mass_list[0]
for i in range(len(mass_list)):
if mass_list[i] > max_mz:
max_mz = mass_list[i]
for i in range(len(mass_list)):
if mass_list[i] < min_mz:
min_mz = mass_list[i]
label = "Mass spec for peak at time " + "%5.2f" % rt
mass_spec_plot = plt.bar(mass_list, intensity_list,\
label=label, width=0.01)
x_axis_range = plt.xlim(min_mz, max_mz)
t = new_ax.set_title(label)
plt.show()
def onclick(self, event):
"""
@summary: Finds the 5 highest intensity m/z channels for the selected peak.
The peak is selected by clicking on it. If a button other than
the left one is clicked, a new plot of the mass spectrum is displayed
@param event: a mouse click by the user
"""
intensity_list = []
mass_list = []
for peak in self.__peak_list:
if event.xdata > 0.9999*peak.get_rt() and event.xdata < \
1.0001*peak.get_rt():
intensity_list = peak.get_mass_spectrum().mass_spec
mass_list = peak.get_mass_spectrum().mass_list
largest = self.get_5_largest(intensity_list)
if len(intensity_list) != 0:
print "mass\t intensity"
for i in range(10):
print mass_list[largest[i]], "\t", intensity_list[largest[i]]
else: # if the selected point is not close enough to peak
print "No Peak at this point"
# Check if a button other than left was pressed, if so plot mass spectrum
# Also check that a peak was selected, not just whitespace
if event.button != 1 and len(intensity_list) != 0:
self.plot_mass_spec(event.xdata, mass_list, intensity_list)
def do_plotting(self, plot_label = None):
"""
@summary: Plots TIC and IC(s) if they have been created
by plot_tic() or plot_ics(). Adds detected peaks
if they have been added by plot_peaks()
@param plot_label: Optional to supply a label or other
definition of data origin
@type plot_label: StringType
"""
# if no plots have been created advise user
if len(self.__tic_ic_plots) == 0:
print 'No plots have been created'
print 'Please call a plotting function before'
print 'calling do_plotting()'
if plot_label != None :
t = self.__ax.set_title(plot_label)
l = self.__ax.legend()
self.__fig.canvas.draw
# If no peak list plot, no mouse click event
if len(self.__peak_list) != 0:
cid = self.__fig.canvas.mpl_connect('button_press_event', self.onclick)
plt.show()
| gpl-2.0 |
cogstat/cogstat | cogstat/cogstat_dialogs.py | 1 | 35802 | # -*- coding: utf-8 -*-
import gettext
import os
from PyQt5 import QtWidgets, QtCore, QtGui
from . import cogstat_config as csc
QString = str
t = gettext.translation('cogstat', os.path.dirname(os.path.abspath(__file__))+'/locale/', [csc.language], fallback=True)
_ = t.gettext
# Overwrite the qt _translate function, use gettext
def _gui(a, b):
    """Translate message *b* with gettext, ignoring the Qt context *a*."""
    return _(b)

# Route every Qt translate() request through gettext.
QtCore.QCoreApplication.translate = _gui
### File menu commands ###
# http://developer.qt.nokia.com/doc/qt-4.8/QFileDialog.html
def open_data_file():
    """Show a file chooser for importable data files.

    Returns the selected path as str, or '' if the user cancelled.
    """
    # TODO how to make the filter case insensitive?
    # NOTE(review): '*xlsx' (missing dot) in the spreadsheet filter looks
    # like a typo — verify intent before changing the filter text.
    name_filter = ('%s (*.ods *.xls *.xlsx *.csv *.txt *.tsv *.dat *.log '
                   '*.sav *.zsav *.por *.jasp *.omv *.sas7bdat *.xpt *.dta '
                   '*.rdata *.Rdata *.rds *.rda);;'
                   '%s *.ods *.xls *xlsx;;%s *.csv *.txt *.tsv *.dat *.log;;'
                   '%s *.sav *.zsav *.por;;%s *.omv;;%s *.jasp;;'
                   '%s *.sas7bdat *.xpt;; %s *.dta;;'
                   '%s *.rdata *.Rdata *.rds *.rda'
                   % (_('All importable data files'),
                      _('Spreadsheet files'), _('Text files'),
                      _('SPSS data files'), _('jamovi data files'),
                      _('JASP data files'), _('SAS data files'),
                      _('STATA data files'), _('R data files')))
    # getOpenFileName returns a (path, chosen_filter) pair; only the path is needed
    selection = QtWidgets.QFileDialog.getOpenFileName(None, _('Open data file'), '', name_filter)
    return str(selection[0])
def open_demo_data_file():
    """Show a file chooser starting in the bundled demo_data directory.

    Returns the selected path as str, or '' if the user cancelled.
    """
    demo_dir = os.path.dirname(csc.__file__) + '/demo_data'
    name_filter = ('%s (*.ods *.xls *.xlsx *.csv *.txt *.tsv *.dat *.log '
                   '*.sav *.zsav *.por *.jasp *.omv *.sas7bdat *.xpt *.dta '
                   '*.rdata *.Rdata *.rds *.rda);;'
                   '%s *.ods *.xls *xlsx;;%s *.csv *.txt *.tsv *.dat *.log;;'
                   '%s *.sav *.zsav *.por;;%s *.omv;;%s *.jasp;;'
                   '%s *.sas7bdat *.xpt;; %s *.dta;;'
                   '%s *.rdata *.Rdata *.rds *.rda'
                   % (_('All importable data files'),
                      _('Spreadsheet files'), _('Text files'),
                      _('SPSS data files'), _('jamovi data files'),
                      _('JASP data files'), _('SAS data files'),
                      _('STATA data files'), _('R data files')))
    selection = QtWidgets.QFileDialog.getOpenFileName(None, _('Open data file'), demo_dir, name_filter)
    return str(selection[0])
def save_output():
    """Ask for a destination for the pdf result file.

    Returns the chosen path as str, or '' if the user cancelled.
    """
    selection = QtWidgets.QFileDialog.getSaveFileName(None, _('Save result file'),
                                                      'CogStat analysis result.pdf', '*.pdf')
    return str(selection[0])
# XXX self.buttonBox.Ok.setEnabled(False) # TODO how can we disable the OK button without the other?
# TODO Some variables are CamelCase - change them
### Various functions ###
# TODO functions should be private
def init_source_vars(list_widget, names, already_in_use):
    """Fill *list_widget* with the variables of *names* that are not
    already present in any of the *already_in_use* list widgets.

    :param list_widget: source variables
    :param names: names of the variables
    :param already_in_use: list of listWidgets that may contain variables already in use
    :return:
    """
    used_var_names = []
    for used_widget in already_in_use:
        used_var_names.extend(used_widget.item(i).text() for i in range(used_widget.count()))
    list_widget.clear()  # clear source list in case new data is loaded
    for var_name in names:
        if var_name not in used_var_names:
            list_widget.addItem(QString(var_name))
def remove_ceased_vars(list_widget, names):
    """Drop every item of *list_widget* whose text is no longer in *names*.

    Iterates from the last row backwards so removals do not shift the
    rows still to be visited.
    """
    for row in reversed(range(list_widget.count())):
        if str(list_widget.item(row).text()) not in names:
            list_widget.takeItem(row)
def add_to_list_widget(source_list_widget, target_list_widget):
    """Move the selected item(s) from *source_list_widget* to
    *target_list_widget* (append to target, delete from source)."""
    # TODO add a maximum parameter, for the maximum number of items that can be added
    for selected_item in source_list_widget.selectedItems():
        target_list_widget.addItem(QString(selected_item.text()))
        source_list_widget.takeItem(source_list_widget.row(selected_item))
def remove_item_from_list_widget(source_list_widget, target_list_widget, names):
    """Move the selected item(s) of *target_list_widget* back to
    *source_list_widget*, restoring the variable order given by *names*."""
    for selected_item in target_list_widget.selectedItems():
        target_list_widget.takeItem(target_list_widget.row(selected_item))
        insert_row = find_previous_item_position(source_list_widget, names, selected_item.text())
        source_list_widget.insertItem(insert_row, selected_item.text())
def find_previous_item_position(list_widget, names, text_item):
    """Return the row where *text_item* should be reinserted in *list_widget*.

    Variables keep the order given by *names*: the function walks backwards
    through the variables that precede *text_item* in *names* and returns
    the row just after the first one that is present in *list_widget*.
    Returns 0 (insert at the beginning) when no preceding variable is in
    the widget or *text_item* is the first variable.
    """
    names = list(names)
    # walk backwards over the variables preceding text_item in names
    # (an empty slice simply skips the loop, so no emptiness pre-check needed)
    for previous_name in reversed(names[:names.index(text_item)]):
        # bug fix: the original used indexing guarded by a bare "except:",
        # which silently swallowed any error; test for a match explicitly
        matches = list_widget.findItems(previous_name, QtCore.Qt.MatchExactly)
        if matches:  # the preceding variable is in the widget: insert after it
            return list_widget.row(matches[0]) + 1
    return 0
def add_to_list_widget_with_factors(source_list_widget, target_list_widget, names=[]):
    """Assign the selected source variables to the next empty factor-level
    slots (target items ending with ' :: '), starting from the selected
    target row, or from the top when nothing is selected in the target."""
    selected_targets = target_list_widget.selectedItems()
    start_row = target_list_widget.row(selected_targets[0]) if selected_targets else 0
    for source_item in source_list_widget.selectedItems():
        for row in range(start_row, target_list_widget.count()):
            slot_text = target_list_widget.item(row).text()
            if slot_text.endswith(' :: '):  # an empty slot: fill it
                target_list_widget.item(row).setText(slot_text + source_item.text())
                source_list_widget.takeItem(source_list_widget.row(source_item))
                break
def remove_from_list_widget_with_factors(source_list_widget, target_list_widget, names=[]):
    """Detach the variable from the selected 'factor :: variable' items of
    *target_list_widget* and put it back into *source_list_widget*,
    leaving an empty 'factor :: ' slot behind."""
    for selected_item in target_list_widget.selectedItems():
        parts = selected_item.text().split(' :: ')
        if parts[1]:  # the slot actually holds a variable
            insert_row = find_previous_item_position(source_list_widget, names, parts[1])
            source_list_widget.insertItem(insert_row, parts[1])
            selected_item.setText(parts[0] + ' :: ')
def _float_or_none(x):
    """Return float(x), or None when x cannot be converted.

    Used for free-text axis-limit fields, where an empty or invalid entry
    means "no limit". Robustness fix: TypeError is handled too, so a None
    input (unset value) also maps to None instead of raising.
    """
    try:
        return float(x)
    except (ValueError, TypeError):
        return None
### Data dialogs ###
from .ui import pivot
class pivot_dialog(QtWidgets.QDialog, pivot.Ui_Dialog):
    """Dialog for building a pivot table: the user distributes variables
    among the rows, columns, pages and (single) dependent lists and picks
    the aggregating function."""
    def __init__(self, parent=None, names=[]):
        # names=[] is a mutable default; it is only read, never mutated
        QtWidgets.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setModal(True)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.addRows.clicked.connect(self.add_rows)
        self.removeRows.clicked.connect(self.remove_rows)
        self.rowsListWidget.doubleClicked.connect(self.remove_rows)
        self.addColumns.clicked.connect(self.add_columns)
        self.removeColumns.clicked.connect(self.remove_columns)
        self.columnsListWidget.doubleClicked.connect(self.remove_columns)
        self.addPages.clicked.connect(self.add_pages)
        self.removePages.clicked.connect(self.remove_pages)
        self.pagesListWidget.doubleClicked.connect(self.remove_pages)
        self.addDependent.clicked.connect(self.add_dependent)
        self.removeDependent.clicked.connect(self.remove_dependent)
        self.dependentListWidget.doubleClicked.connect(self.remove_dependent)
        self.init_vars(names)
        self.show()
    def init_vars(self, names):
        """Refresh all lists after the available variable names changed."""
        self.names = names
        remove_ceased_vars(self.pagesListWidget, names)
        remove_ceased_vars(self.columnsListWidget, names)
        remove_ceased_vars(self.rowsListWidget, names)
        remove_ceased_vars(self.dependentListWidget, names)
        init_source_vars(self.sourceListWidget, names, [self.pagesListWidget, self.columnsListWidget,
                                                        self.rowsListWidget, self.dependentListWidget])
    def add_rows(self):
        """Move the selected source variables to the rows list."""
        add_to_list_widget(self.sourceListWidget, self.rowsListWidget)
    def remove_rows(self):
        """Move the selected rows variables back to the source list."""
        remove_item_from_list_widget(self.sourceListWidget, self.rowsListWidget, self.names)
    def add_columns(self):
        """Move the selected source variables to the columns list."""
        add_to_list_widget(self.sourceListWidget, self.columnsListWidget)
    def remove_columns(self):
        """Move the selected columns variables back to the source list."""
        remove_item_from_list_widget(self.sourceListWidget, self.columnsListWidget, self.names)
    def add_pages(self):
        """Move the selected source variables to the pages list."""
        add_to_list_widget(self.sourceListWidget, self.pagesListWidget)
    def remove_pages(self):
        """Move the selected pages variables back to the source list."""
        remove_item_from_list_widget(self.sourceListWidget, self.pagesListWidget, self.names)
    def add_dependent(self):
        """Set the dependent variable (at most one is allowed)."""
        if self.dependentListWidget.count() == 0:  # do this only if the list is empty
            add_to_list_widget(self.sourceListWidget, self.dependentListWidget)
    def remove_dependent(self):
        """Move the dependent variable back to the source list."""
        remove_item_from_list_widget(self.sourceListWidget, self.dependentListWidget, self.names)
    def read_parameters(self):
        """Return (rows, columns, pages, dependent, function) where the
        first three are lists of str, dependent is a single str (or [] when
        not set) and function is the aggregating function name."""
        return ([str(self.rowsListWidget.item(i).text()) for i in range(self.rowsListWidget.count())],
                [str(self.columnsListWidget.item(i).text()) for i in range(self.columnsListWidget.count())],
                [str(self.pagesListWidget.item(i).text()) for i in range(self.pagesListWidget.count())],
                [str(self.dependentListWidget.item(i).text()) for i in range(self.dependentListWidget.count())][0] if
                self.dependentListWidget.count() else [],
                str(self.function.currentText()))
from .ui import diffusion
class diffusion_dialog(QtWidgets.QDialog, diffusion.Ui_Dialog):
    """Dialog for diffusion analysis: choose the RT, error, participant
    (one variable each) and condition (any number) variables."""
    def __init__(self, parent=None, names=[]):
        QtWidgets.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setModal(True)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.addRT.clicked.connect(self.add_RT)
        self.removeRT.clicked.connect(self.remove_RT)
        self.RTListWidget.doubleClicked.connect(self.remove_RT)
        self.addError.clicked.connect(self.add_error)
        self.removeError.clicked.connect(self.remove_error)
        self.errorListWidget.doubleClicked.connect(self.remove_error)
        self.addParticipant.clicked.connect(self.add_participant)
        self.removeParticipant.clicked.connect(self.remove_participant)
        self.participantListWidget.doubleClicked.connect(self.remove_participant)
        self.addCondition.clicked.connect(self.add_condition)
        self.removeCondition.clicked.connect(self.remove_condition)
        self.conditionListWidget.doubleClicked.connect(self.remove_condition)
        self.init_vars(names)
        self.show()
    def init_vars(self, names):
        """Refresh all lists after the available variable names changed."""
        self.names = names
        remove_ceased_vars(self.RTListWidget, names)
        remove_ceased_vars(self.errorListWidget, names)
        remove_ceased_vars(self.participantListWidget, names)
        remove_ceased_vars(self.conditionListWidget, names)
        init_source_vars(self.sourceListWidget, names,
                         [self.RTListWidget, self.errorListWidget, self.participantListWidget,
                          self.conditionListWidget])
    def add_RT(self):
        """Set the RT variable (at most one is allowed)."""
        if self.RTListWidget.count() == 0:  # do this only if the list is empty
            add_to_list_widget(self.sourceListWidget, self.RTListWidget)
    def remove_RT(self):
        """Move the RT variable back to the source list."""
        remove_item_from_list_widget(self.sourceListWidget, self.RTListWidget, self.names)
    def add_error(self):
        """Set the error variable (at most one is allowed)."""
        if self.errorListWidget.count() == 0:  # do this only if the list is empty
            add_to_list_widget(self.sourceListWidget, self.errorListWidget)
    def remove_error(self):
        """Move the error variable back to the source list."""
        remove_item_from_list_widget(self.sourceListWidget, self.errorListWidget, self.names)
    def add_participant(self):
        """Set the participant variable (at most one is allowed)."""
        if self.participantListWidget.count() == 0:  # do this only if the list is empty
            add_to_list_widget(self.sourceListWidget, self.participantListWidget)
    def remove_participant(self):
        """Move the participant variable back to the source list."""
        remove_item_from_list_widget(self.sourceListWidget, self.participantListWidget, self.names)
    def add_condition(self):
        """Add the selected source variables to the condition list."""
        add_to_list_widget(self.sourceListWidget, self.conditionListWidget)
    def remove_condition(self):
        """Move the selected condition variables back to the source list."""
        remove_item_from_list_widget(self.sourceListWidget, self.conditionListWidget, self.names)
    def read_parameters(self):
        """Return (error_vars, RT_vars, participant_vars, condition_vars),
        each a list of str (the first three hold at most one element)."""
        return ([str(self.errorListWidget.item(i).text()) for i in range(self.errorListWidget.count())],
                [str(self.RTListWidget.item(i).text()) for i in range(self.RTListWidget.count())],
                [str(self.participantListWidget.item(i).text()) for i in range(self.participantListWidget.count())],
                [str(self.conditionListWidget.item(i).text()) for i in range(self.conditionListWidget.count())])
from .ui import filter_outlier
# NOTE(review): this class name shadows the ui module imported just above
# ('from .ui import filter_outlier'). It works because the base class
# expression is evaluated before the name is rebound, but the module is
# unreachable afterwards — consider renaming the class in a coordinated
# change (callers use the current name, so it cannot change here alone).
class filter_outlier(QtWidgets.QDialog, filter_outlier.Ui_Dialog):
    """Dialog to select the variables on which outlier filtering is run."""
    def __init__(self, parent=None, names=[]):
        QtWidgets.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setModal(True)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.source_listWidget.doubleClicked.connect(self.add_var)
        self.selected_listWidget.doubleClicked.connect(self.remove_var)
        self.addVar.clicked.connect(self.add_var)
        self.removeVar.clicked.connect(self.remove_var)
        self.init_vars(names)
        self.show()
    def init_vars(self, names):
        """Refresh the source/selected lists after variable names changed."""
        self.names = names
        remove_ceased_vars(self.selected_listWidget, names)
        init_source_vars(self.source_listWidget, names, [self.selected_listWidget])
    def add_var(self):
        """Move the selected source variables to the selected list."""
        add_to_list_widget(self.source_listWidget, self.selected_listWidget)
    def remove_var(self):
        """Move the selected variables back to the source list."""
        remove_item_from_list_widget(self.source_listWidget, self.selected_listWidget, self.names)
    def read_parameters(self):
        """Return the selected variable names as a list of str."""
        return [str(self.selected_listWidget.item(i).text()) for i in range(self.selected_listWidget.count())]
from .ui import var_properties
class explore_var_dialog(QtWidgets.QDialog, var_properties.Ui_Dialog):
    """Dialog to choose variables for single-variable exploration, with a
    frequencies checkbox and a t-test reference value."""
    def __init__(self, parent=None, names=[]):
        QtWidgets.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setModal(True)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.source_listWidget.doubleClicked.connect(self.add_var)
        self.selected_listWidget.doubleClicked.connect(self.remove_var)
        self.addVar.clicked.connect(self.add_var)
        self.removeVar.clicked.connect(self.remove_var)
        self.init_vars(names)
        self.show()
    def init_vars(self, names):
        """Refresh the source/selected lists after variable names changed."""
        self.names = names
        remove_ceased_vars(self.selected_listWidget, names)
        init_source_vars(self.source_listWidget, names, [self.selected_listWidget])
    def add_var(self):
        """Move the selected source variables to the selected list."""
        add_to_list_widget(self.source_listWidget, self.selected_listWidget)
    def remove_var(self):
        """Move the selected variables back to the source list."""
        remove_item_from_list_widget(self.source_listWidget, self.selected_listWidget, self.names)
    def read_parameters(self):
        """Return (variable_names, frequencies_enabled, ttest_value_text)."""
        return ([str(self.selected_listWidget.item(i).text()) for i in range(self.selected_listWidget.count())],
                self.freq_checkbox.isChecked(), str(self.ttest_value.text()))
from .ui import xylims
class xylims_dialog(QtWidgets.QDialog, xylims.Ui_Dialog):
    """Modal dialog asking for x and y axis limits (raw, unconverted text)."""
    def __init__(self, parent=None):
        QtWidgets.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setModal(True)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
    def read_parameters(self):
        """Return ([x_min, x_max], [y_min, y_max]) as entered by the user."""
        x_limits = [self.lineEdit.text(), self.lineEdit_2.text()]
        y_limits = [self.lineEdit_3.text(), self.lineEdit_4.text()]
        return x_limits, y_limits
from .ui import explore_var_pairs
class explore_var_pairs_dialog(QtWidgets.QDialog, explore_var_pairs.Ui_Dialog):
    """Dialog to choose variable pairs for pairwise exploration, with
    optional x/y axis limits set in a sub-dialog."""
    def __init__(self, parent=None, names=[]):
        QtWidgets.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setModal(True)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.source_listWidget.doubleClicked.connect(self.add_var)
        self.selected_listWidget.doubleClicked.connect(self.remove_var)
        self.addVar.clicked.connect(self.add_var)
        self.removeVar.clicked.connect(self.remove_var)
        self.pushButton.clicked.connect(self.optionsButton_clicked)
        self.xylims_dialog = xylims_dialog(self)
        # axis limits as floats; None means "not set"
        self.xlims = [None, None]
        self.ylims = [None, None]
        self.init_vars(names)
        self.show()
    def init_vars(self, names):
        """Refresh the source/selected lists after variable names changed."""
        self.names = names
        remove_ceased_vars(self.selected_listWidget, names)
        init_source_vars(self.source_listWidget, names, [self.selected_listWidget])
    def add_var(self):
        """Move the selected source variables to the selected list."""
        add_to_list_widget(self.source_listWidget, self.selected_listWidget)
    def remove_var(self):
        """Move the selected variables back to the source list."""
        remove_item_from_list_widget(self.source_listWidget, self.selected_listWidget, self.names)
    def optionsButton_clicked(self):
        """Open the axis-limits dialog; convert the entered texts to floats
        (None when empty or not convertible)."""
        if self.xylims_dialog.exec_():
            self.xlims, self.ylims = self.xylims_dialog.read_parameters()
            self.xlims[0] = _float_or_none(self.xlims[0])
            self.xlims[1] = _float_or_none(self.xlims[1])
            self.ylims[0] = _float_or_none(self.ylims[0])
            self.ylims[1] = _float_or_none(self.ylims[1])
    def read_parameters(self):
        """Return (variable_names, xlims, ylims)."""
        return [str(self.selected_listWidget.item(i).text()) for i in range(self.selected_listWidget.count())], \
               self.xlims, self.ylims
from .ui import factor
class factor_dialog(QtWidgets.QDialog, factor.Ui_Dialog):
    """Dialog defining a single factor: its name and its number of levels."""
    def __init__(self, parent=None):
        QtWidgets.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setModal(True)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
    def set_parameters(self, lineEdit='', spinBox=None):
        """Preset the factor name (lineEdit) and level count (spinBox);
        falsy values leave the corresponding widget unchanged."""
        if lineEdit:
            self.lineEdit.setText(lineEdit)
        if spinBox:
            self.spinBox.setValue(spinBox)
    def read_parameters(self):
        """Return the (factor_name, level_number) pair."""
        return self.lineEdit.text(), self.spinBox.value()
from .ui import factors
class factors_dialog(QtWidgets.QDialog, factors.Ui_Dialog):
    """Dialog managing a list of factors; each entry is displayed as
    'factor name (level count)' and edited via factor_dialog."""
    def __init__(self, parent=None):
        QtWidgets.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setModal(True)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.listWidget.doubleClicked.connect(self.modifyButton_clicked)
        self.pushButton.clicked.connect(self.addButton_clicked)
        self.pushButton_2.clicked.connect(self.modifyButton_clicked)
        self.pushButton_3.clicked.connect(self.removeButton_clicked)
        self.factor_dialog = factor_dialog(self)
    def addButton_clicked(self):
        """Open the factor dialog and append the new factor to the list."""
        self.factor_dialog.lineEdit.setFocus()
        if self.factor_dialog.exec_():
            factor_name, level_n = self.factor_dialog.read_parameters()
            self.listWidget.addItem(QString('%s (%d)' % (factor_name, level_n)))
    def modifyButton_clicked(self):
        """Open the factor dialog preloaded with the selected entry and
        overwrite the entry with the edited values."""
        self.factor_dialog.lineEdit.setFocus()
        for item in self.listWidget.selectedItems():
            t = item.text()
            # parse 'name (levels)' back into its parts
            text_to_modify = t[:t.rfind(' (')]
            value_to_modify = int(t[t.rfind('(')+1:t.rfind(')')])
            self.factor_dialog.set_parameters(text_to_modify, value_to_modify)
            if self.factor_dialog.exec_():
                factor_name, level_n = self.factor_dialog.read_parameters()
                item.setText(QString('%s (%d)' % (factor_name, level_n)))
    def removeButton_clicked(self):
        """Delete the selected factor entries."""
        for item in self.listWidget.selectedItems():
            self.listWidget.takeItem(self.listWidget.row(item))
    def read_parameters(self):
        """Return the factor entries as a list of 'name (levels)' strings."""
        return [self.listWidget.item(i).text() for i in range(self.listWidget.count())]
from .ui import ylims
class ylims_dialog(QtWidgets.QDialog, ylims.Ui_Dialog):
    """Modal dialog asking for y axis limits (raw, unconverted text)."""
    def __init__(self, parent=None):
        QtWidgets.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setModal(True)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
    def read_parameters(self):
        """Return [y_min, y_max] as entered by the user."""
        y_limits = [self.lineEdit.text(), self.lineEdit_2.text()]
        return y_limits
from .ui import compare_vars
class compare_vars_dialog(QtWidgets.QDialog, compare_vars.Ui_Dialog):
    """Dialog for comparing repeated-measures variables. With two or more
    factors the selected list turns into factor-level slots of the form
    'level combination :: variable'."""
    def __init__(self, parent=None, names=[]):
        QtWidgets.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setModal(True)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.source_listWidget.doubleClicked.connect(self.add_var)
        self.selected_listWidget.doubleClicked.connect(self.remove_var)
        self.addVar.clicked.connect(self.add_var)
        self.removeVar.clicked.connect(self.remove_var)
        self.pushButton.clicked.connect(self.factorsButton_clicked)
        self.pushButton_2.clicked.connect(self.optionsButton_clicked)
        self.factors_dialog = factors_dialog(self)
        self.ylims_dialog = ylims_dialog(self)
        self.factors = []  # list of [factor_name, level_number]
        self.ylims = [None, None]  # y axis limits; None means "not set"
        self.init_vars(names)
        self.show()
    def init_vars(self, names):
        """Refresh the source/selected lists after variable names changed."""
        self.names = names
        init_source_vars(self.source_listWidget, names, [])
        if len(self.factors) < 2:
            remove_ceased_vars(self.selected_listWidget, names)
            init_source_vars(self.source_listWidget, names, [self.selected_listWidget])
    def add_var(self):
        """Move selected source variables to the selected list (into
        factor-level slots when there are at least two factors)."""
        if len(self.factors) < 2:
            add_to_list_widget(self.source_listWidget, self.selected_listWidget)
        else:
            add_to_list_widget_with_factors(self.source_listWidget, self.selected_listWidget, names=self.names)
    def remove_var(self):
        """Move selected variables back to the source list (emptying the
        factor-level slot when there are at least two factors)."""
        if len(self.factors) < 2:
            remove_item_from_list_widget(self.source_listWidget, self.selected_listWidget, self.names)
        else:
            remove_from_list_widget_with_factors(self.source_listWidget, self.selected_listWidget, names=self.names)
    def show_factors(self):
        """Rebuild the selected list as one empty slot per combination of
        factor levels (e.g. 'factor1 1 - factor2 1 :: '), returning any
        previously assigned variables to the source list."""
        # remove all items first
        for i in range(self.selected_listWidget.count()):
            item = self.selected_listWidget.takeItem(0)
            if ' :: ' in item.text():
                if not item.text().endswith(' :: '):  # a slot with a variable: return the variable
                    self.source_listWidget.insertItem(
                        find_previous_item_position(self.source_listWidget, self.names, item.text().split(' :: ')[1]),
                        item.text().split(' :: ')[1])
                    item.setText(item.text().split(' :: ')[0] + ' :: ')
            else:
                self.source_listWidget.insertItem(
                    find_previous_item_position(self.source_listWidget, self.names, item.text()),
                    item.text())
        # add new empty factor levels
        factor_combinations = ['']
        for factor in self.factors:
            factor_combinations = ['%s - %s %s' % (factor_combination, factor[0], level_i + 1) for factor_combination in
                                   factor_combinations for level_i in range(factor[1])]
        # drop the leading ' - ' and append the empty variable slot marker
        factor_combinations = [factor_combination[3:] + ' :: ' for factor_combination in factor_combinations]
        for factor_combination in factor_combinations:
            self.selected_listWidget.addItem(QString(factor_combination))
    def factorsButton_clicked(self):
        """Open the factors dialog, parse the 'name (levels)' entries, and
        rebuild or flatten the selected list accordingly."""
        if self.factors_dialog.exec_():
            factor_list = self.factors_dialog.read_parameters()
            #print(factor_list)
            self.factors = [[t[:t.rfind(' (')], int(t[t.rfind('(')+1:t.rfind(')')])] for t in factor_list]
            #print(self.factors)
            if len(self.factors) > 1:
                self.show_factors()
            else:  # remove the factor levels if there is one or zero factor level
                for i in range(self.selected_listWidget.count()):
                    item = self.selected_listWidget.takeItem(0)
                    # move formerly selected variables back to the source list
                    if ' :: ' in item.text():
                        if not item.text().endswith(' :: '):  # if there is a factor name and a variable
                            self.source_listWidget.insertItem(
                                find_previous_item_position(self.source_listWidget, self.names,
                                                            item.text().split(' :: ')[1]),
                                item.text().split(' :: ')[1])
                            item.setText(item.text().split(' :: ')[0] + ' :: ')
                    else:
                        self.source_listWidget.insertItem(
                            find_previous_item_position(self.source_listWidget, self.names, item.text()),
                            item.text())
    def optionsButton_clicked(self):
        """Open the y-limits dialog; convert the entered texts to floats
        (None when empty or not convertible)."""
        if self.ylims_dialog.exec_():
            self.ylims = self.ylims_dialog.read_parameters()
            self.ylims[0] = _float_or_none(self.ylims[0])
            self.ylims[1] = _float_or_none(self.ylims[1])
    def read_parameters(self):
        """Return (variable_names, factors, ylims); with multiple factors
        the variable part is extracted from each 'levels :: variable' slot."""
        if len(self.factors) > 1:
            return [str(self.selected_listWidget.item(i).text().split(' :: ')[1]) for i in
                    range(self.selected_listWidget.count())], self.factors, self.ylims
        else:
            return [str(self.selected_listWidget.item(i).text()) for i in
                    range(self.selected_listWidget.count())], self.factors, self.ylims
from .ui import compare_groups_single_case_slope
class compare_groups_single_case_slope_dialog(QtWidgets.QDialog, compare_groups_single_case_slope.Ui_Dialog):
    """Sub-dialog of compare_groups_dialog: choose the slope SE variable
    (at most one) and the number of trials for single-case slope tests."""
    def __init__(self, parent=None, names=[]):
        QtWidgets.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setModal(True)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.source_listWidget.doubleClicked.connect(self.add_var)
        self.selected_listWidget.doubleClicked.connect(self.remove_var)
        self.addVar.clicked.connect(self.add_var)
        self.removeVar.clicked.connect(self.remove_var)
        self.init_vars(names)
        #self.show()
    def init_vars(self, names):
        """Refresh the source/selected lists after variable names changed."""
        self.names = names
        remove_ceased_vars(self.selected_listWidget, names)
        init_source_vars(self.source_listWidget, names, [self.selected_listWidget])
    def add_var(self):
        """Set the slope SE variable (at most one is allowed)."""
        if self.selected_listWidget.count() == 0:  # allow only if the list is empty
            add_to_list_widget(self.source_listWidget, self.selected_listWidget)
    def remove_var(self):
        """Move the slope SE variable back to the source list."""
        remove_item_from_list_widget(self.source_listWidget, self.selected_listWidget, self.names)
    def read_parameters(self):
        """Return (SE_variable_name or [], trial_number_text)."""
        return ([str(self.selected_listWidget.item(i).text()) for i in range(self.selected_listWidget.count())][0] if
                self.selected_listWidget.count() else [],
                str(self.spinBox.text()))
from .ui import compare_groups
class compare_groups_dialog(QtWidgets.QDialog, compare_groups.Ui_Dialog):
    """Dialog for comparing groups: dependent variables, up to two grouping
    variables, optional single-case slope settings and y axis limits."""
    def __init__(self, parent=None, names=[]):
        QtWidgets.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setModal(True)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.selected_listWidget.doubleClicked.connect(self.remove_var)
        self.group_listWidget.doubleClicked.connect(self.remove_group)
        self.addVar.clicked.connect(self.add_var)
        self.removeVar.clicked.connect(self.remove_var)
        self.add_group_button.clicked.connect(self.add_group)
        self.remove_group_button.clicked.connect(self.remove_group)
        self.pushButton.clicked.connect(self.on_slopeButton_clicked)
        self.pushButton_2.clicked.connect(self.optionsButton_clicked)
        self.slope_dialog = compare_groups_single_case_slope_dialog(self, names=names)
        self.ylims_dialog = ylims_dialog(self)
        self.single_case_slope_SE, self.single_case_slope_trial_n = [], 0
        self.ylims = [None, None]  # y axis limits; None means "not set"
        self.init_vars(names)
        self.show()
    def init_vars(self, names):
        """Refresh all lists (and the slope sub-dialog) after the
        available variable names changed."""
        self.names = names
        remove_ceased_vars(self.selected_listWidget, names)
        remove_ceased_vars(self.group_listWidget, names)
        init_source_vars(self.source_listWidget, names, [self.selected_listWidget, self.group_listWidget])
        self.slope_dialog.init_vars(names)
    def add_var(self):
        """Move the selected source variables to the dependent list."""
        add_to_list_widget(self.source_listWidget, self.selected_listWidget)
    def remove_var(self):
        """Move the selected dependent variables back to the source list."""
        remove_item_from_list_widget(self.source_listWidget, self.selected_listWidget, self.names)
    def add_group(self):
        """Add a grouping variable (two at most)."""
        if self.group_listWidget.count() < 2:  # allow maximum two grouping variables
            add_to_list_widget(self.source_listWidget, self.group_listWidget)
    def remove_group(self):
        """Move the selected grouping variables back to the source list."""
        remove_item_from_list_widget(self.source_listWidget, self.group_listWidget, self.names)
    def on_slopeButton_clicked(self):
        """Open the single-case slope sub-dialog and store its settings."""
        if self.slope_dialog.exec_():
            self.single_case_slope_SE, self.single_case_slope_trial_n = self.slope_dialog.read_parameters()
    def optionsButton_clicked(self):
        """Open the y-limits dialog; convert the entered texts to floats
        (None when empty or not convertible)."""
        if self.ylims_dialog.exec_():
            self.ylims = self.ylims_dialog.read_parameters()
            self.ylims[0] = _float_or_none(self.ylims[0])
            self.ylims[1] = _float_or_none(self.ylims[1])
    def read_parameters(self):
        """Return (dependent_vars, group_vars, slope_SE, slope_trial_n, ylims)."""
        return ([str(self.selected_listWidget.item(i).text()) for i in range(self.selected_listWidget.count())],
                [str(self.group_listWidget.item(i).text()) for i in range(self.group_listWidget.count())],
                self.single_case_slope_SE, int(self.single_case_slope_trial_n), self.ylims)
from .ui import find_text
class find_text_dialog(QtWidgets.QDialog, find_text.Ui_Dialog):
    """Modal dialog for searching text in the output pane."""
    def __init__(self, parent=None, output_pane=None):
        QtWidgets.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setModal(True)
        # QTextEdit-like widget that the search runs against.
        self.output_pane = output_pane
        self.pushButton_next.clicked.connect(self.find_forward_text)
        self.pushButton_previous.clicked.connect(self.find_backward_text)
        self.buttonBox.button(QtWidgets.QDialogButtonBox.Close).clicked.connect(self.reject)
        # Focus the search field so the user can start typing immediately.
        self.lineEdit.setFocus()
        self.show()
    def find_forward_text(self):
        """Search forward in the output pane for the entered text."""
        self.output_pane.find(self.lineEdit.text())
    def find_backward_text(self):
        """Search backward in the output pane for the entered text."""
        self.output_pane.find(self.lineEdit.text(), QtGui.QTextDocument.FindBackward)
from .ui import preferences
class preferences_dialog(QtWidgets.QDialog, preferences.Ui_Dialog):
    """Modal preferences dialog for choosing the UI language and plot theme."""
    def __init__(self, parent=None):
        QtWidgets.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.setModal(True)
        self.buttonBox.accepted.connect(self.write_settings)
        self.buttonBox.rejected.connect(self.reject)
        self.init_langs()
        self.init_themes()
        self.show()
    def init_langs(self):
        """Set the available languages.
        """
        import glob
        import os
        def available_langs(domain=None, localedir=None):
            """Look for available languages"""
            # NOTE(review): gettext._current_domain and gettext._default_localedir
            # are undocumented internals — verify they still exist when upgrading.
            if domain is None:
                domain = gettext._current_domain
            if localedir is None:
                localedir = gettext._default_localedir
            # A language is "available" if a compiled catalog exists for it.
            files = glob.glob(os.path.join(localedir, '*', 'LC_MESSAGES', '%s.mo' % domain))
            langs = [file_name.split(os.path.sep)[-3] for file_name in files]
            return langs
        # English has no catalog (it is the source language), so add it manually.
        langs = sorted(['en']+available_langs(domain='cogstat', localedir=os.path.dirname(os.path.abspath(__file__)) +
                                              '/locale'))
        # local language names based on https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
        # NOTE(review): 'Norvegian Bokmål' looks like a typo for 'Norwegian Bokmål'
        # in the user-visible label — confirm before changing the string.
        lang_names = {'bg': 'Български (Bulgarian)', 'de': 'Deutsch (German)', 'en': 'English',
                      'el': 'Ελληνικά (Greek)', 'es': 'Español (Spanish)',
                      'et': 'Eesti (Estonian)', 'fa': 'فارسی (Persian)',
                      'fr': 'Français (French)', 'he': 'עברית (Hebrew)',
                      'hr': 'Hrvatski (Croatian)', 'hu': 'Magyar (Hungarian)', 'it': 'Italiano (Italian)',
                      'kk': 'Qazaqsha (Kazakh)', 'ko': '한국어 (Korean)', 'nb': 'Norsk Bokmål (Norvegian Bokmål)',
                      'ro': 'Română (Romanian)', 'ru': 'Русский (Russian)', 'sk': 'Slovenčina (Slovak)',
                      'th': 'ไทย (Thai)'}
        # Display names sorted alphabetically; raises KeyError if a catalog
        # exists for a language missing from lang_names.
        lang_names_sorted = sorted([lang_names[lang] for lang in langs])
        # Reverse lookup: display name -> ISO code, used when saving settings.
        self.lang_codes = {lang_name:lang_code for lang_code, lang_name in zip(lang_names.keys(), lang_names.values())}
        self.langComboBox.clear()
        for lang_name in lang_names_sorted:
            self.langComboBox.addItem(lang_name)
        # Preselect the currently configured language.
        self.langComboBox.setCurrentIndex(lang_names_sorted.index(lang_names[csc.language]))
    def init_themes(self):
        """Set the available themes.
        """
        import matplotlib.pyplot as plt
        themes = sorted(plt.style.available)
        self.themeComboBox.clear()
        for theme in themes:
            self.themeComboBox.addItem(theme)
        # Preselect the currently configured matplotlib style.
        self.themeComboBox.setCurrentIndex(themes.index(csc.theme))
    def write_settings(self):
        """Save the settings when OK is pressed.
        """
        csc.save(['language'], self.lang_codes[str(self.langComboBox.currentText())])
        csc.save(['graph', 'theme'], str(self.themeComboBox.currentText()))
        self.accept()
| gpl-3.0 |
precedenceguo/mxnet | example/reinforcement-learning/ddpg/strategies.py | 42 | 2473 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
class BaseStrategy(object):
    """Abstract base class for exploration strategies.

    Subclasses must implement :meth:`get_action`; :meth:`reset` is an
    optional hook that defaults to doing nothing.
    """

    def get_action(self, obs, policy):
        """Return an exploratory action for ``obs`` given ``policy``."""
        raise NotImplementedError

    def reset(self):
        """Reset internal exploration state; no-op by default."""
        pass


class OUStrategy(BaseStrategy):
    """Ornstein-Uhlenbeck exploration noise.

    Follows the process dxt = theta * (mu - xt) * dt + sigma * dWt,
    where Wt denotes the Wiener process; the noise is added to the
    policy's action and the sum is clipped to the action-space bounds.
    """

    def __init__(self, env_spec, mu=0, theta=0.15, sigma=0.3):
        self.mu = mu          # long-run mean of the process
        self.theta = theta    # mean-reversion rate
        self.sigma = sigma    # scale of the Wiener increments
        self.action_space = env_spec.action_space
        self.state = self._initial_state()

    def _initial_state(self):
        """Return the process start point: every component equal to mu."""
        return np.ones(self.action_space.flat_dim) * self.mu

    def evolve_state(self):
        """Advance the process by one step and return the new state."""
        current = self.state
        drift = self.theta * (self.mu - current)
        diffusion = self.sigma * np.random.randn(len(current))
        self.state = current + (drift + diffusion)
        return self.state

    def reset(self):
        """Restart the process from its mean."""
        self.state = self._initial_state()

    def get_action(self, obs, policy):
        """Return the policy's action for ``obs`` perturbed by OU noise."""
        # get_action accepts a 2D tensor with one row
        flat_obs = obs.reshape((1, -1))
        action = policy.get_action(flat_obs)
        noise = self.evolve_state()
        return np.clip(action + noise,
                       self.action_space.low,
                       self.action_space.high)
if __name__ == "__main__":
    # Smoke test: evolve the OU process for 1000 steps with a dummy
    # 2-dimensional action space and plot the first component.
    class Env1(object):
        """Minimal stand-in for an environment spec (exposes action_space)."""
        def __init__(self):
            self.action_space = Env2()
    class Env2(object):
        """Minimal stand-in for an action space (exposes flat_dim only)."""
        def __init__(self):
            self.flat_dim = 2
    env_spec = Env1()
    test = OUStrategy(env_spec)
    states = []
    for i in range(1000):
        states.append(test.evolve_state()[0])
    import matplotlib.pyplot as plt
    plt.plot(states)
    plt.show()
| apache-2.0 |
zheminzhou/GrapeTree | simulations/plot_sens_pre.py | 2 | 1747 | import numpy as np
from numpy import median
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import pandas as pd
import collections
import sys
# Summary file produced by the simulation pipeline (tab-separated, no header).
sum_file = sys.argv[1]
df = pd.read_csv(sum_file, header=None, sep='\t')
# Keep only runs with a substitution rate (column 4) of at most 0.007.
df = df[df[4] <= 0.007]
#del df[4]
#colors = iter(mpl.cm.rainbow(np.linspace(0, 1, 4)))
colors = ["Blues", "Greens", "Purples", "Reds"]
x = df.groupby(1, sort=False)
fig, ax = plt.subplots(figsize=(6, 4))
counter = 0
colorhash = collections.OrderedDict()
for name, group in x:
    df_sel = df.loc[df[1] == name]
    # Average the metrics within logarithmic substitution-rate bins.
    agg = df_sel.groupby(pd.cut(df_sel[0], (0, 0.00008, 0.0008, 0.008, 0.08, 0.8))).aggregate(np.mean)
    cmap = colors[counter]
    norm = mpl.colors.LogNorm(vmin=0.000008, vmax=0.5)
    ax.plot(agg[3], agg[7], lw=1, color='grey', zorder=1, label=name)
    # Invisible greyscale scatter (s=0): only used to drive the shared colorbar.
    cax = ax.scatter(agg[3], agg[7], cmap=mpl.cm.Greys, norm=norm, c=agg[4], zorder=0, s=0)
    ax.scatter(agg[3], agg[7], cmap=cmap, norm=norm, c=agg[4], zorder=2, s=90)
    # Renamed from `map`, which shadowed the builtin.
    legend_cmap = mpl.cm.get_cmap(colors[counter])
    colorhash[name] = legend_cmap(0.7)
    counter += 1
# The following two lines generate custom fake lines that will be used as legend entries:
markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o', linestyle='') for color in colorhash.values()]
ax.legend(markers, colorhash.keys(), numpoints=1, loc='upper left', markerscale=1.2, frameon=True)
ax.grid(b=False)
# BUGFIX: the statistical definitions were swapped in the axis labels.
# Precision is the positive predictive value; sensitivity is the true positive rate.
ax.set_xlabel('precision (positive predictive value)')
ax.set_ylabel('sensitivity (true positive rate)')
plt.xlim([0.8, 1.0])
plt.ylim([0.5, 1.05])
# Leave room on the right for the colorbar axes.
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(cax, cax=cbar_ax, label="substitution rate")
fig.savefig("rates.pdf")
#plt.show()
jseabold/statsmodels | examples/python/quantile_regression.py | 5 | 4049 | # coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook quantile_regression.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Quantile regression
#
# This example page shows how to use ``statsmodels``' ``QuantReg`` class
# to replicate parts of the analysis published in
#
# * Koenker, Roger and Kevin F. Hallock. "Quantile Regression". Journal
# of Economic Perspectives, Volume 15, Number 4, Fall 2001, Pages 143–156
#
# We are interested in the relationship between income and expenditures on
# food for a sample of working class Belgian households in 1857 (the Engel
# data).
#
# ## Setup
#
# We first need to load some modules and to retrieve the data.
# Conveniently, the Engel dataset is shipped with ``statsmodels``.
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
# Load the Engel (1857) food-expenditure dataset shipped with statsmodels.
data = sm.datasets.engel.load_pandas().data
data.head()

# ## Least Absolute Deviation
#
# The LAD model is a special case of quantile regression where q=0.5

mod = smf.quantreg('foodexp ~ income', data)
res = mod.fit(q=.5)
print(res.summary())

# ## Visualizing the results
#
# We estimate the quantile regression model for many quantiles between .05
# and .95, and compare best fit line from each of these models to Ordinary
# Least Squares results.

# ### Prepare data for plotting
#
# For convenience, we place the quantile regression results in a Pandas
# DataFrame, and the OLS results in a dictionary.

# Ten evenly spaced quantiles: 0.05, 0.15, ..., 0.95.
quantiles = np.arange(.05, .96, .1)
def fit_model(q):
    """Fit the quantile regression at quantile ``q``.

    Returns [q, intercept, income slope, slope 95% CI lower, slope 95% CI upper].
    Uses the module-level ``mod`` (foodexp ~ income) model.
    """
    res = mod.fit(q=q)
    return [q, res.params['Intercept'], res.params['income']
            ] + res.conf_int().loc['income'].tolist()
# One quantile-regression fit per quantile, collected into a DataFrame.
models = [fit_model(x) for x in quantiles]
models = pd.DataFrame(models, columns=['q', 'a', 'b', 'lb', 'ub'])

# OLS fit for comparison; keep intercept, slope, and slope 95% CI.
ols = smf.ols('foodexp ~ income', data).fit()
ols_ci = ols.conf_int().loc['income'].tolist()
ols = dict(
    a=ols.params['Intercept'],
    b=ols.params['income'],
    lb=ols_ci[0],
    ub=ols_ci[1])

print(models)
print(ols)
# ### First plot
#
# This plot compares best fit lines for 10 quantile regression models to
# the least squares fit. As Koenker and Hallock (2001) point out, we see
# that:
#
# 1. Food expenditure increases with income
# 2. The *dispersion* of food expenditure increases with income
# 3. The least squares estimates fit low income observations quite poorly
# (i.e. the OLS line passes over most low income households)

x = np.arange(data.income.min(), data.income.max(), 50)
get_y = lambda a, b: a + b * x

fig, ax = plt.subplots(figsize=(8, 6))

# One dotted grey line per quantile fit.
for i in range(models.shape[0]):
    y = get_y(models.a[i], models.b[i])
    ax.plot(x, y, linestyle='dotted', color='grey')

# The OLS fit in red on top of the quantile lines.
y = get_y(ols['a'], ols['b'])
ax.plot(x, y, color='red', label='OLS')
ax.scatter(data.income, data.foodexp, alpha=.2)
ax.set_xlim((240, 3000))
ax.set_ylim((240, 2000))
legend = ax.legend()
ax.set_xlabel('Income', fontsize=16)
ax.set_ylabel(
    'Food expenditure', fontsize=16)
# ### Second plot
#
# The dotted black lines form 95% point-wise confidence band around 10
# quantile regression estimates (solid black line). The red lines represent
# OLS regression results along with their 95% confidence interval.
#
# In most cases, the quantile regression point estimates lie outside the
# OLS confidence interval, which suggests that the effect of income on food
# expenditure may not be constant across the distribution.

n = models.shape[0]
# Quantile-regression slope and its confidence band (black).
p1 = plt.plot(models.q, models.b, color='black', label='Quantile Reg.')
p2 = plt.plot(models.q, models.ub, linestyle='dotted', color='black')
p3 = plt.plot(models.q, models.lb, linestyle='dotted', color='black')
# Constant OLS slope and its confidence band (red), repeated across quantiles.
p4 = plt.plot(models.q, [ols['b']] * n, color='red', label='OLS')
p5 = plt.plot(models.q, [ols['lb']] * n, linestyle='dotted', color='red')
p6 = plt.plot(models.q, [ols['ub']] * n, linestyle='dotted', color='red')
plt.ylabel(r'$\beta_{income}$')
plt.xlabel('Quantiles of the conditional food expenditure distribution')
plt.legend()
plt.show()
gticket/scikit-neuralnetwork | docs/conf.py | 5 | 1814 | # -*- coding: utf-8 -*-
#
# scikit-neuralnetwork documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 31 20:28:10 2015.
import sys
import os
project = u'scikit-neuralnetwork'
copyright = u'2015, scikit-neuralnetwork developers (BSD License)'

# -- Configuration of documentation -------------------------------------------

# sys.path.append(os.path.dirname(os.path.dirname(__file__)).encode('utf-8'))
import sknn

# Version strings come straight from the installed package so the docs
# always match the code being documented.
version = sknn.__version__
release = sknn.__version__

extensions = ['sphinx.ext.autosummary',
              'sphinx.ext.autodoc',
              'sphinx.ext.coverage',
              'numpydoc']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
todo_include_todos = False
# -- Overrides for modules ----------------------------------------------------
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
if name in ('BaseEstimator', 'TransformerMixin',
'RegressorMixin', 'ClassifierMixin'):
return object
return Mock()
# Modules to replace with mocks so autodoc can import the package without
# its heavy runtime dependencies installed.
MOCK_MODULES = ['numpy', 'theano',
                'sklearn', 'sklearn.base', 'sklearn.pipeline',
                'sklearn.cross_validation', 'sklearn.preprocessing']
for fullname in MOCK_MODULES:
    segments = []
    for s in fullname.split('.'):
        segments.append(s)
        # Register every dotted prefix (e.g. 'sklearn' before 'sklearn.base')
        # so importing a submodule finds a mocked parent package too.
        mod_name = ".".join(segments)
        if mod_name not in sys.modules:
            sys.modules[mod_name] = Mock()
# -- Options for HTML output --------------------------------------------------

html_title = 'scikit-neuralnetwork documentation'
# html_logo = 'img/logo.png'
# html_favicon = 'img/favicon.ico'
html_static_path = ['_static']
# Base name for the HTML help builder output files.
htmlhelp_basename = 'sknndoc'
| bsd-3-clause |
apdjustino/urbansim | urbansim/models/transition.py | 4 | 17258 | """
Use the ``TransitionModel`` class with the different transitioners to
add or remove agents based on growth rates or target totals.
"""
from __future__ import division
import logging
import numpy as np
import pandas as pd
from . import util
from ..utils.logutil import log_start_finish
from ..utils.sampling import sample_rows
logger = logging.getLogger(__name__)
def _empty_index():
    """Return an empty pandas Index, the neutral 'no rows' result."""
    return pd.Index([])
def add_rows(data, nrows, starting_index=None, accounting_column=None):
    """
    Add rows to data table according to a given nrows.
    New rows will have their IDs set to NaN.

    Parameters
    ----------
    data : pandas.DataFrame
    nrows : int
        Number of rows to add.
    starting_index : int, optional
        The starting index from which to calculate indexes for the new
        rows. If not given the max + 1 of the index of `data` will be used.
    accounting_column: string, optional
        Name of column with accounting totals/quantities to apply towards the
        control. If not provided then row counts will be used for accounting.

    Returns
    -------
    updated : pandas.DataFrame
        Table with rows added. New rows will have their index values
        set to NaN.
    added : pandas.Index
        New indexes of the rows that were added.
    copied : pandas.Index
        Indexes of rows that were copied. A row copied multiple times
        will have multiple entries.

    """
    logger.debug('start: adding {} rows in transition model'.format(nrows))
    if nrows == 0:
        return data, _empty_index(), _empty_index()

    # BUGFIX: explicit None check. The previous `if not starting_index` test
    # silently discarded a legitimate starting_index of 0.
    if starting_index is None:
        starting_index = data.index.values.max() + 1

    new_rows = sample_rows(nrows, data, accounting_column=accounting_column)
    copied_index = new_rows.index
    # BUGFIX: `np.int` was removed from NumPy (1.24+); the builtin int is the
    # supported equivalent and yields the same platform-default integer dtype.
    added_index = pd.Index(np.arange(
        starting_index, starting_index + len(new_rows.index), dtype=int))
    new_rows.index = added_index

    logger.debug(
        'finish: added {} rows in transition model'.format(len(new_rows)))
    return pd.concat([data, new_rows]), added_index, copied_index
def remove_rows(data, nrows, accounting_column=None):
    """
    Remove a random `nrows` number of rows from a table.

    Parameters
    ----------
    data : DataFrame
    nrows : int
        Number of rows to remove (a negative value is treated as its
        absolute value).
    accounting_column: string, optional
        Name of column with accounting totals/quantities to apply towards the
        control. If not provided then row counts will be used for accounting.

    Returns
    -------
    updated : pandas.DataFrame
        Table with random rows removed.
    removed : pandas.Index
        Indexes of the rows removed from the table.

    """
    logger.debug('start: removing {} rows in transition model'.format(nrows))
    nrows = abs(nrows)  # in case a negative number came in
    # Total units available, either by accounting column or by row count.
    unit_check = data[accounting_column].sum() if accounting_column else len(data)
    if nrows == 0:
        return data, _empty_index()
    elif nrows > unit_check:
        raise ValueError('Number of rows to remove exceeds number of records in table.')

    # Renamed from `remove_rows`, which shadowed this function's own name.
    rows_to_remove = sample_rows(nrows, data, accounting_column=accounting_column, replace=False)
    remove_index = rows_to_remove.index

    logger.debug('finish: removed {} rows in transition model'.format(nrows))
    return data.loc[data.index.difference(remove_index)], remove_index
def add_or_remove_rows(data, nrows, starting_index=None, accounting_column=None):
    """
    Add or remove rows to/from a table. Rows are added
    for positive `nrows` and removed for negative `nrows`.

    Parameters
    ----------
    data : DataFrame
    nrows : float
        Number of rows to add or remove.
    starting_index : int, optional
        The starting index from which to calculate indexes for new rows.
        If not given the max + 1 of the index of `data` will be used.
        (Not applicable if rows are being removed.)
    accounting_column : string, optional
        Name of column with accounting totals/quantities to apply towards
        the control; forwarded to `add_rows`/`remove_rows`.

    Returns
    -------
    updated : pandas.DataFrame
        Table with random rows removed.
    added : pandas.Index
        New indexes of the rows that were added.
    copied : pandas.Index
        Indexes of rows that were copied. A row copied multiple times
        will have multiple entries.
    removed : pandas.Index
        Index of rows that were removed.

    """
    if nrows > 0:
        updated, added, copied = add_rows(
            data, nrows, starting_index,
            accounting_column=accounting_column)
        removed = _empty_index()

    elif nrows < 0:
        updated, removed = remove_rows(data, nrows, accounting_column=accounting_column)
        added, copied = _empty_index(), _empty_index()

    # nrows == 0: nothing to do, return the table unchanged.
    else:
        updated, added, copied, removed = \
            data, _empty_index(), _empty_index(), _empty_index()

    return updated, added, copied, removed
class GrowthRateTransition(object):
    """
    Transition given tables using a simple growth rate.

    Parameters
    ----------
    growth_rate : float
        Fractional growth rate; positive adds rows, negative removes them.
    accounting_column : string, optional
        Name of column with accounting totals/quantities to apply towards
        the control. If not provided then row counts are used for accounting.

    """
    def __init__(self, growth_rate, accounting_column=None):
        self.growth_rate = growth_rate
        self.accounting_column = accounting_column

    def transition(self, data, year):
        """
        Add or remove rows to/from a table according to the prescribed
        growth rate for this model.

        Parameters
        ----------
        data : pandas.DataFrame
            Rows will be removed from or added to this table.
        year : None, optional
            Here for compatibility with other transition models,
            but ignored.

        Returns
        -------
        updated : pandas.DataFrame
            Table with rows removed or added.
        added : pandas.Index
            New indexes of the rows that were added.
        copied : pandas.Index
            Indexes of rows that were copied. A row copied multiple times
            will have multiple entries.
        removed : pandas.Index
            Index of rows that were removed.

        """
        # Current total: either the accounting column's sum or the row count.
        if self.accounting_column is None:
            current_total = len(data)
        else:
            current_total = data[self.accounting_column].sum()
        nrows = int(round(current_total * self.growth_rate))

        with log_start_finish(
                'adding {} rows via growth rate ({}) transition'.format(
                    nrows, self.growth_rate),
                logger):
            return add_or_remove_rows(data, nrows, accounting_column=self.accounting_column)

    def __call__(self, data, year):
        """
        Call `self.transition` with inputs.

        """
        return self.transition(data, year)
class TabularGrowthRateTransition(object):
    """
    Growth rate based transitions where the rates are stored in
    a table indexed by year with optional segmentation.

    Parameters
    ----------
    growth_rates : pandas.DataFrame
    rates_column : str
        Name of the column in `growth_rates` that contains the rates.
    accounting_column: string, optional
        Name of column with accounting totals/quantities to apply towards the
        control. If not provided then row counts will be used for accounting.

    """
    def __init__(self, growth_rates, rates_column, accounting_column=None):
        self.growth_rates = growth_rates
        self.rates_column = rates_column
        self.accounting_column = accounting_column

    @property
    def _config_table(self):
        """
        Table that has transition configuration.

        """
        return self.growth_rates

    @property
    def _config_column(self):
        """
        Non-filter column in config table.

        """
        return self.rates_column

    def _calc_nrows(self, len_data, growth_rate):
        """
        Calculate the number of rows to add to or remove from some data.

        Parameters
        ----------
        len_data : int
            The current number of rows in the data table.
        growth_rate : float
            Growth rate as a fraction. Positive for growth, negative
            for removing rows.

        """
        return int(round(len_data * growth_rate))

    def transition(self, data, year):
        """
        Add or remove rows to/from a table according to the prescribed
        growth rate for this model and year.

        Parameters
        ----------
        data : pandas.DataFrame
            Rows will be removed from or added to this table.
        year : None, optional
            Here for compatibility with other transition models,
            but ignored.

        Returns
        -------
        updated : pandas.DataFrame
            Table with rows removed or added.
        added : pandas.Index
            New indexes of the rows that were added.
        copied : pandas.Index
            Indexes of rows that were copied. A row copied multiple times
            will have multiple entries.
        removed : pandas.Index
            Index of rows that were removed.

        """
        logger.debug('start: tabular transition')
        if year not in self._config_table.index:
            raise ValueError('No targets for given year: {}'.format(year))

        # want this to be a DataFrame
        year_config = self._config_table.loc[[year]]
        logger.debug('transitioning {} segments'.format(len(year_config)))

        segments = []
        added_indexes = []
        copied_indexes = []
        removed_indexes = []

        # since we're looping over discrete segments we need to track
        # out here where their new indexes will begin
        starting_index = data.index.values.max() + 1

        # Each config row defines one segment: its filter columns select the
        # subset, and the config column gives that segment's growth rate.
        for _, row in year_config.iterrows():
            subset = util.filter_table(data, row, ignore={self._config_column})

            # Do not run on segment if it is empty
            if len(subset) == 0:
                logger.debug('empty segment encountered')
                continue

            if self.accounting_column is None:
                nrows = self._calc_nrows(len(subset), row[self._config_column])
            else:
                nrows = self._calc_nrows(
                    subset[self.accounting_column].sum(),
                    row[self._config_column])

            updated, added, copied, removed = \
                add_or_remove_rows(subset, nrows, starting_index, self.accounting_column)
            if nrows > 0:
                # only update the starting index if rows were added
                starting_index = starting_index + nrows
            segments.append(updated)
            added_indexes.append(added)
            copied_indexes.append(copied)
            removed_indexes.append(removed)

        # Reassemble the segments and merge the per-segment index bookkeeping.
        updated = pd.concat(segments)
        added_indexes = util.concat_indexes(added_indexes)
        copied_indexes = util.concat_indexes(copied_indexes)
        removed_indexes = util.concat_indexes(removed_indexes)

        logger.debug('finish: tabular transition')
        return updated, added_indexes, copied_indexes, removed_indexes

    def __call__(self, data, year):
        """
        Call `self.transition` with inputs.

        """
        return self.transition(data, year)
class TabularTotalsTransition(TabularGrowthRateTransition):
    """
    Transition data via control totals in pandas DataFrame with
    optional segmentation.

    Parameters
    ----------
    targets : pandas.DataFrame
    totals_column : str
        Name of the column in `targets` that contains the control totals.
    accounting_column: string, optional
        Name of column with accounting totals/quantities to apply towards the
        control. If not provided then row counts will be used for accounting.

    """
    def __init__(self, targets, totals_column, accounting_column=None):
        self.targets = targets
        self.totals_column = totals_column
        self.accounting_column = accounting_column

    @property
    def _config_table(self):
        """
        Table that has transition configuration.

        """
        return self.targets

    @property
    def _config_column(self):
        """
        Non-filter column in config table.

        """
        return self.totals_column

    def _calc_nrows(self, len_data, target_pop):
        """
        Calculate the number of rows to add to or remove from some data.

        Parameters
        ----------
        len_data : int
            The current number of rows in the data table.
        target_pop : int
            Target population.

        """
        # Unlike the growth-rate variant, this is a simple difference:
        # positive adds rows, negative removes them.
        return target_pop - len_data

    def transition(self, data, year):
        """
        Add or remove rows to/from a table according to the prescribed
        totals for this model and year.

        Parameters
        ----------
        data : pandas.DataFrame
            Rows will be removed from or added to this table.
        year : None, optional
            Here for compatibility with other transition models,
            but ignored.

        Returns
        -------
        updated : pandas.DataFrame
            Table with rows removed or added.
        added : pandas.Index
            New indexes of the rows that were added.
        copied : pandas.Index
            Indexes of rows that were copied. A row copied multiple times
            will have multiple entries.
        removed : pandas.Index
            Index of rows that were removed.

        """
        # Delegates to the tabular growth-rate machinery; only the nrows
        # calculation (above) and the config table/column differ.
        with log_start_finish('tabular totals transition', logger):
            return super(TabularTotalsTransition, self).transition(data, year)
def _update_linked_table(table, col_name, added, copied, removed):
    """
    Copy and update rows in a table that has a column referencing another
    table that has had rows added via copying.

    Parameters
    ----------
    table : pandas.DataFrame
        Table to update with new or removed rows.
    col_name : str
        Name of column in `table` that corresponds to the index values
        in `copied` and `removed`.
    added : pandas.Index
        Indexes of rows that are new in the linked table.
    copied : pandas.Index
        Indexes of rows that were copied to make new rows in linked table.
    removed : pandas.Index
        Indexes of rows that were removed from the linked table.

    Returns
    -------
    updated : pandas.DataFrame
        `table` with rows referencing removed indexes dropped and rows
        referencing copied indexes duplicated under the new indexes.

    """
    logger.debug('start: update linked table after transition')

    # handle removals
    table = table.loc[~table[col_name].isin(set(removed))]
    if (added is None or len(added) == 0):
        return table

    # map new IDs to the IDs from which they were copied
    id_map = pd.concat([pd.Series(copied, name=col_name), pd.Series(added, name='temp_id')], axis=1)

    # join to linked table and assign new id
    new_rows = id_map.merge(table, on=col_name)
    new_rows.drop(col_name, axis=1, inplace=True)
    new_rows.rename(columns={'temp_id': col_name}, inplace=True)

    # index the new rows
    starting_index = table.index.values.max() + 1
    # BUGFIX: `np.int` was removed from NumPy (1.24+); the builtin int gives
    # the same platform-default integer dtype.
    new_rows.index = np.arange(starting_index, starting_index + len(new_rows), dtype=int)

    logger.debug('finish: update linked table after transition')
    return pd.concat([table, new_rows])
class TransitionModel(object):
    """
    Models things moving into or out of a region.

    Parameters
    ----------
    transitioner : callable
        A callable that takes a data table and a year number and returns
        and new data table, the indexes of rows added, the indexes
        of rows copied, and the indexes of rows removed.

    """
    def __init__(self, transitioner):
        self.transitioner = transitioner

    def transition(self, data, year, linked_tables=None):
        """
        Add or remove rows from a table based on population targets.

        Parameters
        ----------
        data : pandas.DataFrame
            Rows will be removed from or added to this table.
        year : int
            Year number that will be passed to `transitioner`.
        linked_tables : dict of tuple, optional
            Dictionary of (table, 'column name') pairs. The column name
            should match the index of `data`. Indexes in `data` that
            are copied or removed will also be copied and removed in
            linked tables. They dictionary keys are used in the
            returned `updated_links`.

        Returns
        -------
        updated : pandas.DataFrame
            Table with rows removed or added.
        added : pandas.Series
            Indexes of new rows in `updated`.
        updated_links : dict of pandas.DataFrame

        """
        logger.debug('start: transition')
        linked_tables = linked_tables or {}
        updated_links = {}

        with log_start_finish('add/remove rows', logger):
            updated, added, copied, removed = self.transitioner(data, year)

        # Propagate the add/copy/remove bookkeeping to each linked table so
        # their foreign-key references stay consistent with `updated`.
        for table_name, (table, col) in linked_tables.items():
            logger.debug('updating linked table {}'.format(table_name))
            updated_links[table_name] = \
                _update_linked_table(table, col, added, copied, removed)

        logger.debug('finish: transition')
        return updated, added, updated_links
| bsd-3-clause |
BinRoot/TensorFlow-Book | ch09_cnn/conv_visuals.py | 1 | 1920 | import numpy as np
import matplotlib.pyplot as plt
import cifar_tools
import tensorflow as tf
# Load CIFAR-10 batches; NOTE(review): hard-coded local path — adjust for
# your environment.
names, data, labels = \
    cifar_tools.read_data('/home/binroot/res/cifar-10-batches-py')
def show_conv_results(data, filename=None):
    """Plot every channel of a conv-layer activation tensor in a grid.

    Parameters
    ----------
    data : ndarray of shape (batch, height, width, channels)
        Only the first batch element is shown.
    filename : str, optional
        If given, save the figure to this path instead of displaying it.
    """
    plt.figure()
    n_channels = np.shape(data)[3]
    # Grid generalized: was hard-coded to 4x8, which broke (IndexError in
    # subplot) for layers with more than 32 channels. 32 still yields 4x8.
    cols = 8
    rows = (n_channels + cols - 1) // cols
    for i in range(n_channels):
        img = data[0, :, :, i]
        plt.subplot(rows, cols, i + 1)
        plt.imshow(img, cmap='Greys_r', interpolation='none')
        plt.axis('off')
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def show_weights(W, filename=None):
    """Plot every output filter of a conv-layer weight tensor in a grid.

    Parameters
    ----------
    W : ndarray of shape (height, width, in_channels, out_channels)
        Only the first input channel of each filter is shown.
    filename : str, optional
        If given, save the figure to this path instead of displaying it.
    """
    plt.figure()
    n_filters = np.shape(W)[3]
    # Grid generalized: was hard-coded to 4x8, which broke (IndexError in
    # subplot) for layers with more than 32 filters. 32 still yields 4x8.
    cols = 8
    rows = (n_filters + cols - 1) // cols
    for i in range(n_filters):
        img = W[:, :, 0, i]
        plt.subplot(rows, cols, i + 1)
        plt.imshow(img, cmap='Greys_r', interpolation='none')
        plt.axis('off')
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
# Take one 24x24 greyscale image from the dataset and save it for reference.
raw_data = data[4, :]
raw_img = np.reshape(raw_data, (24, 24))
plt.figure()
plt.imshow(raw_img, cmap='Greys_r')
plt.savefig('input_image.png')

# Build a single conv -> bias -> relu -> maxpool pipeline (TF1 graph mode).
x = tf.reshape(raw_data, shape=[-1, 24, 24, 1])
W = tf.Variable(tf.random_normal([5, 5, 1, 32]))   # 32 filters of 5x5x1
b = tf.Variable(tf.random_normal([32]))

conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
conv_with_b = tf.nn.bias_add(conv, b)
conv_out = tf.nn.relu(conv_with_b)

k = 2
maxpool = tf.nn.max_pool(conv_out, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')

with tf.Session() as sess:
    # FIX: tf.initialize_all_variables() has been deprecated since TF 0.12;
    # tf.global_variables_initializer() is its drop-in replacement.
    sess.run(tf.global_variables_initializer())
    W_val = sess.run(W)
    show_weights(W_val, 'step0_weights.png')

    conv_val = sess.run(conv)
    show_conv_results(conv_val, 'step1_convs.png')
    print(np.shape(conv_val))

    conv_out_val = sess.run(conv_out)
    show_conv_results(conv_out_val, 'step2_conv_outs.png')
    print(np.shape(conv_out_val))

    maxpool_val = sess.run(maxpool)
    show_conv_results(maxpool_val, 'step3_maxpool.png')
    print(np.shape(maxpool_val))
| mit |
areeda/gwpy | examples/frequencyseries/transfer_function.py | 3 | 2451 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Plotting a transfer function
I would like to study how a signal transfers from one part of the
interferometer to another.
Specifically, it is interesting to measure the amplitude transfer of
ground motion through the HEPI system.
"""
__author__ = "Duncan Macleod <duncan.macleod@ligo.org>"
__currentmodule__ = 'gwpy.timeseries'

# Interactive mode only when run as a script, so plots appear immediately.
if __name__ == '__main__':
    from matplotlib import pyplot
    pyplot.ion()

# Before anything else, we import the objects we will need:
from gwpy.time import tconvert
from gwpy.timeseries import TimeSeriesDict
from gwpy.plot import BodePlot

# and set the times of our query, and the channels we want:
start = tconvert('May 27 2014 04:00')
end = start + 1800
gndchannel = 'L1:ISI-GND_STS_ITMY_Z_DQ'
hpichannel = 'L1:HPI-ITMY_BLND_L4C_Z_IN1_DQ'

# We can call the :meth:`~TimeSeriesDict.get` method of the `TimeSeriesDict`
# to retrieve all data in a single operation:
data = TimeSeriesDict.get([gndchannel, hpichannel], start, end, verbose=True)
gnd = data[gndchannel]
hpi = data[hpichannel]

# Next, we can call the :meth:`~TimeSeries.average_fft` method to calculate
# an averaged, complex-valued FFT for each `TimeSeries`:
gndfft = gnd.average_fft(100, 50, window='hamming')
hpifft = hpi.average_fft(100, 50, window='hamming')

# Finally, we can divide one by the other to get the transfer function
# (up to the lower Nyquist)
size = min(gndfft.size, hpifft.size)
tf = hpifft[:size] / gndfft[:size]

# The `~gwpy.plot.BodePlot` knows how to separate a complex-valued
# `~gwpy.frequencyseries.FrequencySeries` into magnitude and phase:
plot = BodePlot(tf)
plot.maxes.set_title(
    r'L1 ITMY ground $\rightarrow$ HPI transfer function')
plot.maxes.set_ylim(-55, 50)
plot.show()
| gpl-3.0 |
chase-qi/workload-automation | wlauto/instrumentation/energy_model/__init__.py | 2 | 42026 | # Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#pylint: disable=attribute-defined-outside-init,access-member-before-definition,redefined-outer-name
from __future__ import division
import math
import os
import time
from base64 import b64encode
from collections import Counter, namedtuple
from tempfile import mkstemp, mktemp
try:
import jinja2
import pandas as pd
import matplotlib
matplotlib.use('AGG')
import matplotlib.pyplot as plt
import numpy as np
low_filter = np.vectorize(lambda x: x > 0 and x or 0) # pylint: disable=no-member
import_error = None
except ImportError as e:
import_error = e
jinja2 = None
pd = None
plt = None
np = None
low_filter = None
from wlauto import Instrument, Parameter, File
from wlauto.exceptions import ConfigError, InstrumentError, DeviceError
from wlauto.instrumentation import instrument_is_installed
from wlauto.utils.types import caseless_string, list_or_caseless_string, list_of_ints
from wlauto.utils.misc import list_to_mask
# File names of the CSV artifacts written to the run's output directory.
FREQ_TABLE_FILE = 'frequency_power_perf_data.csv'
CPUS_TABLE_FILE = 'projected_cap_power.csv'
MEASURED_CPUS_TABLE_FILE = 'measured_cap_power.csv'
IDLE_TABLE_FILE = 'idle_power_perf_data.csv'
# Jinja2 template names, resolved through the WA resource resolver.
REPORT_TEMPLATE_FILE = 'report.template'
EM_TEMPLATE_FILE = 'em.template'

# power: idle-state power draw (mW, per power_scaling_factor convention).
IdlePowerState = namedtuple('IdlePowerState', ['power'])
# cap: scheduler capacity value (0-1024); power: power draw at that capacity.
CapPowerState = namedtuple('CapPowerState', ['cap', 'power'])
class EnergyModel(object):
    """Accumulator for the idle and capacity power states of a big.LITTLE system.

    Entries are kept in four pairs of lists: per cluster ('big'/'little') and,
    within each cluster, split into cluster-level and per-core contributions.
    """

    def __init__(self):
        self.big_cluster_idle_states = []
        self.little_cluster_idle_states = []
        self.big_cluster_cap_states = []
        self.little_cluster_cap_states = []
        self.big_core_idle_states = []
        self.little_core_idle_states = []
        self.big_core_cap_states = []
        self.little_core_cap_states = []

    def add_cap_entry(self, cluster, perf, clust_pow, core_pow):
        """Record one capacity (P-state) entry for *cluster*.

        :raises ValueError: if *cluster* is neither 'big' nor 'little'.
        """
        destinations = {
            'big': (self.big_cluster_cap_states, self.big_core_cap_states),
            'little': (self.little_cluster_cap_states, self.little_core_cap_states),
        }
        if cluster not in destinations:
            raise ValueError('Unexpected cluster: {}'.format(cluster))
        cluster_states, core_states = destinations[cluster]
        cluster_states.append(CapPowerState(perf, clust_pow))
        core_states.append(CapPowerState(perf, core_pow))

    def add_cluster_idle(self, cluster, values):
        """Record cluster-level idle-state power values for *cluster*."""
        for value in values:
            self._idle_states(cluster, core=False).append(IdlePowerState(value))

    def add_core_idle(self, cluster, values):
        """Record per-core idle-state power values for *cluster*."""
        for value in values:
            self._idle_states(cluster, core=True).append(IdlePowerState(value))

    def _idle_states(self, cluster, core):
        # Resolve the destination list; validates the cluster name exactly as
        # the public methods' contract requires (ValueError on unknown names).
        if cluster == 'big':
            return self.big_core_idle_states if core else self.big_cluster_idle_states
        if cluster == 'little':
            return self.little_core_idle_states if core else self.little_cluster_idle_states
        raise ValueError('Unexpected cluster: {}'.format(cluster))
class PowerPerformanceAnalysis(object):
    """Summarise big-vs-little power/performance at their highest common frequency.

    Builds ``self.summary`` with the shared comparison frequency, the
    big/little single-core performance and power ratios at that frequency,
    and the overall single-core maxima.
    """

    def __init__(self, data):
        self.summary = {}
        shared_freqs = (set(data[data.cluster == 'big'].frequency.unique()) &
                        set(data[data.cluster == 'little'].frequency.unique()))
        # Compare the clusters at the highest frequency both support.
        freq = max(shared_freqs)
        self.summary['frequency'] = freq

        def single_core_row(cluster):
            return data[(data.cluster == cluster) &
                        (data.frequency == freq) &
                        (data.cpus == 1)]

        big_row = single_core_row('big')
        little_row = single_core_row('little')
        self.summary['performance_ratio'] = big_row.performance.item() / little_row.performance.item()
        self.summary['power_ratio'] = big_row.power.item() / little_row.power.item()
        single_core = data[data.cpus == 1]
        self.summary['max_performance'] = single_core.performance.max()
        self.summary['max_power'] = single_core.power.max()
def build_energy_model(freq_power_table, cpus_power, idle_power, first_cluster_idle_state):
    """Construct an :class:`EnergyModel` from the measured data tables.

    :param freq_power_table: per-(cluster, cpus, frequency) power/performance rows.
    :param cpus_power: projected power table with a (cluster, cpus) column
        MultiIndex; column ``'cluster'`` holds cluster-only power and ``'1'``
        single-core power (as produced by ``get_cpus_power_table``).
    :param idle_power: per-(cluster, cpus, state) idle power rows.
    :param first_cluster_idle_state: index of the first idle state that affects
        the whole cluster; shallower states are treated as per-core.
    :returns: populated :class:`EnergyModel`.
    """
    # pylint: disable=too-many-locals
    em = EnergyModel()
    idle_power_sc = idle_power[idle_power.cpus == 1]
    perf_data = get_normalized_single_core_data(freq_power_table)
    for cluster in ['little', 'big']:
        cluster_cpus_power = cpus_power[cluster].dropna()
        cluster_power = cluster_cpus_power['cluster'].apply(int)
        # Core power is single-core power with the cluster contribution removed.
        core_power = (cluster_cpus_power['1'] - cluster_power).apply(int)
        # Scale normalized performance (0-100) onto the scheduler's 0-1024
        # capacity range.
        performance = (perf_data[perf_data.cluster == cluster].performance_norm * 1024 / 100).apply(int)
        for perf, clust_pow, core_pow in zip(performance, cluster_power, core_power):
            em.add_cap_entry(cluster, perf, clust_pow, core_pow)
        all_idle_power = idle_power_sc[idle_power_sc.cluster == cluster].power.values
        # CORE idle states
        # We want the delta of each state w.r.t. the power
        # consumption of the shallowest one at this level (core_ref);
        # low_filter clamps negative deltas to zero.
        idle_core_power = low_filter(all_idle_power[:first_cluster_idle_state] -
                                     all_idle_power[first_cluster_idle_state - 1])
        # CLUSTER idle states
        # We want the absolute value of each idle state
        idle_cluster_power = low_filter(all_idle_power[first_cluster_idle_state - 1:])
        em.add_cluster_idle(cluster, idle_cluster_power)
        em.add_core_idle(cluster, idle_core_power)
    return em
def generate_em_c_file(em, big_core, little_core, em_template_file, outfile):
    """Render the energy-model C source from its jinja2 template.

    Writes the rendered text to *outfile* and returns it.
    """
    with open(em_template_file) as template_fh:
        em_text = jinja2.Template(template_fh.read()).render(
            big_core=big_core,
            little_core=little_core,
            em=em,
        )
    with open(outfile, 'w') as out_fh:
        out_fh.write(em_text)
    return em_text
def generate_report(freq_power_table, measured_cpus_table, cpus_table, idle_power_table,  # pylint: disable=unused-argument
                    report_template_file, device_name, em_text, outfile):
    """Render the HTML energy-model report and write it to *outfile*.

    Embeds the data tables (as HTML), the base64-encoded plots, and the
    generated energy-model C source into the jinja2 report template.
    Returns the rendered HTML.  *measured_cpus_table* is currently unused
    (hence the pylint disable) but kept for interface stability.
    """
    # pylint: disable=too-many-locals
    cap_power_analysis = PowerPerformanceAnalysis(freq_power_table)
    single_core_norm = get_normalized_single_core_data(freq_power_table)
    cap_power_plot = get_cap_power_plot(single_core_norm)
    idle_power_plot = get_idle_power_plot(idle_power_table)
    # One subplot per cluster; reversed so 'little' is plotted first.
    fig, axes = plt.subplots(1, 2)
    fig.set_size_inches(16, 8)
    for i, cluster in enumerate(reversed(cpus_table.columns.levels[0])):
        projected = cpus_table[cluster].dropna(subset=['1'])
        plot_cpus_table(projected, axes[i], cluster)
    cpus_plot_data = get_figure_data(fig)
    with open(report_template_file) as fh:
        report_template = jinja2.Template(fh.read())
    html = report_template.render(
        device_name=device_name,
        freq_power_table=freq_power_table.set_index(['cluster', 'cpus', 'frequency']).to_html(),
        cap_power_analysis=cap_power_analysis,
        cap_power_plot=get_figure_data(cap_power_plot),
        idle_power_table=idle_power_table.set_index(['cluster', 'cpus', 'state']).to_html(),
        idle_power_plot=get_figure_data(idle_power_plot),
        cpus_table=cpus_table.to_html(),
        cpus_plot=cpus_plot_data,
        em_text=em_text,
    )
    with open(outfile, 'w') as wfh:
        wfh.write(html)
    return html
def wa_result_to_power_perf_table(df, performance_metric, index):
    """Aggregate long-format WA results into a power/performance table.

    Pivots metric rows into columns, then computes per-*index* mean, standard
    deviation, sample count and 95% confidence error for each metric.  The
    column named by *performance_metric* is renamed to ``performance`` (and
    ``performance_std`` / ``performance_error``); all other column names have
    spaces replaced by underscores.  Columns are returned sorted, with the
    index flattened back into regular columns.
    """
    pivoted = df.pivot_table(index=index + ['iteration'],
                             columns='metric', values='value').reset_index()
    grouped = pivoted.groupby(index)
    means = grouped.mean()
    stds = grouped.std()
    stds.columns = ['{} std'.format(c) for c in stds.columns]
    counts = grouped.count()
    counts.columns = ['{} count'.format(c) for c in counts.columns]
    sqrt_counts = counts.apply(lambda col: col.apply(math.sqrt))
    sqrt_counts.columns = stds.columns  # match column names for division
    # 1.96 standard errors == 95% confidence interval.
    errors = 1.96 * stds / sqrt_counts
    errors.columns = ['{} error'.format(c) for c in means.columns]
    result = pd.concat([means, stds, counts, errors], axis=1)
    # 'iteration' was only needed for the pivot; drop all its derived columns.
    for suffix in ('', ' std', ' count', ' error'):
        del result['iteration' + suffix]
    renames = {
        performance_metric: 'performance',
        performance_metric + ' std': 'performance_std',
        performance_metric + ' error': 'performance_error',
    }
    result.columns = [renames.get(c, c.replace(' ', '_')) for c in result.columns]
    result = result[sorted(result.columns)]
    result.reset_index(inplace=True)
    return result
def get_figure_data(fig, fmt='png'):
    """Render *fig* to a temporary file and return its base64-encoded bytes.

    :param fig: a matplotlib figure (anything with a ``savefig`` method).
    :param fmt: image format passed to ``savefig`` (default ``'png'``).
    :returns: base64-encoded image data.
    """
    # mkstemp (unlike the deprecated, race-prone mktemp) creates the file
    # atomically, so another process cannot hijack the path between name
    # generation and the write.
    fd, tmp = mkstemp(suffix='.' + fmt)
    try:
        os.close(fd)  # savefig (re)opens the path itself
        fig.savefig(tmp, format=fmt, bbox_inches='tight')
        with open(tmp, 'rb') as fh:
            image_data = b64encode(fh.read())
    finally:
        # Remove the temp file even if rendering fails (the original leaked
        # it on error).
        os.remove(tmp)
    return image_data
def get_normalized_single_core_data(data):
    """Return the single-core rows of *data* with normalized metric columns.

    Keeps only rows where ``cpus == 1`` and both power and performance are
    finite, then adds integer ``performance_norm`` and ``power_norm`` columns
    scaled so the column maximum maps to 100.
    """
    # pylint: disable=no-member
    valid = (data.cpus == 1) & np.isfinite(data.performance) & np.isfinite(data.power)
    single_core = data[valid].copy()
    for column in ('performance', 'power'):
        scaled = single_core[column] / single_core[column].max() * 100
        single_core['{}_norm'.format(column)] = scaled.apply(int)
    return single_core
def get_cap_power_plot(data_single_core):
    """Plot normalized power vs normalized performance for both clusters.

    :param data_single_core: output of ``get_normalized_single_core_data``
        (must contain ``performance_norm`` and ``power_norm`` columns).
    :returns: the matplotlib figure.
    """
    big_single_core = data_single_core[(data_single_core.cluster == 'big') &
                                       (data_single_core.cpus == 1)]
    little_single_core = data_single_core[(data_single_core.cluster == 'little') &
                                          (data_single_core.cpus == 1)]
    fig, axes = plt.subplots(1, 1, figsize=(12, 8))
    axes.plot(big_single_core.performance_norm,
              big_single_core.power_norm,
              marker='o')
    axes.plot(little_single_core.performance_norm,
              little_single_core.power_norm,
              marker='o')
    # Both axes are normalized to 100; leave a small margin past the maximum.
    axes.set_xlim(0, 105)
    axes.set_ylim(0, 105)
    axes.set_xlabel('Performance (Normalized)')
    axes.set_ylabel('Power (Normalized)')
    axes.grid()
    # Legend order matches the plot calls above: big first, then little.
    axes.legend(['big cluster', 'little cluster'], loc=0)
    return fig
def get_idle_power_plot(df):
    """Plot per-cluster idle power as bar charts (one subplot per cluster).

    :param df: idle power table with ``cluster``, ``state``, ``cpus``,
        ``power`` and ``power_error`` columns.
    :returns: the matplotlib figure.
    """
    fig, axes = plt.subplots(1, 2, figsize=(15, 7))
    for cluster, ax in zip(['little', 'big'], axes):
        # One bar group per idle state, one bar per online-cpu count, with
        # 95%-confidence error bars from the matching power_error pivot.
        data = df[df.cluster == cluster].pivot_table(index=['state'], columns='cpus', values='power')
        err = df[df.cluster == cluster].pivot_table(index=['state'], columns='cpus', values='power_error')
        data.plot(kind='bar', ax=ax, rot=30, yerr=err)
        ax.set_title('{} cluster'.format(cluster))
        ax.set_xlim(-1, len(data.columns) - 0.5)
        ax.set_ylabel('Power (mW)')
    return fig
def fit_polynomial(s, n):
    """Least-squares fit a degree-*n* polynomial to series *s*.

    :param s: pandas Series; its index supplies the x values.
    :param n: polynomial degree.
    :returns: the fitted polynomial evaluated at every index point.
    """
    # pylint: disable=no-member
    poly = np.poly1d(np.polyfit(s.index, s.values, n))
    return poly(s.index)
def get_cpus_power_table(data, index, opps, leak_factors):  # pylint: disable=too-many-locals
    """Build measured and projected (cluster, cpus) power tables.

    :param data: long-format power table with *index*, ``cluster``, ``cpus``
        and ``power`` columns.
    :param index: column to pivot on (e.g. ``'frequency'``).
    :param opps: per-cluster {frequency: voltage} DataFrames (or None) used
        for leakage correction.
    :param leak_factors: per-cluster leakage scale factors.
    :returns: ``(measured_table, projected_table)`` — both with a
        (cluster, cpus-or-'cluster') column MultiIndex.

    NOTE(review): this function is Python-2-only as written — it relies on
    the ``cmp`` builtin and ``sorted(..., cmp=...)``, both removed in
    Python 3; ``columns.set_levels(..., inplace=True)`` is also deprecated
    in modern pandas.  Confirm the target interpreter before reuse.
    """
    # pylint: disable=no-member
    power_table = data[[index, 'cluster', 'cpus', 'power']].pivot_table(index=index,
                                                                        columns=['cluster', 'cpus'],
                                                                        values='power')
    bs_power_table = pd.DataFrame(index=power_table.index, columns=power_table.columns)
    for cluster in power_table.columns.levels[0]:
        # Extrapolate "0 running cores" (cluster-only) power by subtracting
        # the 1->2 core increment from the single-core measurement.
        power_table[cluster, 0] = (power_table[cluster, 1] -
                                   (power_table[cluster, 2] -
                                    power_table[cluster, 1]))
        # Smooth the 1- and 2-core measurements with a quadratic fit.
        bs_power_table.loc[power_table[cluster, 1].notnull(), (cluster, 1)] = fit_polynomial(power_table[cluster, 1].dropna(), 2)
        bs_power_table.loc[power_table[cluster, 2].notnull(), (cluster, 2)] = fit_polynomial(power_table[cluster, 2].dropna(), 2)
        if opps[cluster] is None:
            # No OPP/voltage data: same linear extrapolation as above.
            bs_power_table.loc[bs_power_table[cluster, 1].notnull(), (cluster, 0)] = \
                (2 * power_table[cluster, 1] - power_table[cluster, 2]).values
        else:
            # Leakage scales with V^3 (factor of 2: two cores' worth of
            # leakage); normalised to a 0.9 V reference, then taken as a
            # delta from the lowest OPP.
            voltages = opps[cluster].set_index('frequency').sort_index()
            leakage = leak_factors[cluster] * 2 * voltages['voltage']**3 / 0.9**3
            leakage_delta = leakage - leakage[leakage.index[0]]
            bs_power_table.loc[:, (cluster, 0)] = \
                (2 * bs_power_table[cluster, 1] + leakage_delta - bs_power_table[cluster, 2])
    # re-order columns and rename column '0' to 'cluster'
    # (sort: cluster name descending, cpu count ascending)
    power_table = power_table[sorted(power_table.columns,
                                     cmp=lambda x, y: cmp(y[0], x[0]) or cmp(x[1], y[1]))]
    bs_power_table = bs_power_table[sorted(bs_power_table.columns,
                                           cmp=lambda x, y: cmp(y[0], x[0]) or cmp(x[1], y[1]))]
    old_levels = power_table.columns.levels
    power_table.columns.set_levels([old_levels[0], list(map(str, old_levels[1])[:-1]) + ['cluster']],
                                   inplace=True)
    bs_power_table.columns.set_levels([old_levels[0], list(map(str, old_levels[1])[:-1]) + ['cluster']],
                                      inplace=True)
    return power_table, bs_power_table
def plot_cpus_table(projected, ax, cluster):
    """Plot a projected per-cpu-count power table onto an existing axis.

    :param projected: single-cluster slice of the projected power table
        (rows: frequency index; columns: cpu count / 'cluster').
    :param ax: matplotlib axis to draw on.
    :param cluster: cluster name, used only for the subplot title.
    """
    # Transpose so the x axis is the cpu-count/'cluster' columns and each
    # line is one frequency.
    projected.T.plot(ax=ax, marker='o')
    ax.set_title('{} cluster'.format(cluster))
    ax.set_xticklabels(projected.columns)
    ax.set_xticks(range(0, 5))
    ax.set_xlim(-0.5, len(projected.columns) - 0.5)
    ax.set_ylabel('Power (mW)')
    ax.grid(True)
def opp_table(d):
    """Convert a {frequency: voltage} mapping into a two-column DataFrame.

    Used as a Parameter ``kind`` converter; passes ``None`` straight through.
    """
    if d is None:
        return None
    rows = list(d.items())
    return pd.DataFrame(rows, columns=['frequency', 'voltage'])
class EnergyModelInstrument(Instrument):

    name = 'energy_model'
    # BUG FIX: this attribute was misspelled ``desicription``, so WA never
    # picked up the extension description.
    description = """
    Generates a power mode for the device based on specified workload.

    This insturment will execute the workload specified by the agenda (currently, only ``sysbench`` is
    supported) and will use the resulting performance and power measurments to generate a power mode for
    the device.

    This instrument requires certain features to be present in the kernel:

    1. cgroups and cpusets must be enabled.
    2. cpufreq and userspace governor must be enabled.
    3. cpuidle must be enabled.

    """

    parameters = [
        Parameter('device_name', kind=caseless_string,
                  description="""The name of the device to be used in generating the model. If not specified,
                                 ``device.name`` will be used. """),
        Parameter('big_core', kind=caseless_string,
                  description="""The name of the "big" core in the big.LITTLE system; must match
                                 one of the values in ``device.core_names``. """),
        Parameter('performance_metric', kind=caseless_string, mandatory=True,
                  description="""Metric to be used as the performance indicator."""),
        Parameter('power_metric', kind=list_or_caseless_string,
                  description="""Metric to be used as the power indicator. The value may contain a
                                 ``{core}`` format specifier that will be replaced with names of big
                                 and little cores to drive the name of the metric for that cluster.
                                 Ether this or ``energy_metric`` must be specified but not both."""),
        Parameter('energy_metric', kind=list_or_caseless_string,
                  description="""Metric to be used as the energy indicator. The value may contain a
                                 ``{core}`` format specifier that will be replaced with names of big
                                 and little cores to drive the name of the metric for that cluster.
                                 this metric will be used to derive power by deviding through by
                                 execution time. Either this or ``power_metric`` must be specified, but
                                 not both."""),
        Parameter('power_scaling_factor', kind=float, default=1.0,
                  description="""Power model specfies power in milliWatts. This is a scaling factor that
                                 power_metric values will be multiplied by to get milliWatts."""),
        Parameter('big_frequencies', kind=list_of_ints,
                  description="""List of frequencies to be used for big cores. These frequencies must
                                 be supported by the cores. If this is not specified, all available
                                 frequencies for the core (as read from cpufreq) will be used."""),
        Parameter('little_frequencies', kind=list_of_ints,
                  description="""List of frequencies to be used for little cores. These frequencies must
                                 be supported by the cores. If this is not specified, all available
                                 frequencies for the core (as read from cpufreq) will be used."""),
        Parameter('idle_workload', kind=str, default='idle',
                  description="Workload to be used while measuring idle power."),
        Parameter('idle_workload_params', kind=dict, default={},
                  description="Parameter to pass to the idle workload."),
        Parameter('first_cluster_idle_state', kind=int, default=-1,
                  description='''The index of the first cluster idle state on the device. Previous states
                                 are assumed to be core idles. The default is ``-1``, i.e. only the last
                                 idle state is assumed to affect the entire cluster.'''),
        Parameter('no_hotplug', kind=bool, default=False,
                  description='''This options allows running the instrument without hotpluging cores on and off.
                                 Disabling hotplugging will most likely produce a less accurate power model.'''),
        Parameter('num_of_freqs_to_thermal_adjust', kind=int, default=0,
                  description="""The number of frequencies begining from the highest, to be adjusted for
                                 the thermal effect."""),
        Parameter('big_opps', kind=opp_table,
                  description="""OPP table mapping frequency to voltage (kHz --> mV) for the big cluster."""),
        Parameter('little_opps', kind=opp_table,
                  description="""OPP table mapping frequency to voltage (kHz --> mV) for the little cluster."""),
        Parameter('big_leakage', kind=int, default=120,
                  description="""
                  Leakage factor for the big cluster (this is specific to a particular core implementation).
                  """),
        Parameter('little_leakage', kind=int, default=60,
                  description="""
                  Leakage factor for the little cluster (this is specific to a particular core implementation).
                  """),
    ]

    def validate(self):
        """Check required libraries/capabilities and fill in config defaults."""
        if import_error:
            message = 'energy_model instrument requires pandas, jinja2 and matplotlib Python packages to be installed; got: "{}"'
            raise InstrumentError(message.format(import_error.message))
        for capability in ['cgroups', 'cpuidle']:
            if not self.device.has(capability):
                message = 'The Device does not appear to support {}; does it have the right module installed?'
                raise ConfigError(message.format(capability))
        device_cores = set(self.device.core_names)
        if (self.power_metric and self.energy_metric) or not (self.power_metric or self.energy_metric):
            raise ConfigError('Either power_metric or energy_metric must be specified (but not both).')
        if not device_cores:
            raise ConfigError('The Device does not appear to have core_names configured.')
        elif len(device_cores) != 2:
            raise ConfigError('The Device does not appear to be a big.LITTLE device.')
        if self.big_core and self.big_core not in self.device.core_names:
            # BUG FIX: the original message read 'is in divice' — inverted
            # and misspelled; this branch fires when the core is NOT valid.
            raise ConfigError('Specified big_core "{}" is not a core of device {}'.format(self.big_core,
                                                                                          self.device.name))
        if not self.big_core:
            self.big_core = self.device.core_names[-1]  # the last core is usually "big" in existing big.LITTLE devices
        if not self.device_name:
            self.device_name = self.device.name
        if self.num_of_freqs_to_thermal_adjust and not instrument_is_installed('daq'):
            self.logger.warn('Adjustment for thermal effect requires daq instrument. Disabling adjustment')
            self.num_of_freqs_to_thermal_adjust = 0

    def initialize(self, context):
        """Prepare the device and build the job queue for model generation."""
        self.number_of_cpus = {}
        self.report_template_file = context.resolver.get(File(self, REPORT_TEMPLATE_FILE))
        self.em_template_file = context.resolver.get(File(self, EM_TEMPLATE_FILE))
        self.little_core = (set(self.device.core_names) - set([self.big_core])).pop()
        self.perform_runtime_validation()
        self.enable_all_cores()
        self.configure_clusters()
        self.discover_idle_states()
        self.disable_thermal_management()
        self.initialize_job_queue(context)
        self.initialize_result_tracking()

    def setup(self, context):
        """For idle specs, disable all idle states deeper than the measured one."""
        if not context.spec.label.startswith('idle_'):
            return
        for idle_state in self.get_device_idle_states(self.measured_cluster):
            if idle_state.index > context.spec.idle_state_index:
                idle_state.disable = 1
            else:
                idle_state.disable = 0

    def fast_start(self, context):  # pylint: disable=unused-argument
        """Record the iteration start time (used to derive power from energy)."""
        self.start_time = time.time()

    def fast_stop(self, context):  # pylint: disable=unused-argument
        """Record the iteration run time."""
        self.run_time = time.time() - self.start_time

    def on_iteration_start(self, context):
        """Point measurement at the cluster this spec targets."""
        self.setup_measurement(context.spec.cluster)

    def thermal_correction(self, context):
        """Return thermally-adjusted power for high big-cluster frequencies (0 if N/A)."""
        if not self.num_of_freqs_to_thermal_adjust or self.num_of_freqs_to_thermal_adjust > len(self.big_frequencies):
            return 0
        freqs = self.big_frequencies[-self.num_of_freqs_to_thermal_adjust:]
        spec = context.result.spec
        if spec.frequency not in freqs:
            return 0
        data_path = os.path.join(context.output_directory, 'daq', '{}.csv'.format(self.big_core))
        data = pd.read_csv(data_path)['power']
        return _adjust_for_thermal(data, filt_method=lambda x: pd.rolling_median(x, 1000), thresh=0.9, window=5000)

    # slow to make sure power results have been generated
    def slow_update_result(self, context):  # pylint: disable=too-many-branches
        """Accumulate this iteration's performance/power into freq or idle data."""
        spec = context.result.spec
        cluster = spec.cluster
        is_freq_iteration = spec.label.startswith('freq_')
        perf_metric = 0
        power_metric = 0
        thermal_adjusted_power = 0
        if is_freq_iteration and cluster == 'big':
            thermal_adjusted_power = self.thermal_correction(context)
        for metric in context.result.metrics:
            if metric.name == self.performance_metric:
                perf_metric = metric.value
            elif thermal_adjusted_power and metric.name in self.big_power_metrics:
                power_metric += thermal_adjusted_power * self.power_scaling_factor
            elif (cluster == 'big') and metric.name in self.big_power_metrics:
                power_metric += metric.value * self.power_scaling_factor
            elif (cluster == 'little') and metric.name in self.little_power_metrics:
                power_metric += metric.value * self.power_scaling_factor
            elif thermal_adjusted_power and metric.name in self.big_energy_metrics:
                power_metric += thermal_adjusted_power / self.run_time * self.power_scaling_factor
            elif (cluster == 'big') and metric.name in self.big_energy_metrics:
                power_metric += metric.value / self.run_time * self.power_scaling_factor
            elif (cluster == 'little') and metric.name in self.little_energy_metrics:
                power_metric += metric.value / self.run_time * self.power_scaling_factor
        if not (power_metric and (perf_metric or not is_freq_iteration)):
            message = 'Incomplete results for {} iteration{}'
            raise InstrumentError(message.format(context.result.spec.id, context.current_iteration))
        if is_freq_iteration:
            index_matter = [cluster, spec.num_cpus,
                            spec.frequency, context.result.iteration]
            data = self.freq_data
        else:
            index_matter = [cluster, spec.num_cpus,
                            spec.idle_state_id, spec.idle_state_desc, context.result.iteration]
            data = self.idle_data
        if self.no_hotplug:
            # due to that fact that hotpluging was disabled, power has to be artificially scaled
            # to the number of cores that should have been active if hotplugging had occurred.
            power_metric = spec.num_cpus * (power_metric / self.number_of_cpus[cluster])
        data.append(index_matter + ['performance', perf_metric])
        data.append(index_matter + ['power', power_metric])

    def before_overall_results_processing(self, context):
        """Aggregate collected data, build the energy model and emit artifacts."""
        # pylint: disable=too-many-locals
        if not self.idle_data or not self.freq_data:
            self.logger.warning('Run aborted early; not generating energy_model.')
            return
        output_directory = os.path.join(context.output_directory, 'energy_model')
        os.makedirs(output_directory)
        df = pd.DataFrame(self.idle_data, columns=['cluster', 'cpus', 'state_id',
                                                   'state', 'iteration', 'metric', 'value'])
        idle_power_table = wa_result_to_power_perf_table(df, '', index=['cluster', 'cpus', 'state'])
        idle_output = os.path.join(output_directory, IDLE_TABLE_FILE)
        with open(idle_output, 'w') as wfh:
            idle_power_table.to_csv(wfh, index=False)
        context.add_artifact('idle_power_table', idle_output, 'export')
        df = pd.DataFrame(self.freq_data,
                          columns=['cluster', 'cpus', 'frequency', 'iteration', 'metric', 'value'])
        freq_power_table = wa_result_to_power_perf_table(df, self.performance_metric,
                                                         index=['cluster', 'cpus', 'frequency'])
        freq_output = os.path.join(output_directory, FREQ_TABLE_FILE)
        with open(freq_output, 'w') as wfh:
            freq_power_table.to_csv(wfh, index=False)
        context.add_artifact('freq_power_table', freq_output, 'export')
        if self.big_opps is None or self.little_opps is None:
            message = 'OPPs not specified for one or both clusters; cluster power will not be adjusted for leakage.'
            self.logger.warning(message)
        opps = {'big': self.big_opps, 'little': self.little_opps}
        leakages = {'big': self.big_leakage, 'little': self.little_leakage}
        try:
            measured_cpus_table, cpus_table = get_cpus_power_table(freq_power_table, 'frequency', opps, leakages)
        except (ValueError, KeyError, IndexError) as e:
            self.logger.error('Could not create cpu power tables: {}'.format(e))
            return
        measured_cpus_output = os.path.join(output_directory, MEASURED_CPUS_TABLE_FILE)
        with open(measured_cpus_output, 'w') as wfh:
            measured_cpus_table.to_csv(wfh)
        context.add_artifact('measured_cpus_table', measured_cpus_output, 'export')
        cpus_output = os.path.join(output_directory, CPUS_TABLE_FILE)
        with open(cpus_output, 'w') as wfh:
            cpus_table.to_csv(wfh)
        context.add_artifact('cpus_table', cpus_output, 'export')
        em = build_energy_model(freq_power_table, cpus_table, idle_power_table, self.first_cluster_idle_state)
        em_file = os.path.join(output_directory, '{}_em.c'.format(self.device_name))
        em_text = generate_em_c_file(em, self.big_core, self.little_core,
                                     self.em_template_file, em_file)
        context.add_artifact('em', em_file, 'data')
        report_file = os.path.join(output_directory, 'report.html')
        generate_report(freq_power_table, measured_cpus_table, cpus_table,
                        idle_power_table, self.report_template_file,
                        self.device_name, em_text, report_file)
        context.add_artifact('pm_report', report_file, 'export')

    def initialize_result_tracking(self):
        """Expand the {core} placeholders into concrete per-cluster metric names."""
        self.freq_data = []
        self.idle_data = []
        self.big_power_metrics = []
        self.little_power_metrics = []
        self.big_energy_metrics = []
        self.little_energy_metrics = []
        if self.power_metric:
            self.big_power_metrics = [pm.format(core=self.big_core) for pm in self.power_metric]
            self.little_power_metrics = [pm.format(core=self.little_core) for pm in self.power_metric]
        else:  # must be energy_metric
            self.big_energy_metrics = [em.format(core=self.big_core) for em in self.energy_metric]
            self.little_energy_metrics = [em.format(core=self.little_core) for em in self.energy_metric]

    def configure_clusters(self):
        """Create per-cluster cpuset cgroups and switch to the userspace governor."""
        self.measured_cores = None
        self.measuring_cores = None
        self.cpuset = self.device.get_cgroup_controller('cpuset')
        self.cpuset.create_group('big', self.big_cpus, [0])
        self.cpuset.create_group('little', self.little_cpus, [0])
        for cluster in set(self.device.core_clusters):
            self.device.set_cluster_governor(cluster, 'userspace')

    def discover_idle_states(self):
        """Read available cpuidle states from one online cpu of each cluster."""
        online_cpu = self.device.get_online_cpus(self.big_core)[0]
        self.big_idle_states = self.device.get_cpuidle_states(online_cpu)
        online_cpu = self.device.get_online_cpus(self.little_core)[0]
        self.little_idle_states = self.device.get_cpuidle_states(online_cpu)
        if not (len(self.big_idle_states) >= 2 and len(self.little_idle_states) >= 2):
            raise DeviceError('There do not appeart to be at least two idle states '
                              'on at least one of the clusters.')

    def setup_measurement(self, measured):
        """Designate the measured cluster; the other cluster runs everything else."""
        measuring = 'big' if measured == 'little' else 'little'
        self.measured_cluster = measured
        self.measuring_cluster = measuring
        self.measured_cpus = self.big_cpus if measured == 'big' else self.little_cpus
        self.measuring_cpus = self.little_cpus if measured == 'big' else self.big_cpus
        self.reset()

    def reset(self):
        """Re-enable everything and pin all background tasks to the measuring cluster."""
        self.enable_all_cores()
        self.enable_all_idle_states()
        self.reset_cgroups()
        self.cpuset.move_all_tasks_to(self.measuring_cluster)
        server_process = 'adbd' if self.device.platform == 'android' else 'sshd'
        server_pids = self.device.get_pids_of(server_process)
        children_ps = [e for e in self.device.ps()
                       if e.ppid in server_pids and e.name != 'sshd']
        children_pids = [e.pid for e in children_ps]
        pids_to_move = server_pids + children_pids
        self.cpuset.root.add_tasks(pids_to_move)
        for pid in pids_to_move:
            try:
                self.device.execute('busybox taskset -p 0x{:x} {}'.format(list_to_mask(self.measuring_cpus), pid))
            except DeviceError:
                pass

    def enable_all_cores(self):
        """Bring every core online and cache the per-cluster online cpu lists."""
        counter = Counter(self.device.core_names)
        # .items() (rather than the Python-2-only .iteritems()) behaves
        # identically here and keeps the code forward-compatible.
        for core, number in counter.items():
            self.device.set_number_of_online_cpus(core, number)
        self.big_cpus = self.device.get_online_cpus(self.big_core)
        self.little_cpus = self.device.get_online_cpus(self.little_core)

    def enable_all_idle_states(self):
        """Clear the 'disable' flag on every idle state of every online cpu."""
        for cpu in self.device.online_cpus:
            for state in self.device.get_cpuidle_states(cpu):
                state.disable = 0

    def reset_cgroups(self):
        """Re-populate the big/little cpusets with the currently online cpus."""
        self.big_cpus = self.device.get_online_cpus(self.big_core)
        self.little_cpus = self.device.get_online_cpus(self.little_core)
        self.cpuset.big.set(self.big_cpus, 0)
        self.cpuset.little.set(self.little_cpus, 0)

    def perform_runtime_validation(self):
        """Validate root access, governor support and the requested frequencies."""
        if not self.device.is_rooted:
            raise InstrumentError('the device must be rooted to generate energy models')
        if 'userspace' not in self.device.list_available_cluster_governors(0):
            raise InstrumentError('userspace cpufreq governor must be enabled')
        error_message = 'Frequency {} is not supported by {} cores'
        available_frequencies = self.device.list_available_core_frequencies(self.big_core)
        if self.big_frequencies:
            for freq in self.big_frequencies:
                if freq not in available_frequencies:
                    raise ConfigError(error_message.format(freq, self.big_core))
        else:
            self.big_frequencies = available_frequencies
        available_frequencies = self.device.list_available_core_frequencies(self.little_core)
        if self.little_frequencies:
            for freq in self.little_frequencies:
                if freq not in available_frequencies:
                    raise ConfigError(error_message.format(freq, self.little_core))
        else:
            self.little_frequencies = available_frequencies

    def initialize_job_queue(self, context):
        """Replace the run's job queue with the generated per-cluster specs."""
        old_specs = []
        for job in context.runner.job_queue:
            if job.spec not in old_specs:
                old_specs.append(job.spec)
        new_specs = self.get_cluster_specs(old_specs, 'big', context)
        new_specs.extend(self.get_cluster_specs(old_specs, 'little', context))
        # Update config to refect jobs that will actually run.
        context.config.workload_specs = new_specs
        config_file = os.path.join(context.host_working_directory, 'run_config.json')
        with open(config_file, 'wb') as wfh:
            context.config.serialize(wfh)
        context.runner.init_queue(new_specs)

    def get_cluster_specs(self, old_specs, cluster, context):
        """Generate idle-state and frequency sweep specs for *cluster*."""
        core = self.get_core_name(cluster)
        self.number_of_cpus[cluster] = sum([1 for c in self.device.core_names if c == core])
        cluster_frequencies = self.get_frequencies_param(cluster)
        if not cluster_frequencies:
            raise InstrumentError('Could not read available frequencies for {}'.format(core))
        min_frequency = min(cluster_frequencies)
        idle_states = self.get_device_idle_states(cluster)
        new_specs = []
        # Idle sweep: one spec per (idle state, active cpu count), pinned to
        # the cluster's minimum frequency.
        for state in idle_states:
            for num_cpus in xrange(1, self.number_of_cpus[cluster] + 1):
                spec = old_specs[0].copy()
                spec.workload_name = self.idle_workload
                spec.workload_parameters = self.idle_workload_params
                spec.idle_state_id = state.id
                spec.idle_state_desc = state.desc
                spec.idle_state_index = state.index
                if not self.no_hotplug:
                    spec.runtime_parameters['{}_cores'.format(core)] = num_cpus
                spec.runtime_parameters['{}_frequency'.format(core)] = min_frequency
                spec.runtime_parameters['ui'] = 'off'
                spec.cluster = cluster
                spec.num_cpus = num_cpus
                spec.id = '{}_idle_{}_{}'.format(cluster, state.id, num_cpus)
                spec.label = 'idle_{}'.format(cluster)
                spec.number_of_iterations = old_specs[0].number_of_iterations
                spec.load(self.device, context.config.ext_loader)
                spec.workload.init_resources(context)
                spec.workload.validate()
                new_specs.append(spec)
        # Frequency sweep: one spec per (agenda workload, frequency, cpu count).
        for old_spec in old_specs:
            if old_spec.workload_name not in ['sysbench', 'dhrystone']:
                raise ConfigError('Only sysbench and dhrystone workloads currently supported for energy_model generation.')
            for freq in cluster_frequencies:
                for num_cpus in xrange(1, self.number_of_cpus[cluster] + 1):
                    spec = old_spec.copy()
                    spec.runtime_parameters['{}_frequency'.format(core)] = freq
                    if not self.no_hotplug:
                        spec.runtime_parameters['{}_cores'.format(core)] = num_cpus
                    spec.runtime_parameters['ui'] = 'off'
                    spec.id = '{}_{}_{}'.format(cluster, num_cpus, freq)
                    spec.label = 'freq_{}_{}'.format(cluster, spec.label)
                    spec.workload_parameters['taskset_mask'] = list_to_mask(self.get_cpus(cluster))
                    spec.workload_parameters['threads'] = num_cpus
                    if old_spec.workload_name == 'sysbench':
                        # max_requests set to an arbitrary high values to make sure
                        # sysbench runs for full duriation even on highly
                        # performant cores.
                        spec.workload_parameters['max_requests'] = 10000000
                    spec.cluster = cluster
                    spec.num_cpus = num_cpus
                    spec.frequency = freq
                    spec.load(self.device, context.config.ext_loader)
                    spec.workload.init_resources(context)
                    spec.workload.validate()
                    new_specs.append(spec)
        return new_specs

    def disable_thermal_management(self):
        """Disable every sysfs thermal zone so throttling does not skew results."""
        if self.device.file_exists('/sys/class/thermal/thermal_zone0'):
            tzone_paths = self.device.execute('ls /sys/class/thermal/thermal_zone*')
            for tzpath in tzone_paths.strip().split():
                mode_file = '{}/mode'.format(tzpath)
                if self.device.file_exists(mode_file):
                    self.device.set_sysfile_value(mode_file, 'disabled')

    def get_device_idle_states(self, cluster):
        """Return the idle states of all online cpus in *cluster*."""
        if cluster == 'big':
            online_cpus = self.device.get_online_cpus(self.big_core)
        else:
            online_cpus = self.device.get_online_cpus(self.little_core)
        idle_states = []
        for cpu in online_cpus:
            idle_states.extend(self.device.get_cpuidle_states(cpu))
        return idle_states

    def get_core_name(self, cluster):
        """Map a cluster name ('big'/'little') to its core name."""
        if cluster == 'big':
            return self.big_core
        else:
            return self.little_core

    def get_cpus(self, cluster):
        """Map a cluster name to its cached online cpu list."""
        if cluster == 'big':
            return self.big_cpus
        else:
            return self.little_cpus

    def get_frequencies_param(self, cluster):
        """Map a cluster name to its configured frequency list."""
        if cluster == 'big':
            return self.big_frequencies
        else:
            return self.little_frequencies
def _adjust_for_thermal(data, filt_method=lambda x: x, thresh=0.9, window=5000, tdiff_threshold=10000):
    """Return a thermally-adjusted average of *data*, or 0 if no usable region exists.

    Finds the last large upward jump and the first large downward jump in
    the (optionally filtered) trace, then averages a *window*-sized slice
    just after the upward jump ("low") and just before the downward jump
    ("high").  Returns 0 when the low average exceeds the high one, or
    when the two jumps are fewer than *tdiff_threshold* samples apart;
    otherwise returns the low average.

    NOTE(review): assumes *data* is a 1-D numpy array-like of samples —
    verify against callers.  "Large" means within *thresh* of the
    extreme first difference.
    """
    n = filt_method(data)
    # Drop NaNs both before and after differencing.
    n = n[~np.isnan(n)]  # pylint: disable=no-member
    d = np.diff(n)  # pylint: disable=no-member
    d = d[~np.isnan(d)]  # pylint: disable=no-member
    dmin = min(d)
    dmax = max(d)
    # Last index whose rise is within `thresh` of the biggest rise, and
    # first index whose drop is within `thresh` of the biggest drop.
    index_up = np.max((d > dmax * thresh).nonzero())  # pylint: disable=no-member
    index_down = np.min((d < dmin * thresh).nonzero())  # pylint: disable=no-member
    low_average = np.average(n[index_up:index_up + window])  # pylint: disable=no-member
    high_average = np.average(n[index_down - window:index_down])  # pylint: disable=no-member
    if low_average > high_average or index_down - index_up < tdiff_threshold:
        return 0
    else:
        return low_average
if __name__ == '__main__':
    # Standalone entry point: build an energy model (C source) and an HTML
    # report from previously collected measurement CSVs.
    import sys  # pylint: disable=wrong-import-position,wrong-import-order
    # argv[1]: directory with the measurement CSVs; argv[2]: output directory.
    indir, outdir = sys.argv[1], sys.argv[2]
    # Hard-coded target description (ODROID-XU3 big.LITTLE: A15 + A7).
    device_name = 'odroidxu3'
    big_core = 'a15'
    little_core = 'a7'
    first_cluster_idle_state = -1
    this_dir = os.path.dirname(__file__)
    report_template_file = os.path.join(this_dir, REPORT_TEMPLATE_FILE)
    em_template_file = os.path.join(this_dir, EM_TEMPLATE_FILE)
    freq_power_table = pd.read_csv(os.path.join(indir, FREQ_TABLE_FILE))
    # NOTE(review): pd.read_csv returns a single DataFrame; unpacking it into
    # two names iterates it, which yields column labels, not two tables --
    # the pylint disable suggests this is known, but verify it is intended.
    measured_cpus_table, cpus_table = pd.read_csv(os.path.join(indir, CPUS_TABLE_FILE),  # pylint: disable=unbalanced-tuple-unpacking
                                                  header=range(2), index_col=0)
    idle_power_table = pd.read_csv(os.path.join(indir, IDLE_TABLE_FILE))
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    report_file = os.path.join(outdir, 'report.html')
    em_file = os.path.join(outdir, '{}_em.c'.format(device_name))
    # Build the model, render it as C, then render the HTML report.
    em = build_energy_model(freq_power_table, cpus_table,
                            idle_power_table, first_cluster_idle_state)
    em_text = generate_em_c_file(em, big_core, little_core,
                                 em_template_file, em_file)
    generate_report(freq_power_table, measured_cpus_table, cpus_table,
                    idle_power_table, report_template_file, device_name,
                    em_text, report_file)
| apache-2.0 |
garibaldu/radioblobs | other_scores/kl3.py | 1 | 2900 | import sys
import numpy as np, numpy.random as rng
import pylab as pl, matplotlib.cm as cm
def do_exit():
    """Print the command-line usage and terminate via SystemExit."""
    prog = sys.argv[0]
    print('usage: python %s [image_size num_sources noise_size output_image_name]' % (prog))
    sys.exit('eg: python %s 100 3 2.0 mytestimage' % (prog))
def calc_score_everywhere(model_sigma):
    """Score a Gaussian blob model of width *model_sigma* centred at every
    pixel, and return the scores rescaled into [0, 1].

    Relies on the module-level globals N, X1, X2 (meshgrid coordinates)
    and the image y.
    """
    score = -100000 * np.ones(X1.shape)
    for midx1 in range(N):
        for midx2 in range(N):
            # Gaussian "model" blob centred at (midx1, midx2).
            sq_dist = (np.power((X1 - midx1) * 1.0 / model_sigma, 2.0) +
                       np.power((X2 - midx2) * 1.0 / model_sigma, 2.0))
            model = np.exp(-0.5 * sq_dist)
            # Model-weighted mean log-intensity of the image.
            score[midx1, midx2] = np.sum(np.ravel(model * np.log(y))) / np.sum(np.ravel(model))
    # Rescale so the result spans [0, 1].
    lo, hi = np.min(score), np.max(score)
    return (score - lo) / (hi - lo)
if __name__ == "__main__":
    # Parse the command line: either no extra arguments (use defaults) or
    # exactly four: image_size num_sources noise_size output_image_name.
    # NOTE(review): len(sys.argv) is always >= 1, so the first check is dead.
    if len(sys.argv) < 1:
        do_exit()
    if len(sys.argv) == 1:
        N, noise_size, num_sources, image_name = 60, 1.0, 2, 'test'
    else:  # extra arguments are being supplied
        if len(sys.argv) < 5: do_exit()
        N = int(sys.argv[1])  # image size
        num_sources = int(sys.argv[2])
        noise_size = float(sys.argv[3])
        image_name = sys.argv[4]
    sigma = 10.0  # length scale of blobs in 'truth'
    fig = pl.figure(figsize=(15,10))
    # make an "image": sum of num_sources Gaussian blobs at random centres
    x1 = np.arange(N)
    x2 = np.arange(N)
    X1, X2 = np.meshgrid(x1, x2)
    truth = np.zeros(X1.shape)
    for i in range(num_sources):
        midx1,midx2 = rng.random() * N, rng.random() * N
        expsum =np.power((X1-midx1)*1.0/sigma,2.0) + np.power((X2-midx2)*1.0/sigma,2.0)
        truth = truth + np.exp(-0.5*expsum)
    noise = rng.normal(0,noise_size,X1.shape)
    y = truth + noise
    # rescale to [0,1] and add small positive jitter (np.log(y) is taken
    # in calc_score_everywhere, so y must stay positive)
    y = (y-np.min(y))/(np.max(y)-np.min(y)) + np.abs(rng.normal(0,0.01,X1.shape))
    # draw the ground truth (as an image),
    pl.subplot(241)
    pl.imshow(truth,interpolation='nearest',cmap=cm.gray)
    pl.title('ground truth')
    # draw the noisy image.
    pl.subplot(242)
    pl.imshow(y,interpolation='nearest',cmap=cm.gray)
    pl.title('image')
    # one contour panel per model length scale, in subplots 4..8
    s=3
    for sig in (sigma*0.2, sigma*0.5, sigma, sigma*2.0, sigma*4.0):
        # Evaluate the reversed KL score, throughout the image
        KLRevscore = calc_score_everywhere(sig)
        # draw the inferred source density (as contours).
        pl.subplot(2,4,s+1)
        s += 1
        pl.imshow(truth*0.0,interpolation='nearest',cmap=cm.gray)
        CS = pl.contour(X2,X1,KLRevscore,5,linewidths=np.arange(5),
                        colors=((.2,.2,0),(.4,.4,0),(.6,.6,0),(.8,.8,0),(1,1,0)))
        pl.clabel(CS, inline=1, fontsize=10)
        pl.title('score with sigma=%.1f' % (sig))
        pl.draw()
    fig.savefig(image_name)
    print 'Wrote %s.png' % (image_name)  # NOTE: Python 2 print statement
    pl.show()
| gpl-2.0 |
EducationalTestingService/rsmtool | tests/test_utils_prmse.py | 1 | 11698 | import os
import warnings
from pathlib import Path
import numpy as np
import pandas as pd
from nose.tools import assert_almost_equal, eq_, ok_, raises
from numpy.testing import assert_array_equal
from pandas.testing import assert_frame_equal
from rsmtool.utils.prmse import (get_n_human_scores,
get_true_score_evaluations,
mse_true,
prmse_true,
true_score_variance,
variance_of_errors)
# allow test directory to be set via an environment variable
# which is needed for package testing
TEST_DIR = os.environ.get('TESTDIR', None)
if TEST_DIR:
    rsmtool_test_dir = TEST_DIR
else:
    # fall back to the test directory bundled with the rsmtool sources
    from rsmtool.test_utils import rsmtool_test_dir
def test_compute_n_human_scores():
    """get_n_human_scores counts the non-missing ratings in each row."""
    ratings = pd.DataFrame({'h1': [1, 2, 3, 4],
                            'h2': [1, None, 2, None],
                            'h3': [None, None, 1, None]})
    counted = get_n_human_scores(ratings)
    assert_array_equal(pd.Series([2, 1, 3, 1]), counted)
def test_compute_n_human_scores_zeros():
    """Rows where every rating is missing count as zero."""
    ratings = pd.DataFrame({'h1': [1, 2, 3, None],
                            'h2': [1, None, 2, None],
                            'h3': [None, None, 1, None]})
    counted = get_n_human_scores(ratings)
    assert_array_equal(pd.Series([2, 1, 3, 0]), counted)
def test_compute_n_human_scores_array():
    """get_n_human_scores also accepts a bare numpy array."""
    ratings = pd.DataFrame({'h1': [1, 2, 3, None],
                            'h2': [1, None, 2, None],
                            'h3': [None, None, 1, None]})
    counted = get_n_human_scores(ratings.to_numpy())
    assert_array_equal(pd.Series([2, 1, 3, 0]), counted)
def test_prmse_single_human_ve():
    """prmse_true with a single human column and an explicit error variance."""
    scores = pd.DataFrame({'system': [1, 2, 5],
                           'sc1': [2, 3, 5]})
    eq_(prmse_true(scores['system'], scores['sc1'], 0.5), 0.9090909090909091)
def test_prmse_single_human_ve_array_as_input():
    """prmse_true accepts plain numpy arrays as well as Series."""
    predicted = np.array([1, 2, 5])
    observed = np.array([2, 3, 5])
    eq_(prmse_true(predicted, observed, 0.5), 0.9090909090909091)
def test_variance_of_errors_all_single_scored():
    """variance_of_errors returns None (and warns) when every response has
    only a single rating, since no error variance can be estimated."""
    ratings = pd.DataFrame({'sc1': [1, 2, 3, None, None],
                            'sc2': [None, None, None, 2, 3]})
    with warnings.catch_warnings(record=True) as caught:
        result = variance_of_errors(ratings)
        ok_(result is None)
        assert issubclass(caught[-1].category, UserWarning)
def test_prmse_all_single_scored():
    """prmse_true returns None (and warns) when no response is double-scored."""
    ratings = pd.DataFrame({'sc1': [1, 2, 3, None, None],
                            'sc2': [None, None, None, 2, 3],
                            'system': [1, 2, 3, 4, 5]})
    with warnings.catch_warnings(record=True) as caught:
        result = prmse_true(ratings['system'], ratings[['sc1', 'sc2']])
        ok_(result is None)
        assert issubclass(caught[-1].category, UserWarning)
@raises(ValueError)
def test_get_true_score_evaluations_single_human_no_ve():
    """A single human column without an explicit error variance is an error."""
    scores = pd.DataFrame({'system': [1, 2, 5],
                           'sc1': [2, 3, 5]})
    get_true_score_evaluations(scores, 'system', 'sc1')
class TestPrmseJohnsonData():
    """
    Test the PRMSE functions against external benchmarks.

    The benchmark values were provided by Matt Johnson, who did the
    original derivation and implemented the functions in R.  These tests
    ensure that the Python implementation produces the same values.
    """

    def setUp(self):
        # Load a fully-crossed rating matrix and a sparse one (ratings
        # missing for some responses), plus the shared column layout.
        full_matrix_file = Path(rsmtool_test_dir) / 'data' / 'files' / 'prmse_data.csv'
        sparse_matrix_file = Path(rsmtool_test_dir) / 'data' / 'files' / 'prmse_data_sparse_matrix.csv'
        self.data_full = pd.read_csv(full_matrix_file)
        self.data_sparse = pd.read_csv(sparse_matrix_file)
        self.human_score_columns = ['h1', 'h2', 'h3', 'h4']
        self.system_score_columns = ['system']

    def test_variance_of_errors_full_matrix(self):
        """Variance of rater errors on the full matrix matches the R value."""
        human_scores = self.human_score_columns
        df_humans = self.data_full[human_scores]
        variance_errors_human = variance_of_errors(df_humans)
        expected_v_e = 0.509375
        eq_(variance_errors_human, expected_v_e)

    def test_variance_of_errors_sparse_matrix(self):
        """Variance of rater errors on the sparse matrix matches the R value."""
        human_scores = self.human_score_columns
        df_humans = self.data_sparse[human_scores]
        variance_errors_human = variance_of_errors(df_humans)
        expected_v_e = 0.5150882
        assert_almost_equal(variance_errors_human, expected_v_e, 7)

    def test_variance_of_true_scores_full_matrix(self):
        """True-score variance (full matrix) with the error variance given."""
        human_scores = self.human_score_columns
        df_humans = self.data_full[human_scores]
        variance_errors_human = 0.509375
        expected_var_true = 0.7765515
        var_true = true_score_variance(df_humans,
                                       variance_errors_human)
        assert_almost_equal(var_true, expected_var_true, 7)

    def test_variance_of_true_scores_sparse_matrix(self):
        """True-score variance (sparse matrix) with the error variance given."""
        human_scores = self.human_score_columns
        df_humans = self.data_sparse[human_scores]
        variance_errors_human = 0.5150882
        expected_var_true = 0.769816
        var_true = true_score_variance(df_humans,
                                       variance_errors_human)
        assert_almost_equal(var_true, expected_var_true, 7)

    def test_variance_of_true_scores_sparse_matrix_computed_ve(self):
        """Same as above, but the error variance is computed internally."""
        human_scores = self.human_score_columns
        df_humans = self.data_sparse[human_scores]
        expected_var_true = 0.769816
        var_true = true_score_variance(df_humans)
        assert_almost_equal(var_true, expected_var_true, 7)

    def test_mse_full_matrix(self):
        """MSE against true scores (full matrix, error variance given)."""
        human_scores = self.human_score_columns
        df_humans = self.data_full[human_scores]
        system = self.data_full['system']
        variance_errors_human = 0.509375
        expected_mse_true = 0.3564625
        mse = mse_true(system,
                       df_humans,
                       variance_errors_human)
        assert_almost_equal(mse, expected_mse_true, 7)

    def test_mse_sparse_matrix(self):
        """MSE against true scores (sparse matrix, error variance given)."""
        human_scores = self.human_score_columns
        df_humans = self.data_sparse[human_scores]
        system = self.data_sparse['system']
        variance_errors_human = 0.5150882
        expected_mse_true = 0.3550792
        mse = mse_true(system,
                       df_humans,
                       variance_errors_human)
        assert_almost_equal(mse, expected_mse_true, 7)

    def test_mse_sparse_matrix_computed_ve(self):
        """MSE (sparse matrix) with the error variance computed internally."""
        human_scores = self.human_score_columns
        df_humans = self.data_sparse[human_scores]
        system = self.data_sparse['system']
        expected_mse_true = 0.3550792
        mse = mse_true(system,
                       df_humans)
        assert_almost_equal(mse, expected_mse_true, 7)

    def test_prmse_full_matrix_given_ve(self):
        """PRMSE (full matrix) with the error variance given."""
        human_scores = self.human_score_columns
        df_humans = self.data_full[human_scores]
        system = self.data_full['system']
        variance_errors_human = 0.509375
        expected_prmse_true = 0.5409673
        prmse = prmse_true(system,
                           df_humans,
                           variance_errors_human)
        assert_almost_equal(prmse, expected_prmse_true, 7)

    def test_prmse_sparse_matrix_given_ve(self):
        """PRMSE (sparse matrix) with the error variance given."""
        human_scores = self.human_score_columns
        df_humans = self.data_sparse[human_scores]
        system = self.data_sparse['system']
        variance_errors_human = 0.5150882
        expected_prmse_true = 0.538748
        prmse = prmse_true(system,
                           df_humans,
                           variance_errors_human)
        assert_almost_equal(prmse, expected_prmse_true, 7)

    def test_prmse_full_matrix_computed_ve(self):
        """PRMSE (full matrix) with the error variance computed internally."""
        human_scores = self.human_score_columns
        df_humans = self.data_full[human_scores]
        system = self.data_full['system']
        expected_prmse_true = 0.5409673
        prmse = prmse_true(system,
                           df_humans)
        assert_almost_equal(prmse, expected_prmse_true, 7)

    def test_prmse_sparse_matrix_computed_ve(self):
        """PRMSE (sparse matrix) with the error variance computed internally."""
        human_scores = self.human_score_columns
        df_humans = self.data_sparse[human_scores]
        system = self.data_sparse['system']
        expected_prmse_true = 0.538748
        prmse = prmse_true(system,
                           df_humans)
        assert_almost_equal(prmse, expected_prmse_true, 7)

    def test_prmse_sparse_matrix_array_as_input(self):
        """PRMSE accepts numpy arrays in place of DataFrame/Series inputs."""
        human_scores = self.human_score_columns
        df_humans = self.data_sparse[human_scores].to_numpy()
        system = np.array(self.data_sparse['system'])
        expected_prmse_true = 0.538748
        prmse = prmse_true(system,
                           df_humans)
        assert_almost_equal(prmse, expected_prmse_true, 7)

    def test_compute_true_score_evaluations_full(self):
        """Full evaluation table (full matrix) matches the benchmark frame."""
        expected_df = pd.DataFrame({'N': 10000,
                                    "N raters": 4,
                                    "N single": 0,
                                    "N multiple": 10000,
                                    "Variance of errors": 0.509375,
                                    "True score var": 0.7765515,
                                    'MSE true': 0.3564625,
                                    'PRMSE true': 0.5409673},
                                   index=['system'])
        df_prmse = get_true_score_evaluations(self.data_full,
                                              self.system_score_columns,
                                              self.human_score_columns)
        assert_frame_equal(df_prmse, expected_df, check_dtype=False)

    def test_compute_true_score_evaluations_sparse(self):
        """Full evaluation table (sparse matrix) matches the benchmark frame."""
        expected_df = pd.DataFrame({'N': 10000,
                                    "N raters": 4,
                                    "N single": 3421,
                                    "N multiple": 6579,
                                    "Variance of errors": 0.5150882,
                                    "True score var": 0.769816,
                                    'MSE true': 0.3550792,
                                    'PRMSE true': 0.538748},
                                   index=['system'])
        df_prmse = get_true_score_evaluations(self.data_sparse,
                                              self.system_score_columns,
                                              self.human_score_columns)
        assert_frame_equal(df_prmse, expected_df, check_dtype=False)

    def test_compute_true_score_evaluations_given_ve(self):
        """Evaluation table with an explicitly supplied error variance."""
        expected_df = pd.DataFrame({'N': 10000,
                                    "N raters": 4,
                                    "N single": 3421,
                                    "N multiple": 6579,
                                    "Variance of errors": 0.5150882,
                                    "True score var": 0.769816,
                                    'MSE true': 0.3550792,
                                    'PRMSE true': 0.538748},
                                   index=['system'])
        df_prmse = get_true_score_evaluations(self.data_sparse,
                                              self.system_score_columns,
                                              self.human_score_columns,
                                              variance_errors_human=0.5150882)
        assert_frame_equal(df_prmse, expected_df, check_dtype=False)
| apache-2.0 |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/io/tests/test_json/test_ujson.py | 9 | 54415 | # -*- coding: utf-8 -*-
from unittest import TestCase
try:
import json
except ImportError:
import simplejson as json
import math
import nose
import platform
import sys
import time
import datetime
import calendar
import re
import decimal
from functools import partial
from pandas.compat import range, zip, StringIO, u
import pandas.json as ujson
import pandas.compat as compat
import numpy as np
from numpy.testing import (assert_array_almost_equal_nulp,
assert_approx_equal)
import pytz
import dateutil
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex
import pandas.util.testing as tm
def _skip_if_python_ver(skip_major, skip_minor=None):
major, minor = sys.version_info[:2]
if major == skip_major and (skip_minor is None or minor == skip_minor):
raise nose.SkipTest("skipping Python version %d.%d" % (major, minor))
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
class UltraJSONTests(TestCase):
def test_encodeDecimal(self):
    """decimal.Decimal values round-trip through ujson as floats."""
    value = decimal.Decimal("1337.1337")
    roundtripped = ujson.decode(ujson.encode(value, double_precision=15))
    self.assertEqual(roundtripped, 1337.1337)
def test_encodeStringConversion(self):
    """Escaping of backslash, control, and HTML-sensitive characters,
    with and without ``encode_html_chars``.

    NOTE(review): another method with this exact name is defined later in
    this class; under Python class semantics the later definition shadows
    this one, so this test never actually runs -- verify and de-duplicate.
    """
    input = "A string \\ / \b \f \n \r \t </script> &"
    not_html_encoded = '"A string \\\\ \\/ \\b \\f \\n \\r \\t <\\/script> &"'
    html_encoded = '"A string \\\\ \\/ \\b \\f \\n \\r \\t \\u003c\\/script\\u003e \\u0026"'

    def helper(expected_output, **encode_kwargs):
        # Encode with the given options, then check raw form and round-trip.
        output = ujson.encode(input, **encode_kwargs)
        self.assertEqual(input, json.loads(output))
        self.assertEqual(output, expected_output)
        self.assertEqual(input, ujson.decode(output))

    # Default behavior assumes encode_html_chars=False.
    helper(not_html_encoded, ensure_ascii=True)
    helper(not_html_encoded, ensure_ascii=False)

    # Make sure explicit encode_html_chars=False works.
    helper(not_html_encoded, ensure_ascii=True, encode_html_chars=False)
    helper(not_html_encoded, ensure_ascii=False, encode_html_chars=False)

    # Make sure explicit encode_html_chars=True does the encoding.
    helper(html_encoded, ensure_ascii=True, encode_html_chars=True)
    helper(html_encoded, ensure_ascii=False, encode_html_chars=True)
def test_doubleLongIssue(self):
sut = {u('a'): -4342969734183514}
encoded = json.dumps(sut)
decoded = json.loads(encoded)
self.assertEqual(sut, decoded)
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
self.assertEqual(sut, decoded)
def test_doubleLongDecimalIssue(self):
sut = {u('a'): -12345678901234.56789012}
encoded = json.dumps(sut)
decoded = json.loads(encoded)
self.assertEqual(sut, decoded)
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
self.assertEqual(sut, decoded)
def test_encodeNonCLocale(self):
import locale
savedlocale = locale.getlocale(locale.LC_NUMERIC)
try:
locale.setlocale(locale.LC_NUMERIC, 'it_IT.UTF-8')
except:
try:
locale.setlocale(locale.LC_NUMERIC, 'Italian_Italy')
except:
raise nose.SkipTest('Could not set locale for testing')
self.assertEqual(ujson.loads(ujson.dumps(4.78e60)), 4.78e60)
self.assertEqual(ujson.loads('4.78', precise_float=True), 4.78)
locale.setlocale(locale.LC_NUMERIC, savedlocale)
def test_encodeDecodeLongDecimal(self):
sut = {u('a'): -528656961.4399388}
encoded = ujson.dumps(sut, double_precision=15)
ujson.decode(encoded)
def test_decimalDecodeTestPrecise(self):
sut = {u('a'): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
self.assertEqual(sut, decoded)
def test_encodeDoubleTinyExponential(self):
if compat.is_platform_windows() and not compat.PY3:
raise nose.SkipTest("buggy on win-64 for py2")
num = 1e-40
self.assertEqual(num, ujson.decode(ujson.encode(num)))
num = 1e-100
self.assertEqual(num, ujson.decode(ujson.encode(num)))
num = -1e-45
self.assertEqual(num, ujson.decode(ujson.encode(num)))
num = -1e-145
self.assertTrue(np.allclose(num, ujson.decode(ujson.encode(num))))
def test_encodeDictWithUnicodeKeys(self):
    """Dicts with unicode keys (ASCII and non-ASCII) encode without error.

    Bug fix: the original literals repeated the *same* key in every
    entry, so each dict collapsed to a single-element dict and the
    multiple-key case was never exercised.  The keys are now distinct,
    and the result is round-tripped through decode as a sanity check.
    """
    input = {u("key1"): u("value1"), u("key2"): u("value2"),
             u("key3"): u("value3"), u("key4"): u("value4"),
             u("key5"): u("value5"), u("key6"): u("value6")}
    output = ujson.encode(input)
    self.assertEqual(input, ujson.decode(output))

    input = {u("بن1"): u("value1"), u("بن2"): u("value2"),
             u("بن3"): u("value3"), u("بن4"): u("value4"),
             u("بن5"): u("value5"), u("بن6"): u("value6"),
             u("بن7"): u("value7")}
    output = ujson.encode(input)
    self.assertEqual(input, ujson.decode(output))
def test_encodeDoubleConversion(self):
input = math.pi
output = ujson.encode(input)
self.assertEqual(round(input, 5), round(json.loads(output), 5))
self.assertEqual(round(input, 5), round(ujson.decode(output), 5))
def test_encodeWithDecimal(self):
input = 1.0
output = ujson.encode(input)
self.assertEqual(output, "1.0")
def test_encodeDoubleNegConversion(self):
input = -math.pi
output = ujson.encode(input)
self.assertEqual(round(input, 5), round(json.loads(output), 5))
self.assertEqual(round(input, 5), round(ujson.decode(output), 5))
def test_encodeArrayOfNestedArrays(self):
input = [[[[]]]] * 20
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
#self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
input = np.array(input)
tm.assert_numpy_array_equal(input, ujson.decode(output, numpy=True, dtype=input.dtype))
def test_encodeArrayOfDoubles(self):
input = [ 31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
#self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
tm.assert_numpy_array_equal(np.array(input), ujson.decode(output, numpy=True))
def test_doublePrecisionTest(self):
input = 30.012345678901234
output = ujson.encode(input, double_precision = 15)
self.assertEqual(input, json.loads(output))
self.assertEqual(input, ujson.decode(output))
output = ujson.encode(input, double_precision = 9)
self.assertEqual(round(input, 9), json.loads(output))
self.assertEqual(round(input, 9), ujson.decode(output))
output = ujson.encode(input, double_precision = 3)
self.assertEqual(round(input, 3), json.loads(output))
self.assertEqual(round(input, 3), ujson.decode(output))
def test_invalidDoublePrecision(self):
input = 30.12345678901234567890
self.assertRaises(ValueError, ujson.encode, input, double_precision = 20)
self.assertRaises(ValueError, ujson.encode, input, double_precision = -1)
# will throw typeError
self.assertRaises(TypeError, ujson.encode, input, double_precision = '9')
# will throw typeError
self.assertRaises(TypeError, ujson.encode, input, double_precision = None)
def test_encodeStringConversion2(self):
    """Round-trip a string of backslash and control characters.

    Bug fix: this method was originally named ``test_encodeStringConversion``,
    duplicating (and therefore silently shadowing) the earlier, more
    thorough method of the same name, which consequently never ran.
    Renamed so both tests execute.
    """
    input = "A string \\ / \b \f \n \r \t"
    output = ujson.encode(input)
    self.assertEqual(input, json.loads(output))
    self.assertEqual(output, '"A string \\\\ \\/ \\b \\f \\n \\r \\t"')
    self.assertEqual(input, ujson.decode(output))
def test_decodeUnicodeConversion(self):
pass
def test_encodeUnicodeConversion1(self):
input = "Räksmörgås اسامة بن محمد بن عوض بن لادن"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input))
self.assertEqual(dec, json.loads(enc))
def test_encodeControlEscaping(self):
input = "\x19"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(input, dec)
self.assertEqual(enc, json_unicode(input))
def test_encodeUnicodeConversion2(self):
input = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input))
self.assertEqual(dec, json.loads(enc))
def test_encodeUnicodeSurrogatePair(self):
_skip_if_python_ver(2, 5)
_skip_if_python_ver(2, 6)
input = "\xf0\x90\x8d\x86"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input))
self.assertEqual(dec, json.loads(enc))
def test_encodeUnicode4BytesUTF8(self):
_skip_if_python_ver(2, 5)
_skip_if_python_ver(2, 6)
input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input))
self.assertEqual(dec, json.loads(enc))
def test_encodeUnicode4BytesUTF8Highest(self):
_skip_if_python_ver(2, 5)
_skip_if_python_ver(2, 6)
input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input))
self.assertEqual(dec, json.loads(enc))
def test_encodeArrayInArray(self):
input = [[[[]]]]
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
tm.assert_numpy_array_equal(np.array(input), ujson.decode(output, numpy=True))
pass
def test_encodeIntConversion(self):
input = 31337
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeIntNegConversion(self):
input = -31337
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeLongNegConversion(self):
input = -9223372036854775808
output = ujson.encode(input)
outputjson = json.loads(output)
outputujson = ujson.decode(output)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeListConversion(self):
input = [ 1, 2, 3, 4 ]
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(input, ujson.decode(output))
tm.assert_numpy_array_equal(np.array(input), ujson.decode(output, numpy=True))
pass
def test_encodeDictConversion(self):
input = { "k1": 1, "k2": 2, "k3": 3, "k4": 4 }
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(input, ujson.decode(output))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeNoneConversion(self):
input = None
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeTrueConversion(self):
input = True
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeFalseConversion(self):
input = False
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
def test_encodeDatetimeConversion(self):
ts = time.time()
input = datetime.datetime.fromtimestamp(ts)
output = ujson.encode(input, date_unit='s')
expected = calendar.timegm(input.utctimetuple())
self.assertEqual(int(expected), json.loads(output))
self.assertEqual(int(expected), ujson.decode(output))
def test_encodeDateConversion(self):
ts = time.time()
input = datetime.date.fromtimestamp(ts)
output = ujson.encode(input, date_unit='s')
tup = (input.year, input.month, input.day, 0, 0, 0)
expected = calendar.timegm(tup)
self.assertEqual(int(expected), json.loads(output))
self.assertEqual(int(expected), ujson.decode(output))
def test_encodeTimeConversion(self):
tests = [
datetime.time(),
datetime.time(1, 2, 3),
datetime.time(10, 12, 15, 343243),
datetime.time(10, 12, 15, 343243, pytz.utc),
# datetime.time(10, 12, 15, 343243, dateutil.tz.gettz('UTC')), # this segfaults! No idea why.
]
for test in tests:
output = ujson.encode(test)
expected = '"%s"' % test.isoformat()
self.assertEqual(expected, output)
def test_nat(self):
input = NaT
assert ujson.encode(input) == 'null', "Expected null"
def test_npy_nat(self):
from distutils.version import LooseVersion
if LooseVersion(np.__version__) < '1.7.0':
raise nose.SkipTest("numpy version < 1.7.0, is "
"{0}".format(np.__version__))
input = np.datetime64('NaT')
assert ujson.encode(input) == 'null', "Expected null"
def test_datetime_units(self):
from pandas.lib import Timestamp
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
roundtrip = ujson.decode(ujson.encode(val, date_unit='s'))
self.assertEqual(roundtrip, stamp.value // 10**9)
roundtrip = ujson.decode(ujson.encode(val, date_unit='ms'))
self.assertEqual(roundtrip, stamp.value // 10**6)
roundtrip = ujson.decode(ujson.encode(val, date_unit='us'))
self.assertEqual(roundtrip, stamp.value // 10**3)
roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
self.assertEqual(roundtrip, stamp.value)
self.assertRaises(ValueError, ujson.encode, val, date_unit='foo')
def test_encodeToUTF8(self):
_skip_if_python_ver(2, 5)
input = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(input, ensure_ascii=False)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input, ensure_ascii=False))
self.assertEqual(dec, json.loads(enc))
def test_decodeFromUnicode(self):
input = u("{\"obj\": 31337}")
dec1 = ujson.decode(input)
dec2 = ujson.decode(str(input))
self.assertEqual(dec1, dec2)
def test_encodeRecursionMax(self):
    """Encoding a self-referential object graph raises OverflowError.

    8 is the maximum recursion depth in the encoder.
    """
    class Inner:
        member = 0

    class Outer:
        member = 0

    obj = Outer()
    obj.member = Inner()
    obj.member.member = obj  # cycle back to the root
    self.assertRaises(OverflowError, ujson.encode, obj)
def test_encodeDoubleNan(self):
input = np.nan
assert ujson.encode(input) == 'null', "Expected null"
def test_encodeDoubleInf(self):
input = np.inf
assert ujson.encode(input) == 'null', "Expected null"
def test_encodeDoubleNegInf(self):
input = -np.inf
assert ujson.encode(input) == 'null', "Expected null"
def test_decodeJibberish(self):
input = "fdsa sda v9sa fdsa"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenArrayStart(self):
input = "["
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenObjectStart(self):
input = "{"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenArrayEnd(self):
input = "]"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeArrayDepthTooBig(self):
input = '[' * (1024 * 1024)
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenObjectEnd(self):
input = "}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeObjectDepthTooBig(self):
input = '{' * (1024 * 1024)
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringUnterminated(self):
input = "\"TESTING"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringUntermEscapeSequence(self):
input = "\"TESTING\\\""
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringBadEscape(self):
input = "\"TESTING\\\""
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeTrueBroken(self):
input = "tru"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeFalseBroken(self):
input = "fa"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeNullBroken(self):
input = "n"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenDictKeyTypeLeakTest(self):
input = '{{1337:""}}'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except ValueError as e:
continue
assert False, "Wrong exception"
def test_decodeBrokenDictLeakTest(self):
input = '{{"key":"}'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
continue
assert False, "Wrong exception"
def test_decodeBrokenListLeakTest(self):
input = '[[[true'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
continue
assert False, "Wrong exception"
def test_decodeDictWithNoKey(self):
input = "{{{{31337}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeDictWithNoColonOrValue(self):
input = "{{{{\"key\"}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeDictWithNoValue(self):
input = "{{{{\"key\":}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeNumericIntPos(self):
input = "31337"
self.assertEqual(31337, ujson.decode(input))
def test_decodeNumericIntNeg(self):
input = "-31337"
self.assertEqual(-31337, ujson.decode(input))
def test_encodeUnicode4BytesUTF8Fail(self):
_skip_if_python_ver(3)
input = "\xfd\xbf\xbf\xbf\xbf\xbf"
try:
enc = ujson.encode(input)
assert False, "Expected exception"
except OverflowError:
pass
def test_encodeNullCharacter(self):
input = "31337 \x00 1337"
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
input = "\x00"
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
self.assertEqual('" \\u0000\\r\\n "', ujson.dumps(u(" \u0000\r\n ")))
pass
def test_decodeNullCharacter(self):
input = "\"31337 \\u0000 31337\""
self.assertEqual(ujson.decode(input), json.loads(input))
def test_encodeListLongConversion(self):
input = [9223372036854775807, 9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807, 9223372036854775807 ]
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(input, ujson.decode(output))
tm.assert_numpy_array_equal(np.array(input), ujson.decode(output, numpy=True,
dtype=np.int64))
pass
def test_encodeLongConversion(self):
input = 9223372036854775807
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_numericIntExp(self):
    """Integer-with-exponent literal decodes consistently with json.

    Uses assertAlmostEqual like its siblings: both parsers produce a
    float here, and the original's exact assertEqual was inconsistent
    with the otherwise-identical test_decodeNumericIntExpE below.
    """
    input = "1337E40"
    output = ujson.decode(input)
    self.assertAlmostEqual(output, json.loads(input))

def test_numericIntFrcExp(self):
    """Fractional literal with a large exponent."""
    input = "1.337E40"
    output = ujson.decode(input)
    self.assertAlmostEqual(output, json.loads(input))

def test_decodeNumericIntExpEPLUS(self):
    """Uppercase exponent marker with explicit plus sign."""
    input = "1337E+9"
    output = ujson.decode(input)
    self.assertAlmostEqual(output, json.loads(input))

def test_decodeNumericIntExpePLUS(self):
    """Lowercase exponent marker with explicit plus sign."""
    input = "1.337e+40"
    output = ujson.decode(input)
    self.assertAlmostEqual(output, json.loads(input))

def test_decodeNumericIntExpE(self):
    """Uppercase exponent marker, no sign."""
    input = "1337E40"
    output = ujson.decode(input)
    self.assertAlmostEqual(output, json.loads(input))

def test_decodeNumericIntExpe(self):
    """Lowercase exponent marker, no sign."""
    input = "1337e40"
    output = ujson.decode(input)
    self.assertAlmostEqual(output, json.loads(input))

def test_decodeNumericIntExpEMinus(self):
    """Uppercase exponent marker with negative exponent."""
    input = "1.337E-4"
    output = ujson.decode(input)
    self.assertAlmostEqual(output, json.loads(input))

def test_decodeNumericIntExpeMinus(self):
    """Lowercase exponent marker with negative exponent."""
    input = "1.337e-4"
    output = ujson.decode(input)
    self.assertAlmostEqual(output, json.loads(input))
def test_dumpToFile(self):
    """ujson.dump writes compact JSON into a file-like object."""
    buf = StringIO()
    ujson.dump([1, 2, 3], buf)
    self.assertEqual("[1,2,3]", buf.getvalue())
def test_dumpToFileLikeObject(self):
    """ujson.dump accepts any object exposing a write() method."""
    class _Writer:
        def __init__(self):
            self.bytes = ''

        def write(self, bytes):
            self.bytes += bytes

    sink = _Writer()
    ujson.dump([1, 2, 3], sink)
    self.assertEqual("[1,2,3]", sink.bytes)
def test_dumpFileArgsError(self):
    """A non-file second argument to dump must raise TypeError."""
    # assertRaises replaces the manual try/except/else scaffolding.
    with self.assertRaises(TypeError):
        ujson.dump([], '')
def test_loadFile(self):
    """ujson.load parses a file object, on both plain and numpy paths."""
    expected = [1, 2, 3, 4]
    self.assertEqual(expected, ujson.load(StringIO("[1,2,3,4]")))
    tm.assert_numpy_array_equal(np.array(expected),
                                ujson.load(StringIO("[1,2,3,4]"), numpy=True))
def test_loadFileLikeObject(self):
    # `read()` yields the payload exactly once: the first call raises
    # AttributeError (self.end unset), sets the flag and returns the JSON
    # text; any later call falls through and implicitly returns None,
    # emulating EOF for the reader.
    class filelike:

        def read(self):
            try:
                self.end
            except AttributeError:
                self.end = True
                return "[1,2,3,4]"

    f = filelike()
    self.assertEqual([1, 2, 3, 4], ujson.load(f))
    # Fresh object for the numpy=True path, which must yield the same data.
    f = filelike()
    tm.assert_numpy_array_equal(np.array([1, 2, 3, 4]),
                                ujson.load(f, numpy=True))
def test_loadFileArgsError(self):
    """load requires a file-like argument; a plain string raises TypeError."""
    # assertRaises replaces the manual try/except/else scaffolding.
    with self.assertRaises(TypeError):
        ujson.load("[]")
def test_version(self):
    """ujson.__version__ follows a dotted numeric scheme (e.g. 1.4.0)."""
    version_re = re.compile(r'^\d+\.\d+(\.\d+)?$')
    assert version_re.match(ujson.__version__), \
        "ujson.__version__ must be a string like '1.4.0'"
def test_encodeNumericOverflow(self):
    """Encoding an int beyond the 64-bit range must raise OverflowError."""
    # assertRaises replaces the manual try/except/else scaffolding.
    with self.assertRaises(OverflowError):
        ujson.encode(12839128391289382193812939)
def test_encodeNumericOverflowNested(self):
    """An oversized int reached via object encoding overflows every time."""
    class Nested:
        # Attribute value exceeds the 64-bit range the encoder supports.
        x = 12839128391289382193812939

    # The original re-defined the class inside the loop on each of the
    # 100 iterations; hoisting it is observably identical. The repetition
    # exercises the encoder's repeated-failure path.
    for _ in range(100):
        with self.assertRaises(OverflowError):
            ujson.encode(Nested())
def test_decodeNumberWith32bitSignBit(self):
    """Numbers with the 32-bit sign bit set (2**31 <= x < 2**32) decode intact.

    The original kept unused `boundary1`/`boundary2` locals and maintained
    parallel `docs`/`results` tuples that could drift apart; both are
    replaced by a single value list formatted into the document.
    """
    values = (3590016419, 2**31, 2**32, 2**32 - 1)
    for value in values:
        doc = '{"id": %s}' % value
        self.assertEqual(ujson.decode(doc)['id'], value)
def test_encodeBigEscape(self):
    # Smoke test: encoding ~2 MB of multi-byte characters repeatedly must
    # not crash or corrupt the escape buffer; the output is not inspected.
    for x in range(10):
        if compat.PY3:
            # Python 3: UTF-8 bytes of U+00E5; Python 2: the same two raw bytes.
            base = '\u00e5'.encode('utf-8')
        else:
            base = "\xc3\xa5"
        input = base * 1024 * 1024 * 2
        output = ujson.encode(input)
def test_decodeBigEscape(self):
    # Smoke test: decoding a ~2 MB quoted string of multi-byte characters
    # repeatedly must not crash; the output is not inspected.
    for x in range(10):
        if compat.PY3:
            # Python 3: UTF-8 bytes of U+00E5; Python 2: the same two raw bytes.
            base = '\u00e5'.encode('utf-8')
        else:
            base = "\xc3\xa5"
        quote = compat.str_to_bytes("\"")
        # Wrap the payload in quotes so it forms a valid JSON string.
        input = quote + (base * 1024 * 1024 * 2) + quote
        output = ujson.decode(input)
def test_toDict(self):
    """Objects exposing a toDict() method are encoded via that dict."""
    expected = {u("key"): 31337}

    class DictTest:
        def toDict(self):
            return expected

    decoded = ujson.decode(ujson.encode(DictTest()))
    self.assertEqual(decoded, expected)
def test_defaultHandler(self):
    # Objects with no native JSON representation must fail unless a
    # default_handler is supplied; the handler's return value is encoded
    # in the object's place.
    class _TestObject(object):

        def __init__(self, val):
            self.val = val

        @property
        def recursive_attr(self):
            # Every access yields a fresh object, so an attribute-walking
            # encoder without a handler would recurse without bound.
            return _TestObject("recursive_attr")

        def __str__(self):
            return str(self.val)

    # No handler: encoding an arbitrary object raises OverflowError.
    self.assertRaises(OverflowError, ujson.encode, _TestObject("foo"))
    # str as handler serializes the object via __str__.
    self.assertEqual('"foo"', ujson.encode(_TestObject("foo"),
                                           default_handler=str))

    def my_handler(obj):
        return "foobar"
    self.assertEqual('"foobar"', ujson.encode(_TestObject("foo"),
                                              default_handler=my_handler))

    def my_handler_raises(obj):
        raise TypeError("I raise for anything")
    # Exceptions raised inside the handler propagate to the caller.
    with tm.assertRaisesRegexp(TypeError, "I raise for anything"):
        ujson.encode(_TestObject("foo"), default_handler=my_handler_raises)

    def my_int_handler(obj):
        return 42
    # The handler may return non-string encodable values.
    self.assertEqual(
        42, ujson.decode(ujson.encode(_TestObject("foo"),
                                      default_handler=my_int_handler)))

    def my_obj_handler(obj):
        return datetime.datetime(2013, 2, 3)
    # A handler returning another encodable object (a datetime) must
    # produce the same encoding as encoding that object directly.
    self.assertEqual(
        ujson.decode(ujson.encode(datetime.datetime(2013, 2, 3))),
        ujson.decode(ujson.encode(_TestObject("foo"),
                                  default_handler=my_obj_handler)))

    # The handler is applied element-wise inside containers, matching
    # stdlib json's `default=` behavior.
    l = [_TestObject("foo"), _TestObject("bar")]
    self.assertEqual(json.loads(json.dumps(l, default=str)),
                     ujson.decode(ujson.encode(l, default_handler=str)))
class NumpyJSONTests(TestCase):
    """Round-trips of numpy scalars and arrays through ujson encode/decode."""

    def testBool(self):
        b = np.bool(True)
        self.assertEqual(ujson.decode(ujson.encode(b)), b)

    def testBoolArray(self):
        inpt = np.array([True, False, True, True, False, True, False, False],
                        dtype=np.bool)
        outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=np.bool)
        tm.assert_numpy_array_equal(inpt, outp)

    def testInt(self):
        # Every signed/unsigned integer width must survive a round trip.
        num = np.int(2562010)
        self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)
        num = np.int8(127)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)
        num = np.int16(2562010)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)
        num = np.int32(2562010)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)
        num = np.int64(2562010)
        self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)
        num = np.uint8(255)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)
        num = np.uint16(2562010)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)
        num = np.uint32(2562010)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)
        num = np.uint64(2562010)
        self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)

    def testIntArray(self):
        arr = np.arange(100, dtype=np.int)
        dtypes = (np.int, np.int8, np.int16, np.int32, np.int64,
                  np.uint, np.uint8, np.uint16, np.uint32, np.uint64)
        for dtype in dtypes:
            inpt = arr.astype(dtype)
            outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=dtype)
            tm.assert_numpy_array_equal(inpt, outp)

    def testIntMax(self):
        # Each width's maximum value must round-trip without truncation.
        num = np.int(np.iinfo(np.int).max)
        self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)
        num = np.int8(np.iinfo(np.int8).max)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)
        num = np.int16(np.iinfo(np.int16).max)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)
        num = np.int32(np.iinfo(np.int32).max)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)
        num = np.uint8(np.iinfo(np.uint8).max)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)
        num = np.uint16(np.iinfo(np.uint16).max)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)
        num = np.uint32(np.iinfo(np.uint32).max)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)
        if platform.architecture()[0] != '32bit':
            # 64-bit extremes are only exercised on 64-bit interpreters.
            num = np.int64(np.iinfo(np.int64).max)
            self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)
            # uint64 max will always overflow as it's encoded to signed
            num = np.uint64(np.iinfo(np.int64).max)
            self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)

    def testFloat(self):
        num = np.float(256.2013)
        self.assertEqual(np.float(ujson.decode(ujson.encode(num))), num)
        num = np.float32(256.2013)
        self.assertEqual(np.float32(ujson.decode(ujson.encode(num))), num)
        num = np.float64(256.2013)
        self.assertEqual(np.float64(ujson.decode(ujson.encode(num))), num)

    def testFloatArray(self):
        arr = np.arange(12.5, 185.72, 1.7322, dtype=np.float)
        dtypes = (np.float, np.float32, np.float64)
        for dtype in dtypes:
            inpt = arr.astype(dtype)
            # double_precision=15 keeps enough digits for a float64 round trip.
            outp = np.array(
                ujson.decode(ujson.encode(inpt, double_precision=15)),
                dtype=dtype)
            assert_array_almost_equal_nulp(inpt, outp)

    def testFloatMax(self):
        # max/10 avoids overflowing to inf during the textual round trip.
        num = np.float(np.finfo(np.float).max / 10)
        assert_approx_equal(
            np.float(ujson.decode(ujson.encode(num, double_precision=15))),
            num, 15)
        num = np.float32(np.finfo(np.float32).max / 10)
        assert_approx_equal(
            np.float32(ujson.decode(ujson.encode(num, double_precision=15))),
            num, 15)
        num = np.float64(np.finfo(np.float64).max / 10)
        assert_approx_equal(
            np.float64(ujson.decode(ujson.encode(num, double_precision=15))),
            num, 15)

    def testArrays(self):
        # Array shapes must survive both the plain decode path and the
        # numpy=True fast path.
        arr = np.arange(100)
        arr = arr.reshape((10, 10))
        tm.assert_numpy_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
        tm.assert_numpy_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
        arr = arr.reshape((5, 5, 4))
        tm.assert_numpy_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
        tm.assert_numpy_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
        arr = arr.reshape((100, 1))
        tm.assert_numpy_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
        tm.assert_numpy_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
        arr = np.arange(96)
        arr = arr.reshape((2, 2, 2, 2, 3, 2))
        tm.assert_numpy_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
        tm.assert_numpy_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
        # Heterogeneous object arrays are only checked on the plain path.
        l = ['a', list(), dict(), dict(), list(),
             42, 97.8, ['a', 'b'], {'key': 'val'}]
        arr = np.array(l)
        tm.assert_numpy_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
        arr = np.arange(100.202, 200.202, 1, dtype=np.float32)
        arr = arr.reshape((5, 5, 4))
        outp = np.array(ujson.decode(ujson.encode(arr)), dtype=np.float32)
        assert_array_almost_equal_nulp(arr, outp)
        outp = ujson.decode(ujson.encode(arr), numpy=True, dtype=np.float32)
        assert_array_almost_equal_nulp(arr, outp)

    def testOdArray(self):
        # 0-d arrays cannot be encoded and must raise TypeError.
        def will_raise():
            ujson.encode(np.array(1))
        self.assertRaises(TypeError, will_raise)

    def testArrayNumpyExcept(self):
        # Mixed-type or ragged inputs cannot be decoded straight into an
        # ndarray; each case must raise exactly the recorded exception type.
        input = ujson.dumps([42, {}, 'a'])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(TypeError):
            pass
        except:
            assert False, "Wrong exception"
        input = ujson.dumps(['a', 'b', [], 'c'])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"
        input = ujson.dumps([['a'], 42])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"
        input = ujson.dumps([42, ['a'], 42])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"
        input = ujson.dumps([{}, []])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"
        input = ujson.dumps([42, None])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(TypeError):
            pass
        except:
            assert False, "Wrong exception"
        # labelled=True additionally rejects shapes that cannot form a
        # (values, index, columns) triple.
        input = ujson.dumps([{'a': 'b'}])
        try:
            ujson.decode(input, numpy=True, labelled=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"
        input = ujson.dumps({'a': {'b': {'c': 42}}})
        try:
            ujson.decode(input, numpy=True, labelled=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"
        input = ujson.dumps([{'a': 42, 'b': 23}, {'c': 17}])
        try:
            ujson.decode(input, numpy=True, labelled=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"

    def testArrayNumpyLabelled(self):
        # labelled=True returns a (values, row_labels, column_labels) triple.
        input = {'a': []}
        output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
        self.assertTrue((np.empty((1, 0)) == output[0]).all())
        self.assertTrue((np.array(['a']) == output[1]).all())
        self.assertTrue(output[2] is None)
        input = [{'a': 42}]
        output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
        self.assertTrue((np.array([42]) == output[0]).all())
        self.assertTrue(output[1] is None)
        self.assertTrue((np.array([u('a')]) == output[2]).all())
        # Write out the dump explicitly so there is no dependency on iteration order GH10837
        input_dumps = '[{"a": 42, "b":31}, {"a": 24, "c": 99}, {"a": 2.4, "b": 78}]'
        output = ujson.loads(input_dumps, numpy=True, labelled=True)
        expectedvals = np.array([42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
        self.assertTrue((expectedvals == output[0]).all())
        self.assertTrue(output[1] is None)
        self.assertTrue((np.array([u('a'), 'b']) == output[2]).all())
        input_dumps = '{"1": {"a": 42, "b":31}, "2": {"a": 24, "c": 99}, "3": {"a": 2.4, "b": 78}}'
        output = ujson.loads(input_dumps, numpy=True, labelled=True)
        expectedvals = np.array([42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
        self.assertTrue((expectedvals == output[0]).all())
        self.assertTrue((np.array(['1', '2', '3']) == output[1]).all())
        self.assertTrue((np.array(['a', 'b']) == output[2]).all())
class PandasJSONTests(TestCase):
    """ujson round-trips of pandas containers plus decoder edge cases."""

    def testDataFrame(self):
        df = DataFrame([[1, 2, 3], [4, 5, 6]], index=['a', 'b'],
                       columns=['x', 'y', 'z'])
        # column indexed
        outp = DataFrame(ujson.decode(ujson.encode(df)))
        self.assertTrue((df == outp).values.all())
        tm.assert_numpy_array_equal(df.columns, outp.columns)
        tm.assert_numpy_array_equal(df.index, outp.index)
        # orient="split" round-trips through the DataFrame constructor.
        dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split")))
        outp = DataFrame(**dec)
        self.assertTrue((df == outp).values.all())
        tm.assert_numpy_array_equal(df.columns, outp.columns)
        tm.assert_numpy_array_equal(df.index, outp.index)
        # orient="records" loses the index; restore it before comparing.
        outp = DataFrame(ujson.decode(ujson.encode(df, orient="records")))
        outp.index = df.index
        self.assertTrue((df == outp).values.all())
        tm.assert_numpy_array_equal(df.columns, outp.columns)
        # orient="values" keeps only the raw cell values.
        outp = DataFrame(ujson.decode(ujson.encode(df, orient="values")))
        outp.index = df.index
        self.assertTrue((df.values == outp.values).all())
        # orient="index" yields the transposed frame.
        outp = DataFrame(ujson.decode(ujson.encode(df, orient="index")))
        self.assertTrue((df.transpose() == outp).values.all())
        tm.assert_numpy_array_equal(df.transpose().columns, outp.columns)
        tm.assert_numpy_array_equal(df.transpose().index, outp.index)

    def testDataFrameNumpy(self):
        # Same checks as testDataFrame, on the numpy=True decode path.
        df = DataFrame([[1, 2, 3], [4, 5, 6]], index=['a', 'b'],
                       columns=['x', 'y', 'z'])
        # column indexed
        outp = DataFrame(ujson.decode(ujson.encode(df), numpy=True))
        self.assertTrue((df == outp).values.all())
        tm.assert_numpy_array_equal(df.columns, outp.columns)
        tm.assert_numpy_array_equal(df.index, outp.index)
        dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split"),
                                       numpy=True))
        outp = DataFrame(**dec)
        self.assertTrue((df == outp).values.all())
        tm.assert_numpy_array_equal(df.columns, outp.columns)
        tm.assert_numpy_array_equal(df.index, outp.index)
        outp = DataFrame(ujson.decode(ujson.encode(df, orient="index"),
                                      numpy=True))
        self.assertTrue((df.transpose() == outp).values.all())
        tm.assert_numpy_array_equal(df.transpose().columns, outp.columns)
        tm.assert_numpy_array_equal(df.transpose().index, outp.index)

    def testDataFrameNested(self):
        # Frames nested inside a dict must encode like two independent frames.
        df = DataFrame([[1, 2, 3], [4, 5, 6]], index=['a', 'b'],
                       columns=['x', 'y', 'z'])
        nested = {'df1': df, 'df2': df.copy()}
        exp = {'df1': ujson.decode(ujson.encode(df)),
               'df2': ujson.decode(ujson.encode(df))}
        self.assertTrue(ujson.decode(ujson.encode(nested)) == exp)
        exp = {'df1': ujson.decode(ujson.encode(df, orient="index")),
               'df2': ujson.decode(ujson.encode(df, orient="index"))}
        self.assertTrue(ujson.decode(ujson.encode(nested, orient="index")) == exp)
        exp = {'df1': ujson.decode(ujson.encode(df, orient="records")),
               'df2': ujson.decode(ujson.encode(df, orient="records"))}
        self.assertTrue(ujson.decode(ujson.encode(nested, orient="records")) == exp)
        exp = {'df1': ujson.decode(ujson.encode(df, orient="values")),
               'df2': ujson.decode(ujson.encode(df, orient="values"))}
        self.assertTrue(ujson.decode(ujson.encode(nested, orient="values")) == exp)
        exp = {'df1': ujson.decode(ujson.encode(df, orient="split")),
               'df2': ujson.decode(ujson.encode(df, orient="split"))}
        self.assertTrue(ujson.decode(ujson.encode(nested, orient="split")) == exp)

    def testDataFrameNumpyLabelled(self):
        # labelled=True returns (values, index, columns); splat into the
        # DataFrame constructor. The default orient comes back transposed.
        df = DataFrame([[1, 2, 3], [4, 5, 6]], index=['a', 'b'],
                       columns=['x', 'y', 'z'])
        # column indexed
        outp = DataFrame(*ujson.decode(ujson.encode(df), numpy=True,
                                       labelled=True))
        self.assertTrue((df.T == outp).values.all())
        tm.assert_numpy_array_equal(df.T.columns, outp.columns)
        tm.assert_numpy_array_equal(df.T.index, outp.index)
        outp = DataFrame(*ujson.decode(ujson.encode(df, orient="records"),
                                       numpy=True, labelled=True))
        outp.index = df.index
        self.assertTrue((df == outp).values.all())
        tm.assert_numpy_array_equal(df.columns, outp.columns)
        outp = DataFrame(*ujson.decode(ujson.encode(df, orient="index"),
                                       numpy=True, labelled=True))
        self.assertTrue((df == outp).values.all())
        tm.assert_numpy_array_equal(df.columns, outp.columns)
        tm.assert_numpy_array_equal(df.index, outp.index)

    def testSeries(self):
        s = Series([10, 20, 30, 40, 50, 60], name="series",
                   index=[6, 7, 8, 9, 10, 15]).sort_values()
        # column indexed
        outp = Series(ujson.decode(ujson.encode(s))).sort_values()
        self.assertTrue((s == outp).values.all())
        outp = Series(ujson.decode(ujson.encode(s), numpy=True)).sort_values()
        self.assertTrue((s == outp).values.all())
        # orient="split" preserves the name; other orients do not carry it.
        dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split")))
        outp = Series(**dec)
        self.assertTrue((s == outp).values.all())
        self.assertTrue(s.name == outp.name)
        dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split"),
                                       numpy=True))
        outp = Series(**dec)
        self.assertTrue((s == outp).values.all())
        self.assertTrue(s.name == outp.name)
        outp = Series(ujson.decode(ujson.encode(s, orient="records"),
                                   numpy=True))
        self.assertTrue((s == outp).values.all())
        outp = Series(ujson.decode(ujson.encode(s, orient="records")))
        self.assertTrue((s == outp).values.all())
        outp = Series(ujson.decode(ujson.encode(s, orient="values"),
                                   numpy=True))
        self.assertTrue((s == outp).values.all())
        outp = Series(ujson.decode(ujson.encode(s, orient="values")))
        self.assertTrue((s == outp).values.all())
        outp = Series(ujson.decode(ujson.encode(s, orient="index"))).sort_values()
        self.assertTrue((s == outp).values.all())
        outp = Series(ujson.decode(ujson.encode(s, orient="index"),
                                   numpy=True)).sort_values()
        self.assertTrue((s == outp).values.all())

    def testSeriesNested(self):
        # Series nested inside a dict encode like independent series.
        s = Series([10, 20, 30, 40, 50, 60], name="series",
                   index=[6, 7, 8, 9, 10, 15]).sort_values()
        nested = {'s1': s, 's2': s.copy()}
        exp = {'s1': ujson.decode(ujson.encode(s)),
               's2': ujson.decode(ujson.encode(s))}
        self.assertTrue(ujson.decode(ujson.encode(nested)) == exp)
        exp = {'s1': ujson.decode(ujson.encode(s, orient="split")),
               's2': ujson.decode(ujson.encode(s, orient="split"))}
        self.assertTrue(ujson.decode(ujson.encode(nested, orient="split")) == exp)
        exp = {'s1': ujson.decode(ujson.encode(s, orient="records")),
               's2': ujson.decode(ujson.encode(s, orient="records"))}
        self.assertTrue(ujson.decode(ujson.encode(nested, orient="records")) == exp)
        exp = {'s1': ujson.decode(ujson.encode(s, orient="values")),
               's2': ujson.decode(ujson.encode(s, orient="values"))}
        self.assertTrue(ujson.decode(ujson.encode(nested, orient="values")) == exp)
        exp = {'s1': ujson.decode(ujson.encode(s, orient="index")),
               's2': ujson.decode(ujson.encode(s, orient="index"))}
        self.assertTrue(ujson.decode(ujson.encode(nested, orient="index")) == exp)

    def testIndex(self):
        i = Index([23, 45, 18, 98, 43, 11], name="index")
        # column indexed
        outp = Index(ujson.decode(ujson.encode(i)))
        self.assertTrue(i.equals(outp))
        outp = Index(ujson.decode(ujson.encode(i), numpy=True))
        self.assertTrue(i.equals(outp))
        # orient="split" preserves the name.
        dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
        outp = Index(**dec)
        self.assertTrue(i.equals(outp))
        self.assertTrue(i.name == outp.name)
        dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split"),
                                       numpy=True))
        outp = Index(**dec)
        self.assertTrue(i.equals(outp))
        self.assertTrue(i.name == outp.name)
        outp = Index(ujson.decode(ujson.encode(i, orient="values")))
        self.assertTrue(i.equals(outp))
        outp = Index(ujson.decode(ujson.encode(i, orient="values"), numpy=True))
        self.assertTrue(i.equals(outp))
        outp = Index(ujson.decode(ujson.encode(i, orient="records")))
        self.assertTrue(i.equals(outp))
        outp = Index(ujson.decode(ujson.encode(i, orient="records"), numpy=True))
        self.assertTrue(i.equals(outp))
        outp = Index(ujson.decode(ujson.encode(i, orient="index")))
        self.assertTrue(i.equals(outp))
        outp = Index(ujson.decode(ujson.encode(i, orient="index"), numpy=True))
        self.assertTrue(i.equals(outp))

    def test_datetimeindex(self):
        from pandas.tseries.index import date_range
        rng = date_range('1/1/2000', periods=20)
        # date_unit='ns' encodes timestamps as nanosecond integers.
        encoded = ujson.encode(rng, date_unit='ns')
        decoded = DatetimeIndex(np.array(ujson.decode(encoded)))
        self.assertTrue(rng.equals(decoded))
        ts = Series(np.random.randn(len(rng)), index=rng)
        decoded = Series(ujson.decode(ujson.encode(ts, date_unit='ns')))
        # Rebuild the DatetimeIndex from the decoded integer epoch values.
        idx_values = decoded.index.values.astype(np.int64)
        decoded.index = DatetimeIndex(idx_values)
        tm.assert_series_equal(ts, decoded)

    # --- malformed-array inputs: each must raise ValueError ---

    def test_decodeArrayTrailingCommaFail(self):
        input = "[31337,]"
        try:
            ujson.decode(input)
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"

    def test_decodeArrayLeadingCommaFail(self):
        input = "[,31337]"
        try:
            ujson.decode(input)
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"

    def test_decodeArrayOnlyCommaFail(self):
        input = "[,]"
        try:
            ujson.decode(input)
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"

    def test_decodeArrayUnmatchedBracketFail(self):
        input = "[]]"
        try:
            ujson.decode(input)
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"

    # --- well-formed inputs that must simply not raise ---

    def test_decodeArrayEmpty(self):
        input = "[]"
        ujson.decode(input)

    def test_decodeArrayOneItem(self):
        input = "[31337]"
        ujson.decode(input)

    def test_decodeBigValue(self):
        # int64 max is the largest accepted integer.
        input = "9223372036854775807"
        ujson.decode(input)

    def test_decodeSmallValue(self):
        # int64 min is the smallest accepted integer.
        input = "-9223372036854775808"
        ujson.decode(input)

    def test_decodeTooBigValue(self):
        # One past int64 max must be rejected.
        try:
            input = "9223372036854775808"
            ujson.decode(input)
        except ValueError as e:
            pass
        else:
            assert False, "expected ValueError"

    def test_decodeTooSmallValue(self):
        try:
            input = "-90223372036854775809"
            ujson.decode(input)
        except ValueError as e:
            pass
        else:
            assert False, "expected ValueError"

    # NOTE(review): the two "VeryToo*" tests below are byte-identical to
    # test_decodeTooBigValue/test_decodeTooSmallValue — candidates for removal
    # or for genuinely larger inputs.
    def test_decodeVeryTooBigValue(self):
        try:
            input = "9223372036854775808"
            ujson.decode(input)
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"

    def test_decodeVeryTooSmallValue(self):
        try:
            input = "-90223372036854775809"
            ujson.decode(input)
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"

    def test_decodeWithTrailingWhitespaces(self):
        # Trailing whitespace after the document is allowed.
        input = "{}\n\t "
        ujson.decode(input)

    def test_decodeWithTrailingNonWhitespaces(self):
        # Trailing garbage after the document is not.
        try:
            input = "{}\n\t a"
            ujson.decode(input)
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"

    def test_decodeArrayWithBigInt(self):
        # Value exceeds int64; must be rejected rather than wrapped.
        try:
            ujson.loads('[18446098363113800555]')
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"

    def test_decodeArrayFaultyUnicode(self):
        # NOTE(review): identical body to test_decodeArrayWithBigInt — the
        # faulty-unicode input this name promises appears to have been lost.
        try:
            ujson.loads('[18446098363113800555]')
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"

    def test_decodeFloatingPointAdditionalTests(self):
        # Decoded doubles must match Python's parser to 15 decimal places
        # across varying mantissa lengths and both signs.
        places = 15
        self.assertAlmostEqual(-1.1234567893, ujson.loads("-1.1234567893"), places=places)
        self.assertAlmostEqual(-1.234567893, ujson.loads("-1.234567893"), places=places)
        self.assertAlmostEqual(-1.34567893, ujson.loads("-1.34567893"), places=places)
        self.assertAlmostEqual(-1.4567893, ujson.loads("-1.4567893"), places=places)
        self.assertAlmostEqual(-1.567893, ujson.loads("-1.567893"), places=places)
        self.assertAlmostEqual(-1.67893, ujson.loads("-1.67893"), places=places)
        self.assertAlmostEqual(-1.7893, ujson.loads("-1.7893"), places=places)
        self.assertAlmostEqual(-1.893, ujson.loads("-1.893"), places=places)
        self.assertAlmostEqual(-1.3, ujson.loads("-1.3"), places=places)
        self.assertAlmostEqual(1.1234567893, ujson.loads("1.1234567893"), places=places)
        self.assertAlmostEqual(1.234567893, ujson.loads("1.234567893"), places=places)
        self.assertAlmostEqual(1.34567893, ujson.loads("1.34567893"), places=places)
        self.assertAlmostEqual(1.4567893, ujson.loads("1.4567893"), places=places)
        self.assertAlmostEqual(1.567893, ujson.loads("1.567893"), places=places)
        self.assertAlmostEqual(1.67893, ujson.loads("1.67893"), places=places)
        self.assertAlmostEqual(1.7893, ujson.loads("1.7893"), places=places)
        self.assertAlmostEqual(1.893, ujson.loads("1.893"), places=places)
        self.assertAlmostEqual(1.3, ujson.loads("1.3"), places=places)

    def test_encodeBigSet(self):
        # Smoke test: a 100k-element set encodes without error.
        s = set()
        for x in range(0, 100000):
            s.add(x)
        ujson.encode(s)

    def test_encodeEmptySet(self):
        s = set()
        self.assertEqual("[]", ujson.encode(s))

    def test_encodeSet(self):
        # Sets are unordered, so only membership of the decoded list is checked.
        s = set([1, 2, 3, 4, 5, 6, 7, 8, 9])
        enc = ujson.encode(s)
        dec = ujson.decode(enc)
        for v in dec:
            self.assertTrue(v in s)
def _clean_dict(d):
    """Return a copy of *d* whose keys are coerced to str."""
    return {str(key): value for key, value in compat.iteritems(d)}
if __name__ == '__main__':
    # Run this module's tests under nose: verbose, stop at the first
    # failure, and drop into pdb on errors and failures.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| mit |
gautam1858/tensorflow | tensorflow/tools/dist_test/python/census_widendeep.py | 48 | 11896 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed training and evaluation of a wide and deep model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sys
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
# Constants: Data download URLs
# NOTE(review): plain-HTTP UMass mirror of the UCI "Adult" census dataset;
# confirm the mirror is still live before relying on it.
TRAIN_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data"
TEST_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test"
# Define features for the model
# Define features for the model
def census_model_config():
  """Configuration for the census Wide & Deep model.

  Returns:
    columns: Column names to retrieve from the data source
    label_column: Name of the label column
    wide_columns: List of wide columns
    deep_columns: List of deep columns
    categorical_column_names: Names of the categorical columns
    continuous_column_names: Names of the continuous columns
  """
  # 1. Categorical base columns.
  gender = tf.contrib.layers.sparse_column_with_keys(
      column_name="gender", keys=["female", "male"])
  race = tf.contrib.layers.sparse_column_with_keys(
      column_name="race",
      keys=["Amer-Indian-Eskimo",
            "Asian-Pac-Islander",
            "Black",
            "Other",
            "White"])
  # Open-vocabulary columns are hashed rather than enumerated.
  education = tf.contrib.layers.sparse_column_with_hash_bucket(
      "education", hash_bucket_size=1000)
  marital_status = tf.contrib.layers.sparse_column_with_hash_bucket(
      "marital_status", hash_bucket_size=100)
  relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
      "relationship", hash_bucket_size=100)
  workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
      "workclass", hash_bucket_size=100)
  occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
      "occupation", hash_bucket_size=1000)
  native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
      "native_country", hash_bucket_size=1000)

  # 2. Continuous base columns.
  age = tf.contrib.layers.real_valued_column("age")
  # Bucketize age so the linear part can learn per-range weights.
  age_buckets = tf.contrib.layers.bucketized_column(
      age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
  education_num = tf.contrib.layers.real_valued_column("education_num")
  capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
  capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
  hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")

  # Wide part: sparse base columns plus crossed-feature interactions.
  wide_columns = [
      gender, native_country, education, occupation, workclass,
      marital_status, relationship, age_buckets,
      tf.contrib.layers.crossed_column([education, occupation],
                                       hash_bucket_size=int(1e4)),
      tf.contrib.layers.crossed_column([native_country, occupation],
                                       hash_bucket_size=int(1e4)),
      tf.contrib.layers.crossed_column([age_buckets, race, occupation],
                                       hash_bucket_size=int(1e6))]

  # Deep part: embeddings of the sparse columns plus raw continuous values.
  deep_columns = [
      tf.contrib.layers.embedding_column(workclass, dimension=8),
      tf.contrib.layers.embedding_column(education, dimension=8),
      tf.contrib.layers.embedding_column(marital_status, dimension=8),
      tf.contrib.layers.embedding_column(gender, dimension=8),
      tf.contrib.layers.embedding_column(relationship, dimension=8),
      tf.contrib.layers.embedding_column(race, dimension=8),
      tf.contrib.layers.embedding_column(native_country, dimension=8),
      tf.contrib.layers.embedding_column(occupation, dimension=8),
      age, education_num, capital_gain, capital_loss, hours_per_week]

  # Define the column names for the data sets.
  columns = ["age", "workclass", "fnlwgt", "education", "education_num",
             "marital_status", "occupation", "relationship", "race", "gender",
             "capital_gain", "capital_loss", "hours_per_week",
             "native_country", "income_bracket"]
  label_column = "label"
  categorical_columns = ["workclass", "education", "marital_status",
                         "occupation", "relationship", "race", "gender",
                         "native_country"]
  continuous_columns = ["age", "education_num", "capital_gain",
                        "capital_loss", "hours_per_week"]
  return (columns, label_column, wide_columns, deep_columns,
          categorical_columns, continuous_columns)
class CensusDataSource(object):
"""Source of census data."""
def __init__(self, data_dir, train_data_url, test_data_url,
columns, label_column,
categorical_columns, continuous_columns):
"""Constructor of CensusDataSource.
Args:
data_dir: Directory to save/load the data files
train_data_url: URL from which the training data can be downloaded
test_data_url: URL from which the test data can be downloaded
columns: Columns to retrieve from the data files (A list of strings)
label_column: Name of the label column
categorical_columns: Names of the categorical columns (A list of strings)
continuous_columns: Names of the continuous columns (A list of strings)
"""
# Retrieve data from disk (if available) or download from the web.
train_file_path = os.path.join(data_dir, "adult.data")
if os.path.isfile(train_file_path):
print("Loading training data from file: %s" % train_file_path)
train_file = open(train_file_path)
else:
urllib.urlretrieve(train_data_url, train_file_path)
test_file_path = os.path.join(data_dir, "adult.test")
if os.path.isfile(test_file_path):
print("Loading test data from file: %s" % test_file_path)
test_file = open(test_file_path)
else:
test_file = open(test_file_path)
urllib.urlretrieve(test_data_url, test_file_path)
# Read the training and testing data sets into Pandas DataFrame.
import pandas # pylint: disable=g-import-not-at-top
self._df_train = pandas.read_csv(train_file, names=columns,
skipinitialspace=True)
self._df_test = pandas.read_csv(test_file, names=columns,
skipinitialspace=True, skiprows=1)
# Remove the NaN values in the last rows of the tables
self._df_train = self._df_train[:-1]
self._df_test = self._df_test[:-1]
# Apply the threshold to get the labels.
income_thresh = lambda x: ">50K" in x
self._df_train[label_column] = (
self._df_train["income_bracket"].apply(income_thresh)).astype(int)
self._df_test[label_column] = (
self._df_test["income_bracket"].apply(income_thresh)).astype(int)
self.label_column = label_column
self.categorical_columns = categorical_columns
self.continuous_columns = continuous_columns
def input_train_fn(self):
return self._input_fn(self._df_train)
def input_test_fn(self):
return self._input_fn(self._df_test)
# TODO(cais): Turn into minibatch feeder
def _input_fn(self, df):
  """Input data function.

  Creates a dictionary mapping from each continuous feature column name
  (k) to the values of that column stored in a constant Tensor, another
  dictionary mapping each categorical column to a SparseTensor, and a
  constant label Tensor.

  Args:
    df: data feed (a pandas DataFrame holding the configured columns)

  Returns:
    feature columns and labels
  """
  # Continuous features: one dense constant Tensor per column.
  continuous_cols = {k: tf.constant(df[k].values)
                     for k in self.continuous_columns}
  # Creates a dictionary mapping from each categorical feature column name (k)
  # to the values of that column stored in a tf.SparseTensor (one string
  # value per row, hence shape [n, 1] with column index 0).
  categorical_cols = {
      k: tf.SparseTensor(
          indices=[[i, 0] for i in range(df[k].size)],
          values=df[k].values,
          dense_shape=[df[k].size, 1])
      for k in self.categorical_columns}
  # Merges the two dictionaries into one.
  # NOTE(review): dict.items() + dict.items() only works on Python 2,
  # where items() returns lists — confirm this file targets Python 2.
  feature_cols = dict(continuous_cols.items() + categorical_cols.items())
  # Converts the label column into a constant Tensor.
  label = tf.constant(df[self.label_column].values)
  # Returns the feature columns and the label.
  return feature_cols, label
def _create_experiment_fn(output_dir):  # pylint: disable=unused-argument
  """Experiment creation function.

  Builds a wide-and-deep (DNNLinearCombinedClassifier) model over the census
  data source and wraps it, together with its train/eval input functions, in
  a tf.contrib.learn.Experiment.

  Args:
    output_dir: unused; required by the learn_runner experiment_fn signature.

  Returns:
    A tf.contrib.learn.Experiment configured from FLAGS.
  """
  (columns, label_column, wide_columns, deep_columns, categorical_columns,
   continuous_columns) = census_model_config()
  census_data_source = CensusDataSource(FLAGS.data_dir,
                                        TRAIN_DATA_URL, TEST_DATA_URL,
                                        columns, label_column,
                                        categorical_columns,
                                        continuous_columns)
  # Advertise a fake PS cluster via TF_CONFIG so distributed plumbing is
  # exercised; the actual master address comes from FLAGS.master_grpc_url.
  os.environ["TF_CONFIG"] = json.dumps({
      "cluster": {
          tf.contrib.learn.TaskType.PS: ["fake_ps"] *
                                        FLAGS.num_parameter_servers
      },
      "task": {
          "index": FLAGS.worker_index
      }
  })
  config = run_config.RunConfig(master=FLAGS.master_grpc_url)
  # Wide (linear) + deep (tiny 1-layer, 5-unit DNN) combined classifier.
  estimator = tf.contrib.learn.DNNLinearCombinedClassifier(
      model_dir=FLAGS.model_dir,
      linear_feature_columns=wide_columns,
      dnn_feature_columns=deep_columns,
      dnn_hidden_units=[5],
      config=config)
  return tf.contrib.learn.Experiment(
      estimator=estimator,
      train_input_fn=census_data_source.input_train_fn,
      eval_input_fn=census_data_source.input_test_fn,
      train_steps=FLAGS.train_steps,
      eval_steps=FLAGS.eval_steps
  )
def main(unused_argv):
  """Entry point: run the experiment through learn_runner per FLAGS.schedule."""
  print("Worker index: %d" % FLAGS.worker_index)
  learn_runner.run(experiment_fn=_create_experiment_fn,
                   output_dir=FLAGS.output_dir,
                   schedule=FLAGS.schedule)
if __name__ == "__main__":
  # Command-line flags for data/model locations and distributed settings.
  parser = argparse.ArgumentParser()
  parser.register("type", "bool", lambda v: v.lower() == "true")
  parser.add_argument(
      "--data_dir",
      type=str,
      default="/tmp/census-data",
      help="Directory for storing the census data")
  parser.add_argument(
      "--model_dir",
      type=str,
      default="/tmp/census_wide_and_deep_model",
      help="Directory for storing the model"
  )
  parser.add_argument(
      "--output_dir",
      type=str,
      default="",
      help="Base output directory."
  )
  parser.add_argument(
      "--schedule",
      type=str,
      default="local_run",
      help="Schedule to run for this experiment."
  )
  parser.add_argument(
      "--master_grpc_url",
      type=str,
      default="",
      help="URL to master GRPC tensorflow server, e.g.,grpc://127.0.0.1:2222"
  )
  parser.add_argument(
      "--num_parameter_servers",
      type=int,
      default=0,
      help="Number of parameter servers"
  )
  parser.add_argument(
      "--worker_index",
      type=int,
      default=0,
      help="Worker index (>=0)"
  )
  parser.add_argument(
      "--train_steps",
      type=int,
      default=1000,
      help="Number of training steps"
  )
  parser.add_argument(
      "--eval_steps",
      type=int,
      default=1,
      help="Number of evaluation steps"
  )
  # FLAGS is read by main() and _create_experiment_fn(); unparsed args are
  # forwarded to tf.app.run.
  global FLAGS  # pylint:disable=global-at-module-level
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
Geertex/midioke | midiokeDOTexe.py | 1 | 1777 | # -*- coding: utf-8 -*-
"""
Created on a Sunday
@author: G-man
"""
import pyaudio
import wave
import struct
import math
import matplotlib.pyplot as plt
import numpy as np
from midiutil import MIDIFile
print("It is Recording.....")
print("If you want to stop just press 'Ctrl + C' ")
# Audio capture parameters: 16-bit mono at 44.1 kHz, read in 4096-frame chunks.
CHUNK = 4096
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
# Single-track MIDI file at 60 BPM, so one beat equals one second and note
# times can be expressed directly in seconds.
MIDIout = MIDIFile(1)
MIDIout.addTempo(0, 0, 60)
def getFFT(data):
    """Return (frequencies, magnitudes in dB) for the positive half-spectrum.

    The samples are tapered with a Hamming window before the FFT; magnitudes
    are converted to decibels via 10*log10. The module-level RATE is used as
    the sampling rate for the frequency axis.
    """
    windowed = data * np.hamming(len(data))
    spectrum = 10 * np.log10(np.abs(np.fft.fft(windowed)))
    freqs = np.fft.fftfreq(len(spectrum), 1.0 / RATE)
    half = int(len(freqs) / 2)
    return freqs[:half], spectrum[:half]
def freqToMidi(freqValue):
    """Convert a frequency in Hz to the nearest MIDI note number.

    The frequency is clamped to [20, 4000] Hz first, so out-of-band
    detections still yield a valid note. A440 maps to note 69; each octave
    spans 12 semitones (MIDI tuning standard).

    Args:
        freqValue: frequency in Hz (int or float).

    Returns:
        int: nearest MIDI note number.
    """
    # float() guards against truncating integer division (e.g. 100/440 == 0
    # under Python 2 semantics), which would feed log() a zero.
    clamped = float(min(max(freqValue, 20), 4000))
    return int(round(69 + 12 * math.log(clamped / 440.0, 2)))
p = pyaudio.PyAudio()
# Rolling 100-sample histories for live plotting.
plotFreq = [0] * 100
plotEnergy = [0] * 100
x = range(100)
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                frames_per_buffer=CHUNK)
plt.ion()
# Running note-start time in beats (tempo is 60 BPM, so beats == seconds).
# NOTE(review): CHUNK/RATE is integer division (0) under Python 2 — confirm
# this script targets Python 3.
time = 0;
try:
    while True:
        # Read one chunk of 16-bit samples and find the spectral peak.
        data = np.fromstring(stream.read(CHUNK),np.int16)
        freq, fft2 = getFFT(data)
        maxEng = 0.0
        maxIdx = 0
        for idx, value in enumerate(fft2):
            if value > maxEng:
                maxEng = value
                maxIdx = idx;
        #plotEnergy.insert(0,maxEng)
        #plotEnergy.pop()
        midi = freqToMidi(freq[maxIdx]);
        # Only record notes whose peak exceeds the 40 dB energy gate.
        if maxEng > 40:
            MIDIout.addNote(0, 0, midi, time, CHUNK/RATE, 100)
        time = time + (CHUNK/RATE)
        # Push the newest detected pitch into the scrolling plot.
        plotFreq.insert(0,midi)
        plotFreq.pop()
        plt.clf()
        #plt.plot(x, plotEnergy)
        plt.plot(x, plotFreq)
        plt.draw()
        plt.pause(0.01)
except KeyboardInterrupt:
    # Ctrl+C ends recording; fall through to cleanup and file write.
    pass
plt.show(block=True)
stream.stop_stream()
stream.close()
p.terminate()
with open("politically_correct_expression.mid", "wb") as output_file:
MIDIout.writeFile(output_file) | gpl-3.0 |
mgarbanzo/radarphysics | simplecapons.py | 1 | 2107 | #!/usr/bin/python
import numpy as np
from scipy import fftpack, pi
import matplotlib.pyplot as plt
#Frequencies to be used in the signal
# (16 normalized frequencies; one complex exponential snapshot per row of M.)
freqs = 0.09, -0.2, 0.2, -0.3, 0.3, 0.08, -0.21, 0.22, -0.31, 0.32, 0.093, -0.24, 0.25, -0.34, 0.35, 0.098
time = np.arange(0,64,1)
# Complex-valued working buffers of length 64.
sgn = np.zeros_like(time)+np.zeros_like(time,complex)
sgn1 = np.zeros_like(time)+np.zeros_like(time,complex)
sgn2 = np.zeros_like(time)+np.zeros_like(time,complex)
sgn3 = np.zeros_like(time)+np.zeros_like(time,complex)
sgn4 = np.zeros_like(time)+np.zeros_like(time,complex)
sgn5 = np.zeros_like(time)+np.zeros_like(time,complex)
# 16x64 data matrix; rows are overwritten below, one per frequency.
# NOTE(review): np.mat and np.complex are Python 2-era APIs removed from
# modern NumPy — confirm the intended interpreter before running.
M=np.mat([sgn1,sgn2,sgn3,sgn4,sgn5,sgn1,sgn2,sgn3,sgn4,sgn5,sgn1,sgn2,sgn3,sgn4,sgn5,sgn1])
k=0
for freq in freqs:
    # Build the complex exponential exp(j*2*pi*freq*t) sample by sample.
    real = np.cos(2*pi*freq*time)
    imag = np.sin(2*pi*freq*time)
    for n in range(len(time)):
        sgn[n] = np.complex(real[n],imag[n])
    M[k] = sgn
    k+=1
np.set_printoptions(precision=5,suppress=True)
# Sample covariance matrix (M times its conjugate transpose) and its inverse.
Ry = M*M.H
print Ry
RyI = Ry.I
print RyI
print "RyI Shape: ", RyI.shape
##Defining the a(omega) vector:
# Steering vector a(omega) with elements exp(-j*m*omega), m = 0..15.
a = np.zeros_like(range(16))+np.zeros_like(range(16),complex)
omega = -0.1
print "Omega: ", omega
for m in range(len(a)):
    a[m] = np.complex(np.cos(-m*omega),np.sin(-m*omega))
a=np.matrix(a)
print a
print "a Shape: ", a.shape
print "a.H Shape: ", a.H.shape
# Capon (MVDR) weight vector for the chosen omega.
h = RyI*a.T/(a.H.T*RyI*a.T)
print "h",h
# Sanity check: the filter response at omega itself must be unity.
HO = np.conjugate(h).T*a.T
print "At the value of w is should be 1: ",HO,np.abs(HO)
# Sweep the filter response H(w) over the whole band.
w = np.arange(-pi,pi,0.01)
H = np.zeros_like(w)
for i,k in enumerate(w):
    a = np.zeros_like(range(16))+np.zeros_like(range(16),complex)
    for m in range(len(a)):
        a[m] = np.complex(np.cos(-m*k),np.sin(-m*k))
    a=np.matrix(a)
    tmp = np.conjugate(h).T*a.T
    #print tmp
    H[i] = np.real(np.abs(tmp))
#print H
H=np.array(H)
# Capon spectral estimate P(w) = 1 / (a^H Ry^-1 a) over the same sweep.
P = np.zeros_like(w)
for i,k in enumerate(w):
    a = np.zeros_like(range(16))+np.zeros_like(range(16),complex)
    for m in range(len(a)):
        a[m] = np.complex(np.cos(-m*k),np.sin(-m*k))
    a=np.matrix(a)
    tmp = 1/(a.H.T*RyI*a.T)
    #print tmp
    P[i] = np.real(np.abs(tmp))
P=np.array(P)
plt.plot(w,P)
plt.show()
#~ plt.plot(xv,np.real(ft))
#~ plt.plot(xv,np.imag(ft))
#~ plt.show()
| gpl-3.0 |
yan9yu/NWD | src/newwords.py | 1 | 6475 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division
import re
import os
import math
import config
import pandas as pd
from collections import Counter
from collections import defaultdict
__author__ = 'yan9yu'
class NewWordsDetector:
    """Detect previously unseen words in Chinese text.

    Candidates are scored on three signals — frequency, internal cohesion
    (pointwise stickiness of the word's two halves), and boundary entropy —
    and filtered against the thresholds in `config.Threshold`.
    Python 2 code (xrange, iteritems, tuple-parameter lambdas).
    """
    def __init__(self, content):
        # Raw text to analyze; all derived tables are built eagerly.
        self.content = content
        self.words_frequency = self.get_words_freq()
        self.words = self.get_words()
        self.words_cohesion = self.get_words_cohesion()
        self.words_entropy = self.get_words_entropy()
        self.new_words = self.new_words_filter()
    def get_words_freq(self):
        """Count every n-gram (n = 1..MAX_NGRAM) over the CJK-only text."""
        words_frequency = Counter()
        # Keep only Chinese characters; strip English letters and digits.
        pattern = re.compile(u'[^\u4e00-\u9fa5]+')
        content = pattern.sub('', self.content)
        # Segment the text with sliding windows; current max window is 5.
        ## Rightward slices of the string
        words_frequency.update(content[ii:ii + nn] for nn in xrange(1, config.Threshold.MAX_NGRAM + 1) for ii in
                               xrange(len(content) - nn, -1, -1))
        # ## Leftward slices (taken over the reversed string, then re-reversed)
        rev_content = content[::-1]
        words_frequency.update(
            rev_content[ii:ii + nn][::-1] for nn in xrange(1, config.Threshold.MAX_NGRAM + 1) for ii in
            xrange(len(rev_content) - nn, -1, -1))
        return words_frequency
    def get_words(self):
        """Build the candidate table, initializing all scores to zero."""
        # Drop candidates whose frequency is too low or length out of range.
        raw = [word for word, count in self.words_frequency.iteritems() if
               count >= config.Threshold.MIN_FREQUENCE and config.Threshold.MIN_LENGTH <= len(
                   word) <= config.Threshold.MAX_LENGTH]
        words = {}
        for word in raw:
            words[word] = {}
            words[word]["frequency"] = 0
            words[word]["cohesion"] = 0
            words[word]["entropy"] = 0
        return words
    def get_words_cohesion(self):
        """Compute cohesion (internal stickiness) for each candidate word."""
        words_cohesion = defaultdict(float)
        MINF = config.Threshold.MIN_FREQUENCE
        for word in self.words:
            length = len(word)
            frequency = self.words_frequency.get(word, config.Threshold.MIN_FREQUENCE)
            # Split the word in two at every position (prefix length grows by
            # one each time); cohesion is the worst freq(word)/(freq(a)*freq(b)).
            cohesions = map(lambda (x, y, z): x / (y * z),
                            [(frequency, self.words_frequency.get(word[0:ii], MINF),
                              self.words_frequency.get(word[ii:length], MINF)) for ii in xrange(1, length)])
            words_cohesion[word] = min(cohesions)
        return words_cohesion
    def get_words_entropy(self):
        """Compute boundary (right-side) entropy for each candidate word."""
        def _get_entropy(lists):
            # Shannon entropy of the count distribution in `lists`.
            _entropy = 0.0
            if lists:
                _sum = sum(lists)
                _prob = map(lambda x: x / _sum, lists)
                _entropy = sum(map(lambda x: -x * math.log(x), _prob))
            return _entropy
        right_entropy = defaultdict(list)
        # TODO: rightmost-entropy computation
        for word, count in self.words_frequency.iteritems():
            length = len(word)
            right_word = word[: - 1]
            if length >= config.Threshold.MIN_LENGTH and right_word in self.words:
                right_entropy[right_word].append(count)
        # Entropy per candidate from its right-extension counts.
        entropy = map(lambda x: _get_entropy(right_entropy.get(x, None)), self.words)
        words_entropy = dict(zip(self.words, entropy))
        return words_entropy
    def merge(self, data):
        """Merge forward and reverse detection results into self.words."""
        for word in data:
            act_word = word[::-1]
            if act_word in self.words:
                self.words[act_word]["frequency"] += data[word]["frequency"]
                self.words[act_word]["cohesion"] += data[word]["cohesion"]
                self.words[act_word]["entropy"] += data[word]["entropy"]
            else:
                self.words[act_word] = data[word]
        return self.words
    def new_words_filter(self):
        """Filter candidates by the configured thresholds."""
        new_words = defaultdict(dict)
        for word in self.words:
            frequency = self.words_frequency[word]
            cohesion = self.words_cohesion[word]
            entropy = self.words_entropy[word]
            data = {}
            if frequency >= config.Threshold.MIN_FREQUENCE and cohesion >= config.Threshold.MIN_COHESION and entropy >= config.Threshold.MIN_ENTROPY:
                data["frequency"] = frequency
                data["cohesion"] = cohesion
                data["entropy"] = entropy
                new_words[word] = data
        # Drop shorter words that are substrings of a longer accepted word,
        # keeping only the maximal-length new words.
        words = new_words.keys()
        words.sort(key=len)
        sub_words = set()
        for ii in xrange(0, len(words) - 1):
            word_ii = words[ii]
            for jj in xrange(ii + 1, len(words)):
                word_jj = words[jj]
                if len(word_ii) < len(word_jj) and word_ii in word_jj:
                    sub_words.add(word_ii)
        for word in sub_words:
            if word in new_words:
                del new_words[word]
        return new_words
def get_content(path):
    """Concatenate the contents of every file directly under `path`.

    Files are read as bytes and decoded as UTF-8, ignoring undecodable
    sequences (Python 2: str.decode). Returns a single unicode string.
    """
    content = ""
    files = [path + item for item in os.listdir(path)]
    for f in files:
        with open(f, "r") as fp:
            data = "".join(fp.readlines())
            if len(data) > 0:
                raw = data.strip().decode(encoding='utf-8', errors='ignore')
                content += raw
    return content
def main():
    """Run new-word detection over the corpus and write a TSV of results."""
    # Announce the active thresholds before the (potentially long) run.
    msg = "WARNING: Program running using below configuation\nFrequency:\t%s\nCohesion:\t%s\nEntropy:\t%s\n" % (
        config.Threshold.MIN_FREQUENCE, config.Threshold.MIN_COHESION, config.Threshold.MIN_ENTROPY)
    print config.bcolors.WARNING + msg + config.bcolors.ENDC
    content = get_content(config.Path.CORPUS)
    nwd = NewWordsDetector(content)
    # Optionally detect over the reversed text as well and merge the results.
    if config.Detection.IS_REVERSE:
        content_ext = content[::-1]
        new_words_ext = NewWordsDetector(content_ext)
        nwd.merge(new_words_ext.new_words)
    if len(nwd.new_words) == 0:
        exit(-1)
    msg = "COMPLETED! Get %s new words" % (len(nwd.new_words))
    print config.bcolors.OKGREEN + msg + config.bcolors.ENDC
    # Sort by entropy (descending) and persist as a tab-separated file.
    df = pd.DataFrame(nwd.new_words).T
    df = df.sort_index(by=["entropy"], ascending=False)
    result_path = config.Path.RESULT + "results.dat"
    df.to_csv(result_path, sep="\t", encoding="utf-8")
if __name__ == "__main__":
    main()
| mit |
rougier/Neurosciences | superior-colliculus/taouali-et-at-2014/fig-accuracy.py | 1 | 3999 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright INRIA
# Contributors: Wahiba Taouali (Wahiba.Taouali@inria.fr)
# Nicolas P. Rougier (Nicolas.Rougier@inria.fr)
#
# This software is governed by the CeCILL license under French law and abiding
# by the rules of distribution of free software. You can use, modify and/ or
# redistribute the software under the terms of the CeCILL license as circulated
# by CEA, CNRS and INRIA at the following URL
# http://www.cecill.info/index.en.html.
#
# As a counterpart to the access to the source code and rights to copy, modify
# and redistribute granted by the license, users are provided only with a
# limited warranty and the software's author, the holder of the economic
# rights, and the successive licensors have only limited liability.
#
# In this respect, the user's attention is drawn to the risks associated with
# loading, using, modifying and/or developing or reproducing the software by
# the user in light of its specific status of free software, that may mean that
# it is complicated to manipulate, and that also therefore means that it is
# reserved for developers and experienced professionals having in-depth
# computer knowledge. Users are therefore encouraged to load and test the
# software's suitability as regards their requirements in conditions enabling
# the security of their systems and/or data to be ensured and, more generally,
# to use and operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
# -----------------------------------------------------------------------------
import os.path
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from mpl_toolkits.axes_grid1.inset_locator import inset_axes, zoomed_inset_axes
from model import *
from graphics import *
from stimulus import *
from parameters import *
from projections import *
def decode(Z, xmin=+0.0, xmax=+2.0, ymin=-1.0, ymax=+1.0):
    """Decode a 2-D activity map into its activity-weighted center of mass.

    Column indices are mapped linearly onto [xmin, xmax] and row indices
    onto [ymin, ymax]; the result is the mean position weighted by Z.

    Bug fix: the original normalized column indices by the number of ROWS
    (Z.shape[0]-1) and row indices by the number of COLUMNS (Z.shape[1]-1),
    which is only correct for square maps. Each axis is now scaled by its
    own length.

    Args:
        Z: 2-D array of (non-negative) activity values; must not sum to 0.
        xmin, xmax: x-range mapped onto the column axis.
        ymin, ymax: y-range mapped onto the row axis.

    Returns:
        (x, y): center of mass of Z in map coordinates.
    """
    Y, X = np.mgrid[0:Z.shape[0], 0:Z.shape[1]]
    X = xmin + X / float(Z.shape[1] - 1) * (xmax - xmin)
    Y = ymin + Y / float(Z.shape[0] - 1) * (ymax - ymin)
    total = Z.sum()
    return (Z * X).sum() / total, (Z * Y).sum() / total
# Target grid: eccentricities (degrees) crossed with azimuths (degrees).
targets = []
for i in [2,3,4,5,6,7,8,9,10,15,20]:
    for j in [-45, -30, -15, 0, +15, +30, +45]:
        targets.append((i,j))
# T holds each target's position in normalized log-polar map coordinates.
T = np.zeros((len(targets),2))
for i,target in enumerate(targets):
    rho,theta = target
    rho,theta = rho/90.0, np.pi*theta/180.0
    x,y = polar_to_logpolar(rho,theta)
    T[i] = 2*x,2*y-1
# Run the model once per target (expensive); cache decoded positions on disk.
if not os.path.exists('accuracy.npy'):
    model = Model()
    D = np.zeros((len(targets),2))
    for i,target in enumerate(targets):
        rho,theta = target
        x_,y_ = polar_to_logpolar(rho/90.0, np.pi*theta/180.0)
        x_,y_ = 2*x_, 2*y_-1
        model.reset()
        model.R = stimulus((rho, theta))
        model.run(duration=5*second, dt=5*millisecond, epsilon=0.001)
        # Decode the collicular activity into a map position and report error.
        x,y = decode(model.SC_V)
        D[i] = x,y
        print 'Target at (%d,%d): %f' % (rho,theta, np.sqrt((x-x_)*(x-x_) + (y-y_)*(y-y_)))
    np.save('accuracy.npy', D)
else:
    D = np.load('accuracy.npy')
# Plot: a line from each true target (filled dot) to its decoded position
# (open dot) on the log-polar frame.
fig = plt.figure(figsize=(8,8), facecolor='w')
ax1 = plt.subplot(111, aspect=1)
logpolar_frame(ax1)
for i in range(len(D)):
    plt.plot([T[i,0],D[i,0]],[T[i,1],D[i,1]], color='k')
ax1.scatter(T[:,0], T[:,1], s=50, color='k', marker='o')
ax1.scatter(D[:,0], D[:,1], s=50, color='k', marker='o', facecolors='w')
#axins = inset_axes(ax1, width='25%', height='8%', loc=3)
#X = np.linspace(0,90,model.RT_shape[1])
#retina = stimulus(position=(45.0,0.0), size= 25/90.0)
#axins.plot(X, retina[retina_shape[0]/2], lw=1, color='k')
#axins.set_xticks([])
#axins.set_yticks([])
#axins.set_xlim(25,65)
#axins.set_ylim(-0.1,1.1)
plt.savefig("fig-accuracy.pdf")
plt.show()
| bsd-3-clause |
ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/contrib/learn/python/learn/learn_io/__init__.py | 42 | 2656 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to allow different io formats (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_data
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_labels
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import HAS_DASK
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import queue_parsed_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_record_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.numpy_io import numpy_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_data
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_labels
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_matrix
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import pandas_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.generator_io import generator_input_fn
| mit |
nelango/ViralityAnalysis | model/lib/pandas/core/strings.py | 9 | 50351 | import numpy as np
from pandas.compat import zip
from pandas.core.common import (isnull, _values_from_object, is_bool_dtype, is_list_like,
is_categorical_dtype, is_object_dtype, take_1d)
import pandas.compat as compat
from pandas.core.base import AccessorProperty, NoNewAttributesMixin
from pandas.util.decorators import Appender, deprecate_kwarg
import re
import pandas.lib as lib
import warnings
import textwrap
_shared_docs = dict()
def _get_array_list(arr, others):
    """Normalize `arr` plus `others` into a list of object-dtype ndarrays.

    If `others` is a non-empty sequence whose first element is itself
    list-like (list, ndarray, or Series), each element becomes one array;
    otherwise `others` is treated as a single array alongside `arr`.
    """
    from pandas.core.series import Series
    if len(others) and isinstance(_values_from_object(others)[0],
                                  (list, np.ndarray, Series)):
        arrays = [arr] + list(others)
    else:
        arrays = [arr, others]
    return [np.asarray(x, dtype=object) for x in arrays]
def str_cat(arr, others=None, sep=None, na_rep=None):
    """
    Concatenate strings in the Series/Index with given separator.

    Parameters
    ----------
    others : list-like, or list of list-likes
        If None, returns str concatenating strings of the Series
    sep : string or None, default None
    na_rep : string or None, default None
        If None, an NA in any array will propagate

    Returns
    -------
    concat : Series/Index of objects or str

    Examples
    --------
    If ``others`` is specified, corresponding values are
    concatenated with the separator. Result will be a Series of strings.

    >>> Series(['a', 'b', 'c']).str.cat(['A', 'B', 'C'], sep=',')
    0    a,A
    1    b,B
    2    c,C
    dtype: object

    Otherwise, strings in the Series are concatenated. Result will be a string.

    >>> Series(['a', 'b', 'c']).str.cat(sep=',')
    'a,b,c'

    Also, you can pass a list of list-likes.

    >>> Series(['a', 'b']).str.cat([['x', 'y'], ['1', '2']], sep=',')
    0    a,x,1
    1    b,y,2
    dtype: object
    """
    if sep is None:
        sep = ''
    if others is not None:
        # Element-wise concatenation across multiple equal-length arrays.
        arrays = _get_array_list(arr, others)
        n = _length_check(arrays)
        masks = np.array([isnull(x) for x in arrays])
        cats = None
        if na_rep is None:
            # An NA anywhere in a row propagates: join only the clean rows.
            na_mask = np.logical_or.reduce(masks, axis=0)
            result = np.empty(n, dtype=object)
            np.putmask(result, na_mask, np.nan)
            notmask = ~na_mask
            tuples = zip(*[x[notmask] for x in arrays])
            cats = [sep.join(tup) for tup in tuples]
            result[notmask] = cats
        else:
            # Replace NAs by na_rep, then fold the arrays together with sep.
            for i, x in enumerate(arrays):
                x = np.where(masks[i], na_rep, x)
                if cats is None:
                    cats = x
                else:
                    cats = cats + sep + x
            result = cats
        return result
    else:
        # No `others`: reduce the single array to one joined string.
        arr = np.asarray(arr, dtype=object)
        mask = isnull(arr)
        if na_rep is None and mask.any():
            return np.nan
        return sep.join(np.where(mask, na_rep, arr))
def _length_check(others):
n = None
for x in others:
if n is None:
n = len(x)
elif len(x) != n:
raise ValueError('All arrays must be same length')
return n
def _na_map(f, arr, na_result=np.nan, dtype=object):
    """Apply `f` element-wise, leaving NA positions set to `na_result`."""
    # should really _check_ for NA
    return _map(f, arr, na_mask=True, na_value=na_result, dtype=dtype)
def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):
    """Apply `f` over an array-like, optionally skipping NA positions.

    With na_mask=True, `f` is only applied to non-NA elements; NA slots get
    `na_value`. If `f` raises TypeError/AttributeError on some element, the
    whole map is retried with a wrapper that substitutes `na_value` for the
    offending elements.
    """
    from pandas.core.series import Series
    if not len(arr):
        return np.ndarray(0, dtype=dtype)
    if isinstance(arr, Series):
        arr = arr.values
    if not isinstance(arr, np.ndarray):
        arr = np.asarray(arr, dtype=object)
    if na_mask:
        mask = isnull(arr)
        try:
            result = lib.map_infer_mask(arr, f, mask.view(np.uint8))
        except (TypeError, AttributeError):
            # Per-element fallback: swallow the same two exception types and
            # emit na_value for elements f cannot handle.
            def g(x):
                try:
                    return f(x)
                except (TypeError, AttributeError):
                    return na_value
            return _map(g, arr, dtype=dtype)
        if na_value is not np.nan:
            np.putmask(result, mask, na_value)
            if result.dtype == object:
                result = lib.maybe_convert_objects(result)
        return result
    else:
        return lib.map_infer(arr, f)
def str_count(arr, pat, flags=0):
    """
    Count occurrences of pattern in each string of the Series/Index.

    Parameters
    ----------
    pat : string, valid regular expression
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE

    Returns
    -------
    counts : Series/Index of integer values
    """
    compiled = re.compile(pat, flags=flags)

    def _count_matches(text):
        return len(compiled.findall(text))

    return _na_map(_count_matches, arr, dtype=int)
def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
    """
    Return boolean Series/``array`` whether given pattern/regex is
    contained in each string in the Series/Index.

    Parameters
    ----------
    pat : string
        Character sequence or regular expression
    case : boolean, default True
        If True, case sensitive
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE
    na : default NaN, fill value for missing values.
    regex : bool, default True
        If True use re.search, otherwise use Python in operator

    Returns
    -------
    contained : Series/array of boolean values

    See Also
    --------
    match : analogous, but stricter, relying on re.match instead of re.search
    """
    if regex:
        if not case:
            flags |= re.IGNORECASE
        regex = re.compile(pat, flags=flags)
        # A pattern with capture groups is likely a mistaken str.extract call.
        if regex.groups > 0:
            warnings.warn("This pattern has match groups. To actually get the"
                          " groups, use str.extract.", UserWarning, stacklevel=3)
        f = lambda x: bool(regex.search(x))
    else:
        # Literal substring test; case-insensitive path uppercases both sides.
        if case:
            f = lambda x: pat in x
        else:
            upper_pat = pat.upper()
            f = lambda x: upper_pat in x
            uppered = _na_map(lambda x: x.upper(), arr)
            return _na_map(f, uppered, na, dtype=bool)
    return _na_map(f, arr, na, dtype=bool)
def str_startswith(arr, pat, na=np.nan):
    """
    Return boolean Series/``array`` indicating whether each string in the
    Series/Index starts with passed pattern. Equivalent to
    :meth:`str.startswith`.

    Parameters
    ----------
    pat : string
        Character sequence
    na : bool, default NaN

    Returns
    -------
    startswith : Series/array of boolean values
    """
    def _starts(text):
        return text.startswith(pat)

    return _na_map(_starts, arr, na, dtype=bool)
def str_endswith(arr, pat, na=np.nan):
    """
    Return boolean Series indicating whether each string in the
    Series/Index ends with passed pattern. Equivalent to
    :meth:`str.endswith`.

    Parameters
    ----------
    pat : string
        Character sequence
    na : bool, default NaN

    Returns
    -------
    endswith : Series/array of boolean values
    """
    def _ends(text):
        return text.endswith(pat)

    return _na_map(_ends, arr, na, dtype=bool)
def str_replace(arr, pat, repl, n=-1, case=True, flags=0):
    """
    Replace occurrences of pattern/regex in the Series/Index with
    some other string. Equivalent to :meth:`str.replace` or
    :func:`re.sub`.

    Parameters
    ----------
    pat : string
        Character sequence or regular expression
    repl : string
        Replacement sequence
    n : int, default -1 (all)
        Number of replacements to make from start
    case : boolean, default True
        If True, case sensitive
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE

    Returns
    -------
    replaced : Series/Index of objects
    """
    # Regex path is taken for multi-char patterns, case-insensitivity, or
    # explicit flags; a single-char, case-sensitive pattern uses str.replace.
    use_re = not case or len(pat) > 1 or flags
    if use_re:
        if not case:
            flags |= re.IGNORECASE
        regex = re.compile(pat, flags=flags)
        # re.sub counts 0 as "replace all", matching str.replace's n=-1.
        n = n if n >= 0 else 0

        def f(x):
            return regex.sub(repl, x, count=n)
    else:
        f = lambda x: x.replace(pat, repl, n)
    return _na_map(f, arr)
def str_repeat(arr, repeats):
    """
    Duplicate each string in the Series/Index by indicated number
    of times.

    Parameters
    ----------
    repeats : int or array
        Same value for all (int) or different value per (array)

    Returns
    -------
    repeated : Series/Index of objects
    """
    if np.isscalar(repeats):
        # Same repeat count for every element; try bytes first, fall back
        # to text, so both str and bytes elements are handled.
        def rep(x):
            try:
                return compat.binary_type.__mul__(x, repeats)
            except TypeError:
                return compat.text_type.__mul__(x, repeats)

        return _na_map(rep, arr)
    else:
        # Per-element repeat counts, applied pairwise.
        def rep(x, r):
            try:
                return compat.binary_type.__mul__(x, r)
            except TypeError:
                return compat.text_type.__mul__(x, r)

        repeats = np.asarray(repeats, dtype=object)
        result = lib.vec_binop(_values_from_object(arr), repeats, rep)
        return result
def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=False):
    """
    Deprecated: Find groups in each string in the Series/Index
    using passed regular expression.
    If as_indexer=True, determine if each string matches a regular expression.

    Parameters
    ----------
    pat : string
        Character sequence or regular expression
    case : boolean, default True
        If True, case sensitive
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE
    na : default NaN, fill value for missing values.
    as_indexer : False, by default, gives deprecated behavior better achieved
        using str_extract. True return boolean indexer.

    Returns
    -------
    Series/array of boolean values
        if as_indexer=True
    Series/Index of tuples
        if as_indexer=False, default but deprecated

    See Also
    --------
    contains : analagous, but less strict, relying on re.search instead of
        re.match
    extract : now preferred to the deprecated usage of match (as_indexer=False)

    Notes
    -----
    To extract matched groups, which is the deprecated behavior of match, use
    str.extract.
    """
    if not case:
        flags |= re.IGNORECASE

    regex = re.compile(pat, flags=flags)

    if (not as_indexer) and regex.groups > 0:
        # Do this first, to make sure it happens even if the re.compile
        # raises below.
        warnings.warn("In future versions of pandas, match will change to"
                      " always return a bool indexer.", FutureWarning,
                      stacklevel=3)

    if as_indexer and regex.groups > 0:
        warnings.warn("This pattern has match groups. To actually get the"
                      " groups, use str.extract.", UserWarning, stacklevel=3)

    # If not as_indexer and regex.groups == 0, this returns empty lists
    # and is basically useless, so we will not warn.

    if (not as_indexer) and regex.groups > 0:
        # Legacy behavior: return the matched groups (or an empty list).
        dtype = object

        def f(x):
            m = regex.match(x)
            if m:
                return m.groups()
            else:
                return []
    else:
        # This is the new behavior of str_match: a boolean per element.
        dtype = bool
        f = lambda x: bool(regex.match(x))

    return _na_map(f, arr, na, dtype=dtype)
def _get_single_group_name(rx):
try:
return list(rx.groupindex.keys()).pop()
except IndexError:
return None
def str_extract(arr, pat, flags=0):
    """
    Find groups in each string in the Series using passed regular
    expression.

    Parameters
    ----------
    pat : string
        Pattern or regular expression
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE

    Returns
    -------
    extracted groups : Series (one group) or DataFrame (multiple groups)
        Note that dtype of the result is always object, even when no match is
        found and the result is a Series or DataFrame containing only NaN
        values.

    Examples
    --------
    A pattern with one group will return a Series. Non-matches will be NaN.

    >>> Series(['a1', 'b2', 'c3']).str.extract('[ab](\d)')
    0      1
    1      2
    2    NaN
    dtype: object

    A pattern with more than one group will return a DataFrame.

    >>> Series(['a1', 'b2', 'c3']).str.extract('([ab])(\d)')
         0    1
    0    a    1
    1    b    2
    2  NaN  NaN

    A pattern may contain optional groups.

    >>> Series(['a1', 'b2', 'c3']).str.extract('([ab])?(\d)')
         0  1
    0    a  1
    1    b  2
    2  NaN  3

    Named groups will become column names in the result.

    >>> Series(['a1', 'b2', 'c3']).str.extract('(?P<letter>[ab])(?P<digit>\d)')
      letter digit
    0      a     1
    1      b     2
    2    NaN   NaN
    """
    from pandas.core.series import Series
    from pandas.core.frame import DataFrame
    from pandas.core.index import Index

    regex = re.compile(pat, flags=flags)
    # just to be safe, check this
    if regex.groups == 0:
        raise ValueError("This pattern contains no groups to capture.")
    # Row emitted for non-strings and non-matches: one NaN per group.
    empty_row = [np.nan]*regex.groups

    def f(x):
        if not isinstance(x, compat.string_types):
            return empty_row
        m = regex.search(x)
        if m:
            return [np.nan if item is None else item for item in m.groups()]
        else:
            return empty_row

    if regex.groups == 1:
        # Single group -> 1-D result (Series-shaped), named after the group.
        result = np.array([f(val)[0] for val in arr], dtype=object)
        name = _get_single_group_name(regex)
    else:
        if isinstance(arr, Index):
            raise ValueError("only one regex group is supported with Index")
        name = None
        # Column labels: group name if provided, else the group's position.
        names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
        columns = [names.get(1 + i, i) for i in range(regex.groups)]
        if arr.empty:
            result = DataFrame(columns=columns, dtype=object)
        else:
            result = DataFrame([f(val) for val in arr],
                               columns=columns,
                               index=arr.index,
                               dtype=object)
    return result, name
def str_get_dummies(arr, sep='|'):
    """
    Split each string in the Series by sep and return a frame of
    dummy/indicator variables.

    Parameters
    ----------
    sep : string, default "|"
        String to split on.

    Returns
    -------
    dummies : DataFrame

    Examples
    --------
    >>> Series(['a|b', 'a', 'a|c']).str.get_dummies()
       a  b  c
    0  1  1  0
    1  1  0  0
    2  1  0  1

    >>> Series(['a|b', np.nan, 'a|c']).str.get_dummies()
       a  b  c
    0  1  1  0
    1  0  0  0
    2  1  0  1

    See Also
    --------
    pandas.get_dummies
    """
    from pandas.core.frame import DataFrame
    from pandas.core.index import Index

    # GH9980, Index.str does not support get_dummies() as it returns a frame
    if isinstance(arr, Index):
        raise TypeError("get_dummies is not supported for string methods on Index")

    # TODO remove this hack?
    # Surrounding every value with sep lets membership be tested with a
    # simple `sep + tag + sep in value` substring check below.
    arr = arr.fillna('')
    try:
        arr = sep + arr + sep
    except TypeError:
        arr = sep + arr.astype(str) + sep

    # Collect the sorted set of distinct tags (dropping the empty string).
    tags = set()
    for ts in arr.str.split(sep):
        tags.update(ts)
    tags = sorted(tags - set([""]))

    dummies = np.empty((len(arr), len(tags)), dtype=np.int64)

    # One indicator column per tag.
    for i, t in enumerate(tags):
        pat = sep + t + sep
        dummies[:, i] = lib.map_infer(arr.values, lambda x: pat in x)
    return DataFrame(dummies, arr.index, tags)
def str_join(arr, sep):
    """
    Join lists contained as elements in the Series/Index with
    passed delimiter. Equivalent to :meth:`str.join`.

    Parameters
    ----------
    sep : string
        Delimiter

    Returns
    -------
    joined : Series/Index of objects
    """
    def _join(parts):
        return sep.join(parts)

    return _na_map(_join, arr)
def str_findall(arr, pat, flags=0):
    """
    Find all occurrences of pattern or regular expression in the
    Series/Index. Equivalent to :func:`re.findall`.

    Parameters
    ----------
    pat : string
        Pattern or regular expression
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE

    Returns
    -------
    matches : Series/Index of lists
    """
    compiled = re.compile(pat, flags=flags)
    return _na_map(compiled.findall, arr)
def str_find(arr, sub, start=0, end=None, side='left'):
    """
    Return indexes in each strings in the Series/Index where the
    substring is fully contained between [start:end]. Return -1 on failure.
    Parameters
    ----------
    sub : str
        Substring being searched
    start : int
        Left edge index
    end : int
        Right edge index
    side : {'left', 'right'}, default 'left'
        Specifies a starting side, equivalent to ``find`` or ``rfind``
    Returns
    -------
    found : Series/Index of integer values
    """
    if not isinstance(sub, compat.string_types):
        msg = 'expected a string object, not {0}'
        raise TypeError(msg.format(type(sub).__name__))
    # Map the requested side onto the corresponding str method name.
    method_for_side = {'left': 'find', 'right': 'rfind'}
    if side not in method_for_side:  # pragma: no cover
        raise ValueError('Invalid side')
    method = method_for_side[side]
    # Only pass `end` through when the caller supplied one, matching the
    # two-argument/three-argument forms of str.find/str.rfind.
    if end is None:
        searcher = lambda x: getattr(x, method)(sub, start)
    else:
        searcher = lambda x: getattr(x, method)(sub, start, end)
    return _na_map(searcher, arr, dtype=int)
def str_index(arr, sub, start=0, end=None, side='left'):
    """
    Return indexes in each strings in the Series/Index where the
    substring is fully contained between [start:end]. Same as ``str_find``
    except that, via :meth:`str.index`/:meth:`str.rindex`, a ValueError is
    raised when the substring is not found instead of returning -1.
    Parameters
    ----------
    sub : str
        Substring being searched
    start : int
        Left edge index
    end : int
        Right edge index
    side : {'left', 'right'}, default 'left'
        Specifies a starting side, equivalent to ``index`` or ``rindex``
    Returns
    -------
    found : Series/Index of integer values
    """
    if not isinstance(sub, compat.string_types):
        msg = 'expected a string object, not {0}'
        raise TypeError(msg.format(type(sub).__name__))
    if side == 'left':
        method = 'index'
    elif side == 'right':
        method = 'rindex'
    else:  # pragma: no cover
        raise ValueError('Invalid side')
    # Pass `end` only when given, mirroring str.index/str.rindex signatures.
    if end is None:
        f = lambda x: getattr(x, method)(sub, start)
    else:
        f = lambda x: getattr(x, method)(sub, start, end)
    return _na_map(f, arr, dtype=int)
def str_pad(arr, width, side='left', fillchar=' '):
    """
    Pad strings in the Series/Index with an additional character to
    specified side.
    Parameters
    ----------
    width : int
        Minimum width of resulting string; additional characters will be filled
        with spaces
    side : {'left', 'right', 'both'}, default 'left'
    fillchar : str
        Additional character for filling, default is whitespace
    Returns
    -------
    padded : Series/Index of objects
    """
    # Validate the fill character: must be a string of exactly one char.
    if not isinstance(fillchar, compat.string_types):
        msg = 'fillchar must be a character, not {0}'
        raise TypeError(msg.format(type(fillchar).__name__))
    if len(fillchar) != 1:
        raise TypeError('fillchar must be a character, not str')
    # Dispatch table: padding on the left means right-justifying, etc.
    padders = {
        'left': lambda x: x.rjust(width, fillchar),
        'right': lambda x: x.ljust(width, fillchar),
        'both': lambda x: x.center(width, fillchar),
    }
    if side not in padders:  # pragma: no cover
        raise ValueError('Invalid side')
    return _na_map(padders[side], arr)
def str_split(arr, pat=None, n=None):
    """
    Split each string (a la re.split) in the Series/Index by given
    pattern, propagating NA values. Equivalent to :meth:`str.split`.
    Parameters
    ----------
    pat : string, default None
        String or regular expression to split on. If None, splits on whitespace
    n : int, default -1 (all)
        None, 0 and -1 will be interpreted as return all splits
    expand : bool, default False
        * If True, return DataFrame/MultiIndex expanding dimensionality.
        * If False, return Series/Index.
        .. versionadded:: 0.16.1
    return_type : deprecated, use `expand`
    Returns
    -------
    split : Series/Index or DataFrame/MultiIndex of objects
    """
    if pat is None:
        # Whitespace split: str.split treats maxsplit=-1 as "no limit".
        if n is None or n == 0:
            n = -1
        f = lambda x: x.split(pat, n)
    else:
        if len(pat) == 1:
            # Single-character separator: use the fast str.split path.
            if n is None or n == 0:
                n = -1
            f = lambda x: x.split(pat, n)
        else:
            # Multi-character pattern: treat as a regex. Note re.split uses
            # maxsplit=0 (not -1) to mean "no limit", hence the different
            # normalization here.
            if n is None or n == -1:
                n = 0
            regex = re.compile(pat)
            f = lambda x: regex.split(x, maxsplit=n)
    res = _na_map(f, arr)
    return res
def str_rsplit(arr, pat=None, n=None):
    """
    Split each string in the Series/Index by the given delimiter
    string, starting at the end of the string and working to the front.
    Equivalent to :meth:`str.rsplit`.
    .. versionadded:: 0.16.2
    Parameters
    ----------
    pat : string, default None
        Separator to split on. If None, splits on whitespace
    n : int, default -1 (all)
        None, 0 and -1 will be interpreted as return all splits
    expand : bool, default False
        * If True, return DataFrame/MultiIndex expanding dimensionality.
        * If False, return Series/Index.
    Returns
    -------
    split : Series/Index or DataFrame/MultiIndex of objects
    """
    # str.rsplit expresses "no limit" as maxsplit=-1.
    maxsplit = -1 if (n is None or n == 0) else n
    return _na_map(lambda x: x.rsplit(pat, maxsplit), arr)
def str_slice(arr, start=None, stop=None, step=None):
    """
    Slice substrings from each element in the Series/Index
    Parameters
    ----------
    start : int or None
    stop : int or None
    step : int or None
    Returns
    -------
    sliced : Series/Index of objects
    """
    # Build a single slice object and apply it to every non-NA element.
    slicer = slice(start, stop, step)
    return _na_map(lambda x: x[slicer], arr)
def str_slice_replace(arr, start=None, stop=None, repl=None):
    """
    Replace a slice of each string in the Series/Index with another
    string.
    Parameters
    ----------
    start : int or None
    stop : int or None
    repl : str or None
        String for replacement
    Returns
    -------
    replaced : Series/Index of objects
    """
    if repl is None:
        repl = ''
    def f(x):
        # When the [start:stop] slice selects nothing (e.g. start beyond the
        # string's end), behave like an insertion at `start`: keep the tail
        # from `start` onward instead of cutting at `stop`.
        if x[start:stop] == '':
            local_stop = start
        else:
            local_stop = stop
        # Assemble prefix + replacement + suffix; a None boundary means the
        # corresponding side is dropped entirely.
        y = ''
        if start is not None:
            y += x[:start]
        y += repl
        if stop is not None:
            y += x[local_stop:]
        return y
    return _na_map(f, arr)
def str_strip(arr, to_strip=None, side='both'):
    """
    Strip whitespace (including newlines) from each string in the
    Series/Index.
    Parameters
    ----------
    to_strip : str or unicode
    side : {'left', 'right', 'both'}, default 'both'
    Returns
    -------
    stripped : Series/Index of objects
    """
    # Dispatch table selecting the matching str method for each side.
    strippers = {
        'both': lambda x: x.strip(to_strip),
        'left': lambda x: x.lstrip(to_strip),
        'right': lambda x: x.rstrip(to_strip),
    }
    if side not in strippers:  # pragma: no cover
        raise ValueError('Invalid side')
    return _na_map(strippers[side], arr)
def str_wrap(arr, width, **kwargs):
r"""
Wrap long strings in the Series/Index to be formatted in
paragraphs with length less than a given width.
This method has the same keyword parameters and defaults as
:class:`textwrap.TextWrapper`.
Parameters
----------
width : int
Maximum line-width
expand_tabs : bool, optional
If true, tab characters will be expanded to spaces (default: True)
replace_whitespace : bool, optional
If true, each whitespace character (as defined by string.whitespace)
remaining after tab expansion will be replaced by a single space
(default: True)
drop_whitespace : bool, optional
If true, whitespace that, after wrapping, happens to end up at the
beginning or end of a line is dropped (default: True)
break_long_words : bool, optional
If true, then words longer than width will be broken in order to ensure
that no lines are longer than width. If it is false, long words will
not be broken, and some lines may be longer than width. (default: True)
break_on_hyphens : bool, optional
If true, wrapping will occur preferably on whitespace and right after
hyphens in compound words, as it is customary in English. If false,
only whitespaces will be considered as potentially good places for line
breaks, but you need to set break_long_words to false if you want truly
insecable words. (default: True)
Returns
-------
wrapped : Series/Index of objects
Notes
-----
Internally, this method uses a :class:`textwrap.TextWrapper` instance with
default settings. To achieve behavior matching R's stringr library str_wrap
function, use the arguments:
- expand_tabs = False
- replace_whitespace = True
- drop_whitespace = True
- break_long_words = False
- break_on_hyphens = False
Examples
--------
>>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
>>> s.str.wrap(12)
0 line to be\nwrapped
1 another line\nto be\nwrapped
"""
kwargs['width'] = width
tw = textwrap.TextWrapper(**kwargs)
return _na_map(lambda s: '\n'.join(tw.wrap(s)), arr)
def str_translate(arr, table, deletechars=None):
    """
    Map all characters in the string through the given mapping table.
    Equivalent to standard :meth:`str.translate`. Note that the optional
    argument deletechars is only valid if you are using python 2. For python 3,
    character deletion should be specified via the table argument.
    Parameters
    ----------
    table : dict (python 3), str or None (python 2)
        In python 3, table is a mapping of Unicode ordinals to Unicode ordinals,
        strings, or None. Unmapped characters are left untouched. Characters
        mapped to None are deleted. :meth:`str.maketrans` is a helper function
        for making translation tables.
        In python 2, table is either a string of length 256 or None. If the
        table argument is None, no translation is applied and the operation
        simply removes the characters in deletechars. :func:`string.maketrans`
        is a helper function for making translation tables.
    deletechars : str, optional (python 2)
        A string of characters to delete. This argument is only valid
        in python 2.
    Returns
    -------
    translated : Series/Index of objects
    """
    if deletechars is None:
        # Common path, valid on both python 2 and 3.
        f = lambda x: x.translate(table)
    else:
        from pandas import compat
        if compat.PY3:
            # Python 3's str.translate has no deletechars argument; deletions
            # must be encoded as None values in the table instead.
            raise ValueError("deletechars is not a valid argument for "
                             "str.translate in python 3. You should simply "
                             "specify character deletions in the table argument")
        f = lambda x: x.translate(table, deletechars)
    return _na_map(f, arr)
def str_get(arr, i):
    """
    Extract element from lists, tuples, or strings in each element in the
    Series/Index.
    Parameters
    ----------
    i : int
        Integer index (location)
    Returns
    -------
    items : Series/Index of objects
        NaN where the index is out of bounds for that element.
    """
    # Bug fix: the old guard `len(x) > i` is always true for negative i, so
    # e.g. i=-5 on 'abc' raised IndexError instead of yielding NaN. Bound the
    # index on both sides; for i >= 0 this is identical to the old check.
    f = lambda x: x[i] if -len(x) <= i < len(x) else np.nan
    return _na_map(f, arr)
def str_decode(arr, encoding, errors="strict"):
    """
    Decode character string in the Series/Index to unicode
    using indicated encoding. Equivalent to :meth:`str.decode`.
    Parameters
    ----------
    encoding : string
    errors : string
    Returns
    -------
    decoded : Series/Index of objects
    """
    # Apply bytes -> text decoding element-wise, leaving NAs alone.
    decoder = lambda x: x.decode(encoding, errors)
    return _na_map(decoder, arr)
def str_encode(arr, encoding, errors="strict"):
    """
    Encode character string in the Series/Index to some other encoding
    using indicated encoding. Equivalent to :meth:`str.encode`.
    Parameters
    ----------
    encoding : string
    errors : string
    Returns
    -------
    encoded : Series/Index of objects
    """
    # Apply text -> bytes encoding element-wise, leaving NAs alone.
    encoder = lambda x: x.encode(encoding, errors)
    return _na_map(encoder, arr)
def _noarg_wrapper(f, docstring=None, **kargs):
def wrapper(self):
result = _na_map(f, self._data, **kargs)
return self._wrap_result(result)
wrapper.__name__ = f.__name__
if docstring is not None:
wrapper.__doc__ = docstring
else:
raise ValueError('Provide docstring')
return wrapper
def _pat_wrapper(f, flags=False, na=False, **kwargs):
def wrapper1(self, pat):
result = f(self._data, pat)
return self._wrap_result(result)
def wrapper2(self, pat, flags=0, **kwargs):
result = f(self._data, pat, flags=flags, **kwargs)
return self._wrap_result(result)
def wrapper3(self, pat, na=np.nan):
result = f(self._data, pat, na=na)
return self._wrap_result(result)
wrapper = wrapper3 if na else wrapper2 if flags else wrapper1
wrapper.__name__ = f.__name__
if f.__doc__:
wrapper.__doc__ = f.__doc__
return wrapper
def copy(source):
    "Copy a docstring from another source function (if present)"
    def do_copy(target):
        doc = source.__doc__
        if doc:
            target.__doc__ = doc
        return target
    return do_copy
class StringMethods(NoNewAttributesMixin):
    """
    Vectorized string functions for Series and Index. NAs stay NA unless
    handled otherwise by a particular method. Patterned after Python's string
    methods, with some inspiration from R's stringr package.
    Examples
    --------
    >>> s.str.split('_')
    >>> s.str.replace('_', '')
    """
    def __init__(self, data):
        # For categorical data the string ops run on the (unique) categories
        # and are broadcast back to full length in _wrap_result.
        self._is_categorical = is_categorical_dtype(data)
        self._data = data.cat.categories if self._is_categorical else data
        # save orig to blow up categoricals to the right type
        self._orig = data
        self._freeze()
    def __getitem__(self, key):
        if isinstance(key, slice):
            return self.slice(start=key.start, stop=key.stop,
                              step=key.step)
        else:
            return self.get(key)
    def __iter__(self):
        # Yield the i-th character of every string until every element is
        # exhausted (i.e. the whole column of i-th characters is NA).
        i = 0
        g = self.get(i)
        while g.notnull().any():
            yield g
            i += 1
            g = self.get(i)
    def _wrap_result(self, result, use_codes=True, name=None):
        # for category, we do the stuff on the categories, so blow it up
        # to the full series again
        # But for some operations, we have to do the stuff on the full values,
        # so make it possible to skip this step as the method already did this before
        # the transformation...
        if use_codes and self._is_categorical:
            result = take_1d(result, self._orig.cat.codes)
        # leave as it is to keep extract and get_dummies results
        # can be merged to _wrap_result_expand in v0.17
        from pandas.core.series import Series
        from pandas.core.frame import DataFrame
        from pandas.core.index import Index
        if not hasattr(result, 'ndim'):
            return result
        name = name or getattr(result, 'name', None) or self._orig.name
        if result.ndim == 1:
            if isinstance(self._orig, Index):
                # if result is a boolean np.array, return the np.array
                # instead of wrapping it into a boolean Index (GH 8875)
                if is_bool_dtype(result):
                    return result
                return Index(result, name=name)
            return Series(result, index=self._orig.index, name=name)
        else:
            assert result.ndim < 3
            return DataFrame(result, index=self._orig.index)
    def _wrap_result_expand(self, result, expand=False):
        if not isinstance(expand, bool):
            raise ValueError("expand must be True or False")
        # for category, we do the stuff on the categories, so blow it up
        # to the full series again
        if self._is_categorical:
            result = take_1d(result, self._orig.cat.codes)
        from pandas.core.index import Index, MultiIndex
        if not hasattr(result, 'ndim'):
            return result
        if isinstance(self._orig, Index):
            name = getattr(result, 'name', None)
            # if result is a boolean np.array, return the np.array
            # instead of wrapping it into a boolean Index (GH 8875)
            if hasattr(result, 'dtype') and is_bool_dtype(result):
                return result
            if expand:
                result = list(result)
                return MultiIndex.from_tuples(result, names=name)
            else:
                return Index(result, name=name)
        else:
            index = self._orig.index
            if expand:
                # Each element becomes one row; scalars are wrapped so the
                # expanded frame always has list-like rows.
                def cons_row(x):
                    if is_list_like(x):
                        return x
                    else:
                        return [ x ]
                cons = self._orig._constructor_expanddim
                data = [cons_row(x) for x in result]
                return cons(data, index=index)
            else:
                name = getattr(result, 'name', None)
                cons = self._orig._constructor
                return cons(result, name=name, index=index)
    @copy(str_cat)
    def cat(self, others=None, sep=None, na_rep=None):
        data = self._orig if self._is_categorical else self._data
        result = str_cat(data, others=others, sep=sep, na_rep=na_rep)
        return self._wrap_result(result, use_codes=(not self._is_categorical))
    @deprecate_kwarg('return_type', 'expand',
                     mapping={'series': False, 'frame': True})
    @copy(str_split)
    def split(self, pat=None, n=-1, expand=False):
        result = str_split(self._data, pat, n=n)
        return self._wrap_result_expand(result, expand=expand)
    @copy(str_rsplit)
    def rsplit(self, pat=None, n=-1, expand=False):
        result = str_rsplit(self._data, pat, n=n)
        return self._wrap_result_expand(result, expand=expand)
    _shared_docs['str_partition'] = ("""
    Split the string at the %(side)s occurrence of `sep`, and return 3 elements
    containing the part before the separator, the separator itself,
    and the part after the separator.
    If the separator is not found, return %(return)s.
    Parameters
    ----------
    pat : string, default whitespace
        String to split on.
    expand : bool, default True
        * If True, return DataFrame/MultiIndex expanding dimensionality.
        * If False, return Series/Index.
    Returns
    -------
    split : DataFrame/MultiIndex or Series/Index of objects
    See Also
    --------
    %(also)s
    Examples
    --------
    >>> s = Series(['A_B_C', 'D_E_F', 'X'])
    0    A_B_C
    1    D_E_F
    2        X
    dtype: object
    >>> s.str.partition('_')
       0  1    2
    0  A  _  B_C
    1  D  _  E_F
    2  X
    >>> s.str.rpartition('_')
         0  1  2
    0  A_B  _  C
    1  D_E  _  F
    2         X
    """)
    @Appender(_shared_docs['str_partition'] % {'side': 'first',
        'return': '3 elements containing the string itself, followed by two empty strings',
        'also': 'rpartition : Split the string at the last occurrence of `sep`'})
    def partition(self, pat=' ', expand=True):
        f = lambda x: x.partition(pat)
        result = _na_map(f, self._data)
        return self._wrap_result_expand(result, expand=expand)
    @Appender(_shared_docs['str_partition'] % {'side': 'last',
        'return': '3 elements containing two empty strings, followed by the string itself',
        'also': 'partition : Split the string at the first occurrence of `sep`'})
    def rpartition(self, pat=' ', expand=True):
        f = lambda x: x.rpartition(pat)
        result = _na_map(f, self._data)
        return self._wrap_result_expand(result, expand=expand)
    @copy(str_get)
    def get(self, i):
        result = str_get(self._data, i)
        return self._wrap_result(result)
    @copy(str_join)
    def join(self, sep):
        result = str_join(self._data, sep)
        return self._wrap_result(result)
    @copy(str_contains)
    def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):
        result = str_contains(self._data, pat, case=case, flags=flags,
                              na=na, regex=regex)
        return self._wrap_result(result)
    @copy(str_match)
    def match(self, pat, case=True, flags=0, na=np.nan, as_indexer=False):
        result = str_match(self._data, pat, case=case, flags=flags,
                           na=na, as_indexer=as_indexer)
        return self._wrap_result(result)
    @copy(str_replace)
    def replace(self, pat, repl, n=-1, case=True, flags=0):
        result = str_replace(self._data, pat, repl, n=n, case=case,
                             flags=flags)
        return self._wrap_result(result)
    @copy(str_repeat)
    def repeat(self, repeats):
        result = str_repeat(self._data, repeats)
        return self._wrap_result(result)
    @copy(str_pad)
    def pad(self, width, side='left', fillchar=' '):
        result = str_pad(self._data, width, side=side, fillchar=fillchar)
        return self._wrap_result(result)
    _shared_docs['str_pad'] = ("""
    Filling %(side)s side of strings in the Series/Index with an
    additional character. Equivalent to :meth:`str.%(method)s`.
    Parameters
    ----------
    width : int
        Minimum width of resulting string; additional characters will be filled
        with ``fillchar``
    fillchar : str
        Additional character for filling, default is whitespace
    Returns
    -------
    filled : Series/Index of objects
    """)
    @Appender(_shared_docs['str_pad'] % dict(side='left and right',
                                             method='center'))
    def center(self, width, fillchar=' '):
        return self.pad(width, side='both', fillchar=fillchar)
    @Appender(_shared_docs['str_pad'] % dict(side='right', method='ljust'))
    def ljust(self, width, fillchar=' '):
        return self.pad(width, side='right', fillchar=fillchar)
    @Appender(_shared_docs['str_pad'] % dict(side='left', method='rjust'))
    def rjust(self, width, fillchar=' '):
        return self.pad(width, side='left', fillchar=fillchar)
    def zfill(self, width):
        """
        Filling left side of strings in the Series/Index with 0.
        Equivalent to :meth:`str.zfill`.
        Parameters
        ----------
        width : int
            Minimum width of resulting string; additional characters will be
            filled with 0
        Returns
        -------
        filled : Series/Index of objects
        """
        result = str_pad(self._data, width, side='left', fillchar='0')
        return self._wrap_result(result)
    @copy(str_slice)
    def slice(self, start=None, stop=None, step=None):
        result = str_slice(self._data, start, stop, step)
        return self._wrap_result(result)
    @copy(str_slice_replace)
    def slice_replace(self, start=None, stop=None, repl=None):
        result = str_slice_replace(self._data, start, stop, repl)
        return self._wrap_result(result)
    @copy(str_decode)
    def decode(self, encoding, errors="strict"):
        result = str_decode(self._data, encoding, errors)
        return self._wrap_result(result)
    @copy(str_encode)
    def encode(self, encoding, errors="strict"):
        result = str_encode(self._data, encoding, errors)
        return self._wrap_result(result)
    _shared_docs['str_strip'] = ("""
    Strip whitespace (including newlines) from each string in the
    Series/Index from %(side)s. Equivalent to :meth:`str.%(method)s`.
    Returns
    -------
    stripped : Series/Index of objects
    """)
    @Appender(_shared_docs['str_strip'] % dict(side='left and right sides',
                                               method='strip'))
    def strip(self, to_strip=None):
        result = str_strip(self._data, to_strip, side='both')
        return self._wrap_result(result)
    @Appender(_shared_docs['str_strip'] % dict(side='left side',
                                               method='lstrip'))
    def lstrip(self, to_strip=None):
        result = str_strip(self._data, to_strip, side='left')
        return self._wrap_result(result)
    @Appender(_shared_docs['str_strip'] % dict(side='right side',
                                               method='rstrip'))
    def rstrip(self, to_strip=None):
        result = str_strip(self._data, to_strip, side='right')
        return self._wrap_result(result)
    @copy(str_wrap)
    def wrap(self, width, **kwargs):
        result = str_wrap(self._data, width, **kwargs)
        return self._wrap_result(result)
    @copy(str_get_dummies)
    def get_dummies(self, sep='|'):
        # we need to cast to Series of strings as only that has all
        # methods available for making the dummies...
        data = self._orig.astype(str) if self._is_categorical else self._data
        result = str_get_dummies(data, sep)
        return self._wrap_result(result, use_codes=(not self._is_categorical))
    @copy(str_translate)
    def translate(self, table, deletechars=None):
        result = str_translate(self._data, table, deletechars)
        return self._wrap_result(result)
    # Pattern-based methods generated from the module-level str_* functions.
    count = _pat_wrapper(str_count, flags=True)
    startswith = _pat_wrapper(str_startswith, na=True)
    endswith = _pat_wrapper(str_endswith, na=True)
    findall = _pat_wrapper(str_findall, flags=True)
    @copy(str_extract)
    def extract(self, pat, flags=0):
        result, name = str_extract(self._data, pat, flags=flags)
        return self._wrap_result(result, name=name)
    _shared_docs['find'] = ("""
    Return %(side)s indexes in each strings in the Series/Index
    where the substring is fully contained between [start:end].
    Return -1 on failure. Equivalent to standard :meth:`str.%(method)s`.
    Parameters
    ----------
    sub : str
        Substring being searched
    start : int
        Left edge index
    end : int
        Right edge index
    Returns
    -------
    found : Series/Index of integer values
    See Also
    --------
    %(also)s
    """)
    @Appender(_shared_docs['find'] % dict(side='lowest', method='find',
                                          also='rfind : Return highest indexes in each strings'))
    def find(self, sub, start=0, end=None):
        result = str_find(self._data, sub, start=start, end=end, side='left')
        return self._wrap_result(result)
    @Appender(_shared_docs['find'] % dict(side='highest', method='rfind',
                                          also='find : Return lowest indexes in each strings'))
    def rfind(self, sub, start=0, end=None):
        result = str_find(self._data, sub, start=start, end=end, side='right')
        return self._wrap_result(result)
    def normalize(self, form):
        """Return the Unicode normal form for the strings in the Series/Index.
        For more information on the forms, see the
        :func:`unicodedata.normalize`.
        Parameters
        ----------
        form : {'NFC', 'NFKC', 'NFD', 'NFKD'}
            Unicode form
        Returns
        -------
        normalized : Series/Index of objects
        """
        import unicodedata
        f = lambda x: unicodedata.normalize(form, compat.u_safe(x))
        result = _na_map(f, self._data)
        return self._wrap_result(result)
    _shared_docs['index'] = ("""
    Return %(side)s indexes in each strings where the substring is
    fully contained between [start:end]. This is the same as ``str.%(similar)s``
    except instead of returning -1, it raises a ValueError when the substring
    is not found. Equivalent to standard ``str.%(method)s``.
    Parameters
    ----------
    sub : str
        Substring being searched
    start : int
        Left edge index
    end : int
        Right edge index
    Returns
    -------
    found : Series/Index of objects
    See Also
    --------
    %(also)s
    """)
    @Appender(_shared_docs['index'] % dict(side='lowest', similar='find', method='index',
                                           also='rindex : Return highest indexes in each strings'))
    def index(self, sub, start=0, end=None):
        result = str_index(self._data, sub, start=start, end=end, side='left')
        return self._wrap_result(result)
    @Appender(_shared_docs['index'] % dict(side='highest', similar='rfind', method='rindex',
                                           also='index : Return lowest indexes in each strings'))
    def rindex(self, sub, start=0, end=None):
        result = str_index(self._data, sub, start=start, end=end, side='right')
        return self._wrap_result(result)
    _shared_docs['len'] = ("""
    Compute length of each string in the Series/Index.
    Returns
    -------
    lengths : Series/Index of integer values
    """)
    # Note: intentionally shadows builtin len at class scope; the builtin is
    # passed in as the element-wise function before the name is rebound.
    len = _noarg_wrapper(len, docstring=_shared_docs['len'], dtype=int)
    _shared_docs['casemethods'] = ("""
    Convert strings in the Series/Index to %(type)s.
    Equivalent to :meth:`str.%(method)s`.
    Returns
    -------
    converted : Series/Index of objects
    """)
    _shared_docs['lower'] = dict(type='lowercase', method='lower')
    _shared_docs['upper'] = dict(type='uppercase', method='upper')
    _shared_docs['title'] = dict(type='titlecase', method='title')
    _shared_docs['capitalize'] = dict(type='be capitalized',
                                      method='capitalize')
    _shared_docs['swapcase'] = dict(type='be swapcased', method='swapcase')
    lower = _noarg_wrapper(lambda x: x.lower(),
                           docstring=_shared_docs['casemethods'] %
                           _shared_docs['lower'])
    upper = _noarg_wrapper(lambda x: x.upper(),
                           docstring=_shared_docs['casemethods'] %
                           _shared_docs['upper'])
    title = _noarg_wrapper(lambda x: x.title(),
                           docstring=_shared_docs['casemethods'] %
                           _shared_docs['title'])
    capitalize = _noarg_wrapper(lambda x: x.capitalize(),
                                docstring=_shared_docs['casemethods'] %
                                _shared_docs['capitalize'])
    swapcase = _noarg_wrapper(lambda x: x.swapcase(),
                              docstring=_shared_docs['casemethods'] %
                              _shared_docs['swapcase'])
    _shared_docs['ismethods'] = ("""
    Check whether all characters in each string in the Series/Index
    are %(type)s. Equivalent to :meth:`str.%(method)s`.
    Returns
    -------
    is : Series/array of boolean values
    """)
    _shared_docs['isalnum'] = dict(type='alphanumeric', method='isalnum')
    _shared_docs['isalpha'] = dict(type='alphabetic', method='isalpha')
    _shared_docs['isdigit'] = dict(type='digits', method='isdigit')
    _shared_docs['isspace'] = dict(type='whitespace', method='isspace')
    _shared_docs['islower'] = dict(type='lowercase', method='islower')
    _shared_docs['isupper'] = dict(type='uppercase', method='isupper')
    _shared_docs['istitle'] = dict(type='titlecase', method='istitle')
    _shared_docs['isnumeric'] = dict(type='numeric', method='isnumeric')
    _shared_docs['isdecimal'] = dict(type='decimal', method='isdecimal')
    isalnum = _noarg_wrapper(lambda x: x.isalnum(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['isalnum'])
    isalpha = _noarg_wrapper(lambda x: x.isalpha(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['isalpha'])
    isdigit = _noarg_wrapper(lambda x: x.isdigit(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['isdigit'])
    isspace = _noarg_wrapper(lambda x: x.isspace(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['isspace'])
    islower = _noarg_wrapper(lambda x: x.islower(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['islower'])
    isupper = _noarg_wrapper(lambda x: x.isupper(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['isupper'])
    istitle = _noarg_wrapper(lambda x: x.istitle(),
                             docstring=_shared_docs['ismethods'] %
                             _shared_docs['istitle'])
    # isnumeric/isdecimal need unicode semantics, hence the u_safe coercion.
    isnumeric = _noarg_wrapper(lambda x: compat.u_safe(x).isnumeric(),
                               docstring=_shared_docs['ismethods'] %
                               _shared_docs['isnumeric'])
    isdecimal = _noarg_wrapper(lambda x: compat.u_safe(x).isdecimal(),
                               docstring=_shared_docs['ismethods'] %
                               _shared_docs['isdecimal'])
class StringAccessorMixin(object):
    """ Mixin to add a `.str` accessor to the class."""
    # string methods
    def _make_str_accessor(self):
        # Validate that .str makes sense for this object before constructing
        # the accessor; raises AttributeError otherwise so that hasattr-style
        # probing works.
        from pandas.core.series import Series
        from pandas.core.index import Index
        if isinstance(self, Series) and not(
                (is_categorical_dtype(self.dtype) and
                 is_object_dtype(self.values.categories)) or
                (is_object_dtype(self.dtype))):
            # it's neither a string series nor a categorical series with strings
            # inside the categories.
            # this really should exclude all series with any non-string values (instead of test
            # for object dtype), but that isn't practical for performance reasons until we have a
            # str dtype (GH 9343)
            raise AttributeError("Can only use .str accessor with string "
                                 "values, which use np.object_ dtype in "
                                 "pandas")
        elif isinstance(self, Index):
            # see src/inference.pyx which can contain string values
            allowed_types = ('string', 'unicode', 'mixed', 'mixed-integer')
            if self.inferred_type not in allowed_types:
                message = ("Can only use .str accessor with string values "
                           "(i.e. inferred_type is 'string', 'unicode' or 'mixed')")
                raise AttributeError(message)
            if self.nlevels > 1:
                message = "Can only use .str accessor with Index, not MultiIndex"
                raise AttributeError(message)
        return StringMethods(self)
    str = AccessorProperty(StringMethods, _make_str_accessor)
    def _dir_additions(self):
        return set()
    def _dir_deletions(self):
        # Hide .str from dir() when the accessor would raise for this object.
        try:
            getattr(self, 'str')
        except AttributeError:
            return set(['str'])
        return set()
| mit |
wright-group/WrightTools | tests/artists/test_pcolor.py | 1 | 1139 | #! /usr/bin/env python3
import WrightTools as wt
from WrightTools import datasets
from matplotlib import pyplot as plt
import shutil
import os
def test_pcolor():
    """Smoke-test ax.pcolor/ax.pcolormesh on a 2D chop of the demo movie."""
    # Work on a throwaway copy so the packaged dataset is never mutated.
    source = datasets.wt5.v1p0p1_MoS2_TrEE_movie
    local_copy = shutil.copy(source, "./test_pcolor.wt5")
    data = wt.open(local_copy)
    os.unlink(local_copy)
    # Standard preprocessing: baseline, unit conversion, signed sqrt.
    data.level(0, 2, -3)
    data.convert("eV")
    data.ai0.symmetric_root(2)
    chop = data.chop("w1=wm", "w2", at={"d2": [-600, "fs"]})[0]
    fig, gs = wt.artists.create_figure()
    axes = plt.subplot(gs[0])
    axes.pcolor(chop)
    axes.pcolormesh(chop)
    data.close()
    chop.close()
def test_pcolor_lower_rank():
    """Smoke-test pcolor/pcolormesh on data collapsed to lower rank."""
    # Work on a throwaway copy so the packaged dataset is never mutated.
    source = datasets.wt5.v1p0p1_MoS2_TrEE_movie
    local_copy = shutil.copy(source, "./test_pcolor_lower_rank.wt5")
    data = wt.open(local_copy)
    os.unlink(local_copy)
    # Standard preprocessing, then collapse the delay axis by summation.
    data.level(0, 2, -3)
    data.convert("eV")
    data.ai0.symmetric_root(2)
    data.collapse("d2", method="sum")
    fig, gs = wt.artists.create_figure()
    axes = plt.subplot(gs[0])
    axes.pcolor(data, channel="ai0_d2_sum")
    axes.pcolormesh(data, channel="ai0_d2_sum")
    data.close()
if __name__ == "__main__":
    # Run both smoke tests directly and keep the figures open for inspection.
    test_pcolor()
    test_pcolor_lower_rank()
    plt.show()
| mit |
lyuboshen/Pose-Estimation-on-Depth-Images-of-Clinical-Patients-V2.0 | src/test.py | 1 | 6673 | import cv2
import os
import numpy as np
import copy
from openpyxl import Workbook
from openpyxl import load_workbook
import xlrd
import xlwt
import json
# Depth-frame dimensions; 424x512 matches the values used in the commented
# experiments below -- presumably a Kinect v2 depth sensor, TODO confirm.
image_rows = 424
image_cols = 512
# Input/output locations; presumably relative to the working directory.
data_path = 'images/trial_2/p3+5/middle/'
image_prefix = 'p3s1d_'
image_path = 'images/'
label_path = 'annotations/'
# depth_image = cv2.imread(os.path.join(data_path, '2000.png'),cv2.IMREAD_UNCHANGED)
# depth_image = depth_image.astype('float32')
# # depth_image *= (255.0/2500.0)
# # print(depth_image[200])
# # depth_image = depth_image.astype('int8')
# # depth_image = cv2.convertScaleAbs(depth_image, alpha=(1/2700))
# print(depth_image[10]/2500.0)
#
# # M_rotate = cv2.getRotationMatrix2D((512/2,424/2),180,1)
# # result = cv2.warpAffine(depth_image, M_rotate, (512, 424))
# result = depth_image.T
# flipped = cv2.flip(result,0)
# result = flipped[5:405,20:220]
# cv2.imshow('Labeling', result)
# cv2.waitKey(0)
# a = [1,0,0]
# b = [9,10,0]
# x1 = np.where(a == np.max(a))
# x2 = np.where(b == np.max(b))
# print(b.index(max(b)))
#
# import xlrd
# from pyExcelerator import *
#
# w = Workbook()
# ws = w.add_sheet('Sheet1')
#
# fname = "result.xlsx"
# bk = xlrd.open_workbook(fname)
# shxrange = range(bk.nsheets)
# try:
# sh = bk.sheet_by_name("Sheet1")
# except:
# print "no sheet in %s named Sheet1" % fname
#
# nrows = sh.nrows
# ncols = sh.ncols
# print "nrows %d, ncols %d" % (nrows,ncols)
#
#
# w.save('result.xlsx')
# import scipy.ndimage as ndimage
# import matplotlib.pyplot as plt
# import scipy.ndimage.filters as filters
# fig = plt.figure()
# plt.gray() # show the filtered result in grayscale
# ax1 = fig.add_subplot(221) # left side
# ax2 = fig.add_subplot(222) # right side
# ax3 = fig.add_subplot(223) # left side
# ax4 = fig.add_subplot(224) # right side
# wb = Workbook()
# dest_filename = 'result1.xlsx'
# ws1 = wb.active
# ws1.title = "Sheet_1"
# wb.save(filename = dest_filename)
# fname = "result1.xlsx"
# wb = load_workbook(filename = fname)
# sheets = wb.get_sheet_names()
# count = len(sheets)
# index = count+1
# new_ws = wb.create_sheet('Sheet_'+str(index))
# wb.save(fname)
# fname = "result1.xlsx"
# oldb = xlrd.open_workbook(fname)
# newb = xlwt.Workbook()
# count = len(oldb.sheets())
# print(count)
# allSheets = []
# for i in range(count):
# allSheets.append(oldb.sheet_by_index(i))
# newb._Workbook__worksheets = allSheets
# index = count+1
# ws = newb.add_sheet('Sheet_'+str(index))
# ws.write(0,0, 'Recall')
# newb.save(fname)
# #
# def gaussian_kernel(h, w, sigma_h, sigma_w):
# yx = np.mgrid[-h//2:h//2,-w//2:w//2]**2
# return np.exp(-yx[0,:,:] / sigma_h**2 - yx[1,:,:] / sigma_w**2).astype('float32')
#
#
# cv2.imshow('result1',gaussian_kernel(150, 150, 25, 25))
# cv2.imshow('result2',gaussian_kernel(80, 80, 10, 10))
# cv2.waitKey()
# json_file = 'annotations/all_patient.json'
# with open(json_file) as r:
# file_p = json.load(r)
# for i in range(0,len(file_p)):
# for i in range(0,len(file_p[i]["joints"])):
# x = file_p[i]["joints"][i][0]
# file_p[i]["joints"][i][0] = file_p[i]["joints"][i][1]
# file_p[i]["joints"][i][1] = x
#
# with open(json_file, 'w') as w:
# json.dump(file_p, w)
def gaussian_kernel(h, w, sigma_h, sigma_w):
    """Return an (h, w) un-normalised Gaussian bump centred on the grid origin.

    The peak value is 1.0 at the centre cell; sigma_h / sigma_w control the
    spread along the row / column axes respectively.
    """
    # Squared coordinates of every cell relative to the grid centre.
    grid_sq = np.mgrid[-h // 2:h // 2, -w // 2:w // 2] ** 2
    exponent = -grid_sq[0, :, :] / sigma_h ** 2 - grid_sq[1, :, :] / sigma_w ** 2
    return np.exp(exponent)
def gen_kernel(score_map, border=400, sigma_h=10, sigma_w=10):
    """Return a Gaussian heat map, the size of *score_map*, centred on its argmax.

    An oversized kernel (padded by *border*) is generated and then cropped so
    that its peak lands on the maximum of *score_map*.
    """
    h, w = score_map.shape[0], score_map.shape[1]
    big = gaussian_kernel(h + border, w + border, sigma_h, sigma_w)
    # Location of the (single) annotated joint in the score map.
    y, x = np.unravel_index(np.argmax(score_map), [h, w])
    dh, dw = h // 2, w // 2
    half = border // 2
    # Shift the crop window so the kernel peak coincides with (y, x).
    y0, y1 = dh - y + half, 3 * dh - y + half
    x0, x1 = dw - x + half, 3 * dw - x + half
    return big[y0:y1, x0:x1]
def prepare_centered_annotation(annotations, centers, H, W, train_depth, num_parts=14):
    """Build per-part Gaussian heat maps for each annotated sample.

    For every sample, a one-hot (H, W) score map is created per body part
    and turned into a Gaussian heat map via gen_kernel.  Debug overlays of
    each part's heat map on the depth image are written to help/<pid>.png.

    NOTE(review): the loop `break`s after the first sample and the returned
    `annotation_maps` buffer is never written to (it contains uninitialised
    values) -- this looks like debugging code; confirm before relying on
    the return value.  `centers` is printed but otherwise unused.
    """
    print(annotations.shape)
    print(centers.shape)
    single_annotation_maps = np.ndarray((1, H, W, 14), dtype=np.float32)
    annotation_maps = np.ndarray((len(annotations), 46, 38, 15), dtype=np.float32)
    for mid in range(len(annotations)):
        for pid in range(num_parts):
            score_map = np.zeros((H, W))
            # One-hot map at the annotated (row, col) of this part.
            score_map[annotations[mid][pid][0]][annotations[mid][pid][1]] = 1
            single_annotation_maps[0, :, :, pid] = gen_kernel(score_map)
        # Debug visualisation: overlay each part's heat map on the depth image.
        # (Collapses the 14 previously copy-pasted cv2.imwrite calls.)
        for pid in range(num_parts):
            overlay = train_depth[mid, :, :, 0] * 0.7 + single_annotation_maps[0, :, :, pid] * 0.3
            cv2.imwrite('help/%d.png' % pid, 255 * overlay)
        break  # NOTE(review): deliberately stops after the first sample?
    return annotation_maps
# Script entry: load the pre-extracted test arrays and generate centred
# part heat maps for the autoencoder data.
# Bug fix: removed the stray "| mit |" fragment that had been fused onto the
# final line (extraction artifact) and made it a syntax error.
train_depth_path = './dataset/test_autoencoder_depth.npy'
train_ir_path = './dataset/test_autoencoder_ir.npy'  # NOTE(review): loaded nowhere below
train_center_path = './dataset/test_autoencoder_center.npy'
train_annotation_path = './dataset/test_autoencoder_annotation.npy'
train_images = np.load(train_depth_path)
train_centers = np.load(train_center_path)
train_annotation = np.load(train_annotation_path)
# Scale raw depth values into [0, 1].
train_images = train_images.astype('float32') / 255.0
prepare_centered_annotation(train_annotation, train_centers, 376, 312, train_images)
# repo: robbymeals/scikit-learn -- sklearn/neighbors/tests/test_kd_tree.py
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
# Random symmetric positive-definite matrix (V @ V.T); appears unused in the
# visible tests -- presumably kept for metrics such as mahalanobis.
V = np.random.random((3, 3))
V = np.dot(V, V.T)
# Dimensionality of the random point sets used by the query tests.
DIMENSION = 3
# Metric name -> extra kwargs forwarded to DistanceMetric.get_metric.
METRICS = {'euclidean': {},
           'manhattan': {},
           'chebyshev': {},
           'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
    """Reference kNN: full pairwise distance matrix, then take the k nearest."""
    dmat = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
    ind = np.argsort(dmat, axis=1)[:, :k]
    rows = np.arange(Y.shape[0])[:, None]
    dist = dmat[rows, ind]
    return dist, ind
def test_kd_tree_query():
    """Nose-style generator test: KDTree.query must match brute force for
    every metric in METRICS, several k, and all traversal options."""
    np.random.seed(0)
    X = np.random.random((40, DIMENSION))
    Y = np.random.random((10, DIMENSION))
    def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
        kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
        dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
                                breadth_first=breadth_first)
        dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
        # don't check indices here: if there are any duplicate distances,
        # the indices may not match. Distances should not have this problem.
        assert_array_almost_equal(dist1, dist2)
    # Yield one sub-test per (metric, k, dualtree, breadth_first) combination.
    for (metric, kwargs) in METRICS.items():
        for k in (1, 3, 5):
            for dualtree in (True, False):
                for breadth_first in (True, False):
                    yield (check_neighbors,
                           dualtree, breadth_first,
                           k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
    """query_radius must return exactly the points within r of the origin."""
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)
    eps = 1E-15  # guard against round-off exactly at the radius boundary
    kdt = KDTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))
    for r in np.linspace(rad[0], rad[-1], 100):
        found = np.sort(kdt.query_radius(query_pt, r + eps)[0])
        expected = np.sort(np.where(rad <= r + eps)[0])
        assert_array_almost_equal(expected, found)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
    """Distances returned by query_radius must match recomputed distances."""
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)
    eps = 1E-15  # round-off guard at the radius boundary
    kdt = KDTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))
    for r in np.linspace(rad[0], rad[-1], 100):
        ind, dist = kdt.query_radius(query_pt, r + eps, return_distance=True)
        recomputed = np.sqrt(((query_pt - X[ind[0]]) ** 2).sum(1))
        assert_array_almost_equal(recomputed, dist[0])
def compute_kernel_slow(Y, X, kernel, h):
    """Brute-force reference KDE: evaluate the density of X at each row of Y."""
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel)
    # Un-normalised kernel sums, one formula per supported kernel.
    sums = {
        'gaussian': lambda: np.exp(-0.5 * (d * d) / (h * h)).sum(-1),
        'tophat': lambda: (d < h).sum(-1),
        'epanechnikov': lambda: ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1),
        'exponential': lambda: (np.exp(-d / h)).sum(-1),
        'linear': lambda: ((1 - d / h) * (d < h)).sum(-1),
        'cosine': lambda: (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1),
    }
    if kernel not in sums:
        raise ValueError('kernel not recognized')
    return norm * sums[kernel]()
def test_kd_tree_kde(n_samples=100, n_features=3):
    """Nose-style generator test: KDTree.kernel_density must agree with the
    brute-force KDE within the requested atol/rtol for every kernel and
    bandwidth."""
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    kdt = KDTree(X, leaf_size=10)
    for kernel in ['gaussian', 'tophat', 'epanechnikov',
                   'exponential', 'linear', 'cosine']:
        for h in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, h)
            # Closure captures the current dens_true; kernel/h are re-passed
            # explicitly so the yielded sub-test is self-describing.
            def check_results(kernel, h, atol, rtol, breadth_first):
                dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
                                          kernel=kernel,
                                          breadth_first=breadth_first)
                assert_allclose(dens, dens_true, atol=atol,
                                rtol=max(rtol, 1e-7))
            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    for breadth_first in (True, False):
                        yield (check_results, kernel, h, atol, rtol,
                               breadth_first)
def test_gaussian_kde(n_samples=1000):
    """KDTree gaussian KDE should agree with scipy.stats.gaussian_kde."""
    from scipy.stats import gaussian_kde
    np.random.seed(0)
    x_in = np.random.normal(0, 1, n_samples)
    x_out = np.linspace(-5, 5, 30)
    for h in [0.01, 0.1, 1]:
        kdt = KDTree(x_in[:, None])
        try:
            reference = gaussian_kde(x_in, bw_method=h / np.std(x_in))
        except TypeError:
            raise SkipTest("Old scipy, does not accept explicit bandwidth.")
        # Normalise the tree's kernel sum to a density estimate.
        dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
        assert_array_almost_equal(dens_kdt, reference.evaluate(x_out), decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
    """Nose-style generator test: two_point_correlation must equal
    brute-force pair counts for each radius, with and without dualtree."""
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    r = np.linspace(0, 1, 10)
    kdt = KDTree(X, leaf_size=10)
    # Reference counts from the full pairwise distance matrix.
    D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
    counts_true = [(D <= ri).sum() for ri in r]
    def check_two_point(r, dualtree):
        counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
        assert_array_almost_equal(counts, counts_true)
    for dualtree in (True, False):
        yield check_two_point, r, dualtree
def test_kd_tree_pickle():
    """Nose-style generator test: a pickled and restored KDTree must give
    identical query results for every pickle protocol."""
    import pickle
    np.random.seed(0)
    X = np.random.random((10, 3))
    kdt1 = KDTree(X, leaf_size=1)
    ind1, dist1 = kdt1.query(X)
    def check_pickle_protocol(protocol):
        s = pickle.dumps(kdt1, protocol=protocol)
        kdt2 = pickle.loads(s)
        ind2, dist2 = kdt2.query(X)
        assert_array_almost_equal(ind1, ind2)
        assert_array_almost_equal(dist1, dist2)
    for protocol in (0, 1, 2):
        yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
    """NeighborsHeap must retain the n_nbrs smallest of 2*n_nbrs pushed values."""
    heap = NeighborsHeap(n_pts, n_nbrs)
    for row in range(n_pts):
        d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
        i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
        for d, i in zip(d_in, i_in):
            heap.push(row, d, i)
        # Expected result: the inputs sorted by distance, truncated to n_nbrs.
        order = np.argsort(d_in)
        d_sorted = d_in[order]
        i_sorted = i_in[order]
        d_heap, i_heap = heap.get_arrays(sort=True)
        assert_array_almost_equal(d_sorted[:n_nbrs], d_heap[row])
        assert_array_almost_equal(i_sorted[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
    """nodeheap_sort must agree with numpy's argsort ordering."""
    vals = np.random.random(n_nodes).astype(DTYPE)
    expected_order = np.argsort(vals)
    sorted_vals, order = nodeheap_sort(vals)
    assert_array_almost_equal(expected_order, order)
    assert_array_almost_equal(vals[expected_order], sorted_vals)
def test_simultaneous_sort(n_rows=10, n_pts=201):
    """In-place simultaneous_sort must match row-wise argsort reordering."""
    dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
    ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
    # Keep pristine copies for the numpy reference computation.
    dist_ref = dist.copy()
    ind_ref = ind.copy()
    # simultaneous sort rows using function (sorts dist/ind in place)
    simultaneous_sort(dist, ind)
    # simultaneous sort rows using numpy fancy indexing
    order = np.argsort(dist_ref, axis=1)
    rows = np.arange(n_rows)[:, None]
    assert_array_almost_equal(dist, dist_ref[rows, order])
    assert_array_almost_equal(ind, ind_ref[rows, order])
# license: bsd-3-clause
# repo: yl565/statsmodels -- statsmodels/examples/ex_generic_mle.py
from __future__ import print_function
import numpy as np
from scipy import stats
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
# Fit a reference probit on the Spector data with the builtin Probit model,
# then re-fit via GenericLikelihoodModel using the model's own loglike/score.
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
# in this dir
probit_mod = sm.Probit(data.endog, data.exog)
probit_res = probit_mod.fit()
loglike = probit_mod.loglike
score = probit_mod.score
# NOTE(review): exog*2 rescales the regressors here -- presumably deliberate
# for exercising GenericLikelihoodModel; confirm.
mod = GenericLikelihoodModel(data.endog, data.exog*2, loglike, score)
res = mod.fit(method="nm", maxiter = 500)
def probitloglike(params, endog, exog):
    """
    Log likelihood for the probit
    """
    # Map the 0/1 response to -1/+1 so one logcdf call covers both outcomes.
    sign = 2*endog - 1
    linpred = np.dot(exog, params)
    return np.add.reduce(stats.norm.logcdf(sign * linpred))
# Same probit likelihood, now passed as a standalone function; the data are
# forwarded through fit's fargs instead of being read from the model instance.
mod = GenericLikelihoodModel(data.endog, data.exog, loglike=probitloglike)
res = mod.fit(method="nm", fargs=(data.endog,data.exog), maxiter=500)
print(res)
#np.allclose(res.params, probit_res.params)
print(res.params, probit_res.params)
#datal = sm.datasets.longley.load()
datal = sm.datasets.ccard.load()
datal.exog = sm.add_constant(datal.exog, prepend=False)
# Instance of GenericLikelihood model doesn't work directly, because loglike
# cannot get access to data in self.endog, self.exog
# Replace the ccard data with a simulated linear model: 5 regressors plus a
# constant, endog = 1 + sum of all six random columns.
nobs = 5000
rvs = np.random.randn(nobs,6)
datal.exog = rvs[:,:-1]
datal.exog = sm.add_constant(datal.exog, prepend=False)
datal.endog = 1 + rvs.sum(1)
show_error = False  # demo of the broken per-observation loglike below
show_error2 = 1#False
if show_error:
    # Broken variant: returns the per-observation log-pdf array instead of a
    # scalar, which the optimizer cannot handle (hence gated off by default).
    def loglike_norm_xb(self, params):
        beta = params[:-1]
        sigma = params[-1]
        xb = np.dot(self.exog, beta)
        return stats.norm.logpdf(self.endog, loc=xb, scale=sigma)
    mod_norm = GenericLikelihoodModel(datal.endog, datal.exog, loglike_norm_xb)
    res_norm = mod_norm.fit(method="nm", maxiter = 500)
    print(res_norm.params)
if show_error2:
    # Working variant: sums the log-pdf to a scalar and receives the data
    # through fit's fargs rather than from the model instance.
    def loglike_norm_xb(params, endog, exog):
        beta = params[:-1]
        sigma = params[-1]
        #print exog.shape, beta.shape
        xb = np.dot(exog, beta)
        #print xb.shape, stats.norm.logpdf(endog, loc=xb, scale=sigma).shape
        return stats.norm.logpdf(endog, loc=xb, scale=sigma).sum()
    mod_norm = GenericLikelihoodModel(datal.endog, datal.exog, loglike_norm_xb)
    res_norm = mod_norm.fit(start_params=np.ones(datal.exog.shape[1]+1),
                            method="nm", maxiter = 5000,
                            fargs=(datal.endog, datal.exog))
    print(res_norm.params)
class MygMLE(GenericLikelihoodModel):
    """Gaussian MLE with explicit loglike/loglikeobs (testing helper).

    Parameters are (beta_1, ..., beta_k, sigma): regression coefficients
    followed by the error standard deviation.
    """
    def loglike(self, params):
        """Total Gaussian log-likelihood: sum of the per-observation terms."""
        return self.loglikeobs(params).sum()
    def loglikeobs(self, params):
        """Per-observation Gaussian log-likelihood at *params*."""
        coeffs = params[:-1]
        scale = params[-1]
        mean = np.dot(self.exog, coeffs)
        return stats.norm.logpdf(self.endog, loc=mean, scale=scale)
mod_norm2 = MygMLE(datal.endog, datal.exog)
#res_norm = mod_norm.fit(start_params=np.ones(datal.exog.shape[1]+1), method="nm", maxiter = 500)
res_norm2 = mod_norm2.fit(start_params=[1.]*datal.exog.shape[1]+[1], method="nm", maxiter = 500)
print(res_norm2.params)
# An OLS fit supplies good starting values: coefficients plus residual std dev.
res2 = sm.OLS(datal.endog, datal.exog).fit()
start_params = np.hstack((res2.params, np.sqrt(res2.mse_resid)))
res_norm3 = mod_norm2.fit(start_params=start_params, method="nm", maxiter = 500,
                          retall=0)
print(start_params)
print(res_norm3.params)
print(res2.bse)
#print res_norm3.bse # not available
print('llf', res2.llf, res_norm3.llf)
# Standard errors from the inverse numerical Hessian at the MLE.
bse = np.sqrt(np.diag(np.linalg.inv(res_norm3.model.hessian(res_norm3.params))))
res_norm3.model.score(res_norm3.params)
#fprime in fit option cannot be overwritten, set to None, when score is defined
# exception is fixed, but I don't think score was supposed to be called
'''
>>> mod_norm2.fit(start_params=start_params, method="bfgs", fprime=None, maxiter
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 316, in fit
disp=disp, retall=retall, callback=callback)
File "C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6
579.win32\Programs\Python25\Lib\site-packages\scipy\optimize\optimize.py", line
710, in fmin_bfgs
gfk = myfprime(x0)
File "C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6
579.win32\Programs\Python25\Lib\site-packages\scipy\optimize\optimize.py", line
103, in function_wrapper
return function(x, *args)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 240, in <lambda>
score = lambda params: -self.score(params)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 480, in score
return approx_fprime1(params, self.nloglike)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\sandbox\regression\numdiff.py", line 81, in approx_fprime1
nobs = np.size(f0) #len(f0)
TypeError: object of type 'numpy.float64' has no len()
'''
res_bfgs = mod_norm2.fit(start_params=start_params, method="bfgs", fprime=None,
                         maxiter = 500, retall=0)
from statsmodels.tools.numdiff import approx_fprime, approx_hess
# Central-difference Hessian: average of backward and forward approximations.
hb=-approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
hf=-approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
hh = (hf+hb)/2.
print(np.linalg.eigh(hh))
# Numerical gradients (backward, and backward/forward averaged) vs the
# model's analytic/approximate score at the optimum and at the start values.
grad = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
print(grad)
gradb = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
gradf = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
print((gradb+gradf)/2.)
print(res_norm3.model.score(res_norm3.params))
print(res_norm3.model.score(start_params))
mod_norm2.loglike(start_params/2.)
# Compare covariance estimates: inverse Hessian, BFGS cov_params, stored bse.
print(np.linalg.inv(-1*mod_norm2.hessian(res_norm3.params)))
print(np.sqrt(np.diag(res_bfgs.cov_params())))
print(res_norm3.bse)
print("MLE - OLS parameter estimates")
print(res_norm3.params[:-1] - res2.params)
print("bse diff in percent")
print((res_norm3.bse[:-1] / res2.bse)*100. - 100)
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
Optimization terminated successfully.
Current function value: 12.818804
Iterations 6
Optimization terminated successfully.
Current function value: 12.818804
Iterations: 439
Function evaluations: 735
Optimization terminated successfully.
Current function value: 12.818804
Iterations: 439
Function evaluations: 735
<statsmodels.model.LikelihoodModelResults object at 0x02131290>
[ 1.6258006 0.05172931 1.42632252 -7.45229732] [ 1.62581004 0.05172895 1.42633234 -7.45231965]
Warning: Maximum number of function evaluations has been exceeded.
[ -1.18109149 246.94438535 -16.21235536 24.05282629 -324.80867176
274.07378453]
Warning: Maximum number of iterations has been exceeded
[ 17.57107 -149.87528787 19.89079376 -72.49810777 -50.06067953
306.14170418]
Optimization terminated successfully.
Current function value: 506.488765
Iterations: 339
Function evaluations: 550
[ -3.08181404 234.34702702 -14.99684418 27.94090839 -237.1465136
284.75079529]
[ -3.08181304 234.34701361 -14.99684381 27.94088692 -237.14649571
274.6857294 ]
[ 5.51471653 80.36595035 7.46933695 82.92232357 199.35166485]
llf -506.488764864 -506.488764864
Optimization terminated successfully.
Current function value: 506.488765
Iterations: 9
Function evaluations: 13
Gradient evaluations: 13
(array([ 2.41772580e-05, 1.62492628e-04, 2.79438138e-04,
1.90996240e-03, 2.07117946e-01, 1.28747174e+00]), array([[ 1.52225754e-02, 2.01838216e-02, 6.90127235e-02,
-2.57002471e-04, -5.25941060e-01, -8.47339404e-01],
[ 2.39797491e-01, -2.32325602e-01, -9.36235262e-01,
3.02434938e-03, 3.95614029e-02, -1.02035585e-01],
[ -2.11381471e-02, 3.01074776e-02, 7.97208277e-02,
-2.94955832e-04, 8.49402362e-01, -5.20391053e-01],
[ -1.55821981e-01, -9.66926643e-01, 2.01517298e-01,
1.52397702e-03, 4.13805882e-03, -1.19878714e-02],
[ -9.57881586e-01, 9.87911166e-02, -2.67819451e-01,
1.55192932e-03, -1.78717579e-02, -2.55757014e-02],
[ -9.96486655e-04, -2.03697290e-03, -2.98130314e-03,
-9.99992985e-01, -1.71500426e-05, 4.70854949e-06]]))
[[ -4.91007768e-05 -7.28732630e-07 -2.51941401e-05 -2.50111043e-08
-4.77484718e-08 -9.72022463e-08]]
[[ -1.64845915e-08 -2.87059265e-08 -2.88764568e-07 -6.82121026e-09
2.84217094e-10 -1.70530257e-09]]
[ -4.90678076e-05 -6.71320777e-07 -2.46166110e-05 -1.13686838e-08
-4.83169060e-08 -9.37916411e-08]
[ -4.56753924e-05 -6.50857146e-07 -2.31756303e-05 -1.70530257e-08
-4.43378667e-08 -1.75592936e-02]
[[ 2.99386348e+01 -1.24442928e+02 9.67254672e+00 -1.58968536e+02
-5.91960010e+02 -2.48738183e+00]
[ -1.24442928e+02 5.62972166e+03 -5.00079203e+02 -7.13057475e+02
-7.82440674e+03 -1.05126925e+01]
[ 9.67254672e+00 -5.00079203e+02 4.87472259e+01 3.37373299e+00
6.96960872e+02 7.69866589e-01]
[ -1.58968536e+02 -7.13057475e+02 3.37373299e+00 6.82417837e+03
4.84485862e+03 3.21440021e+01]
[ -5.91960010e+02 -7.82440674e+03 6.96960872e+02 4.84485862e+03
3.43753691e+04 9.37524459e+01]
[ -2.48738183e+00 -1.05126925e+01 7.69866589e-01 3.21440021e+01
9.37524459e+01 5.23915258e+02]]
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> print res_norm3.model.score(res_norm3.params)
[ -4.90678076e-05 -6.71320777e-07 -2.46166110e-05 -1.13686838e-08
-4.83169060e-08 -9.37916411e-08]
>>> print res_norm3.model.score(start_params)
[ -4.56753924e-05 -6.50857146e-07 -2.31756303e-05 -1.70530257e-08
-4.43378667e-08 -1.75592936e-02]
>>> mod_norm2.loglike(start_params/2.)
-598.56178102781314
>>> print np.linalg.inv(-1*mod_norm2.hessian(res_norm3.params))
[[ 2.99386348e+01 -1.24442928e+02 9.67254672e+00 -1.58968536e+02
-5.91960010e+02 -2.48738183e+00]
[ -1.24442928e+02 5.62972166e+03 -5.00079203e+02 -7.13057475e+02
-7.82440674e+03 -1.05126925e+01]
[ 9.67254672e+00 -5.00079203e+02 4.87472259e+01 3.37373299e+00
6.96960872e+02 7.69866589e-01]
[ -1.58968536e+02 -7.13057475e+02 3.37373299e+00 6.82417837e+03
4.84485862e+03 3.21440021e+01]
[ -5.91960010e+02 -7.82440674e+03 6.96960872e+02 4.84485862e+03
3.43753691e+04 9.37524459e+01]
[ -2.48738183e+00 -1.05126925e+01 7.69866589e-01 3.21440021e+01
9.37524459e+01 5.23915258e+02]]
>>> print np.sqrt(np.diag(res_bfgs.cov_params()))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
>>> print res_norm3.bse
[ 5.47162086 75.03147114 6.98192136 82.60858536 185.40595756
22.88919522]
>>> res_norm3.conf_int
<bound method LikelihoodModelResults.conf_int of <statsmodels.model.LikelihoodModelResults object at 0x021317F0>>
>>> res_norm3.conf_int()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 993, in conf_int
lower = self.params - dist.ppf(1-alpha/2,self.model.df_resid) *\
AttributeError: 'MygMLE' object has no attribute 'df_resid'
>>> res_norm3.params
array([ -3.08181304, 234.34701361, -14.99684381, 27.94088692,
-237.14649571, 274.6857294 ])
>>> res2.params
array([ -3.08181404, 234.34702702, -14.99684418, 27.94090839,
-237.1465136 ])
>>>
>>> res_norm3.params - res2.params
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: shape mismatch: objects cannot be broadcast to a single shape
>>> res_norm3.params[:-1] - res2.params
array([ 9.96859735e-07, -1.34122981e-05, 3.72278400e-07,
-2.14645839e-05, 1.78919019e-05])
>>>
>>> res_norm3.bse[:-1] - res2.bse
array([ -0.04309567, -5.33447922, -0.48741559, -0.31373822, -13.94570729])
>>> (res_norm3.bse[:-1] / res2.bse) - 1
array([-0.00781467, -0.06637735, -0.06525554, -0.00378352, -0.06995531])
>>> (res_norm3.bse[:-1] / res2.bse)*100. - 100
array([-0.7814667 , -6.6377355 , -6.52555369, -0.37835193, -6.99553089])
>>> np.sqrt(np.diag(np.linalg.inv(res_norm3.model.hessian(res_bfgs.params))))
array([ NaN, NaN, NaN, NaN, NaN, NaN])
>>> np.sqrt(np.diag(np.linalg.inv(-res_norm3.model.hessian(res_bfgs.params))))
array([ 5.10032831, 74.34988912, 6.96522122, 76.7091604 ,
169.8117832 , 22.91695494])
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> res2.bse
array([ 5.51471653, 80.36595035, 7.46933695, 82.92232357,
199.35166485])
>>>
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(-res_norm3.model.hessian(res_bfgs.params))))
>>> (bse_bfgs[:-1] / res2.bse)*100. - 100
array([ -7.51422527, -7.4858335 , -6.74913633, -7.49275094, -14.8179759 ])
>>> hb=-approx_hess(res_bfgs.params, mod_norm2.loglike, epsilon=-1e-4)
>>> hf=-approx_hess(res_bfgs.params, mod_norm2.loglike, epsilon=1e-4)
>>> hh = (hf+hb)/2.
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(-hh)))
>>> bse_bfgs
array([ NaN, NaN, NaN, NaN, NaN, NaN])
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(hh)))
>>> np.diag(hh)
array([ 9.81680159e-01, 1.39920076e-02, 4.98101826e-01,
3.60955710e-04, 9.57811608e-04, 1.90709670e-03])
>>> np.diag(np.inv(hh))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'inv'
>>> np.diag(np.linalg.inv(hh))
array([ 2.64875153e+01, 5.91578496e+03, 5.13279911e+01,
6.11533345e+03, 3.33775960e+04, 5.24357391e+02])
>>> res2.bse**2
array([ 3.04120984e+01, 6.45868598e+03, 5.57909945e+01,
6.87611175e+03, 3.97410863e+04])
>>> bse_bfgs
array([ 5.14660231, 76.91414015, 7.1643556 , 78.20059751,
182.69536402, 22.89885131])
>>> bse_bfgs - res_norm3.bse
array([-0.32501855, 1.88266901, 0.18243424, -4.40798785, -2.71059354,
0.00965609])
>>> (bse_bfgs[:-1] / res2.bse)*100. - 100
array([-6.67512508, -4.29511526, -4.0831115 , -5.69415552, -8.35523538])
>>> (res_norm3.bse[:-1] / res2.bse)*100. - 100
array([-0.7814667 , -6.6377355 , -6.52555369, -0.37835193, -6.99553089])
>>> (bse_bfgs / res_norm3.bse)*100. - 100
array([-5.94007812, 2.50917247, 2.61295176, -5.33599242, -1.46197759,
0.04218624])
>>> bse_bfgs
array([ 5.14660231, 76.91414015, 7.1643556 , 78.20059751,
182.69536402, 22.89885131])
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> res2.bse
array([ 5.51471653, 80.36595035, 7.46933695, 82.92232357,
199.35166485])
>>> dir(res_bfgs)
['__class__', '__delattr__', '__dict__', '__doc__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__str__', '__weakref__', 'bse', 'conf_int', 'cov_params', 'f_test', 'initialize', 'llf', 'mle_retvals', 'mle_settings', 'model', 'normalized_cov_params', 'params', 'scale', 't', 't_test']
>>> res_bfgs.scale
1.0
>>> res2.scale
81083.015420213851
>>> res2.mse_resid
81083.015420213851
>>> print np.sqrt(np.diag(np.linalg.inv(-1*mod_norm2.hessian(res_bfgs.params))))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
>>> print np.sqrt(np.diag(np.linalg.inv(-1*res_bfgs.model.hessian(res_bfgs.params))))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
Is scale a misnomer, actually scale squared, i.e. variance of error term ?
'''
# Outer product of per-observation scores (OPG): an alternative covariance
# estimate to compare against the Hessian-based standard errors above.
print(res_norm3.model.score_obs(res_norm3.params).shape)
jac = res_norm3.model.score_obs(res_norm3.params)
print(np.sqrt(np.diag(np.dot(jac.T, jac)))/start_params)
jac2 = res_norm3.model.score_obs(res_norm3.params, centered=True)
print(np.sqrt(np.diag(np.linalg.inv(np.dot(jac.T, jac)))))
print(res_norm3.bse)
print(res2.bse)
# license: bsd-3-clause
# repo: Canpio/Paddle -- benchmark/paddle/image/plotlog.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import matplotlib.pyplot as plt
def parse_args():
    """Build the command-line parser for the plot script and parse sys.argv."""
    parser = argparse.ArgumentParser('Parse Log')
    parser.add_argument('--file_path', '-f', type=str,
                        help='the path of the log file')
    parser.add_argument('--sample_rate', '-s', type=float, default=1.0,
                        help='the rate to take samples from log')
    parser.add_argument('--log_period', '-p', type=int, default=1,
                        help='the period of log')
    return parser.parse_args()
def parse_file(file_name):
    """Extract the loss and accuracy series from a training log.

    Only lines that start with 'pass' and split into exactly five
    space-separated fields are used; field 2 carries 'loss=<v>,' and
    field 3 carries 'err=<v>,'.  Returns (losses, accuracies) where
    accuracy = 1 - error.
    """
    losses = []
    errors = []
    with open(file_name) as log:
        for raw in log:
            line = raw.strip()
            if not line.startswith('pass'):
                continue
            fields = line.split(' ')
            if len(fields) != 5:
                continue
            # Strip the trailing comma, then take the value after '='.
            losses.append(float(fields[2][:-1].split('=')[-1]))
            errors.append(float(fields[3][:-1].split('=')[-1]))
    return losses, [1.0 - err for err in errors]
def sample(metric, sample_rate):
    """Down-sample *metric*, keeping every int(1/sample_rate)-th point.

    Returns at most the first element when the sampling interval exceeds
    the series length.
    """
    interval = int(1.0 / sample_rate)
    if interval > len(metric):
        return metric[:1]
    # Bug fix: floor division keeps `num` an int under Python 3; with true
    # division range(num) raised TypeError on a float argument.
    num = len(metric) // interval
    idx = [interval * i for i in range(num)]
    return [metric[id] for id in idx]
def plot_metric(metric,
                batch_id,
                graph_title,
                line_style='b-',
                line_label='y',
                line_num=1):
    """Plot metric vs. batch id and save the figure as '<graph_title>.jpg'.

    When line_num > 1, metric, line_style and line_label are parallel
    sequences with one entry per plotted line.
    """
    plt.figure()
    plt.title(graph_title)
    if line_num == 1:
        plt.plot(batch_id, metric, line_style, label=line_label)
    else:
        for i in range(line_num):
            plt.plot(batch_id, metric[i], line_style[i], label=line_label[i])
    plt.xlabel('batch')
    plt.ylabel(graph_title)
    plt.legend()
    plt.savefig(graph_title + '.jpg')
    # Release the figure so repeated calls do not accumulate open figures.
    plt.close()
def main():
    """Entry point: parse CLI args, read the log, and save both plots."""
    args = parse_args()
    assert 0. < args.sample_rate <= 1.0, "The sample rate should in the range (0, 1]."
    loss, accuracy = parse_file(args.file_path)
    batch = [args.log_period * i for i in range(len(loss))]
    # Apply the same down-sampling to every series.
    take = lambda series: sample(series, args.sample_rate)
    batch_sample = take(batch)
    plot_metric(take(loss), batch_sample, 'loss', line_label='loss')
    plot_metric(take(accuracy), batch_sample, 'accuracy',
                line_style='g-', line_label='accuracy')
if __name__ == '__main__':
    main()
# license: apache-2.0
# repo: bundgus/python-playground -- matplotlib-playground/examples/event_handling/data_browser.py
import numpy as np
class PointBrowser(object):
    """
    Click on a point to select and highlight it -- the data that
    generated the point will be shown in the lower axes. Use the 'n'
    and 'p' keys to browse through the next and previous points
    """
    # NOTE(review): relies on module-level globals created in the __main__
    # block below: fig, ax, ax2, line, xs, ys and X.
    def __init__(self):
        # Index of the currently selected point.
        self.lastind = 0
        # Status text in the upper-left corner of the scatter axes.
        self.text = ax.text(0.05, 0.95, 'selected: none',
                            transform=ax.transAxes, va='top')
        # Yellow marker drawn over the selected point (hidden until a pick).
        self.selected, = ax.plot([xs[0]], [ys[0]], 'o', ms=12, alpha=0.4,
                                 color='yellow', visible=False)
    def onpress(self, event):
        """Key handler: 'n'/'p' steps to the next/previous point."""
        if self.lastind is None:
            return
        if event.key not in ('n', 'p'):
            return
        if event.key == 'n':
            inc = 1
        else:
            inc = -1
        self.lastind += inc
        # Clamp to the valid index range.
        self.lastind = np.clip(self.lastind, 0, len(xs) - 1)
        self.update()
    def onpick(self, event):
        """Pick handler: select the picked point nearest to the click."""
        if event.artist != line:
            return True
        N = len(event.ind)
        if not N:
            return True
        # the click locations
        x = event.mouseevent.xdata
        y = event.mouseevent.ydata
        # Among all points within the pick radius, choose the closest one.
        distances = np.hypot(x - xs[event.ind], y - ys[event.ind])
        indmin = distances.argmin()
        dataind = event.ind[indmin]
        self.lastind = dataind
        self.update()
    def update(self):
        """Redraw: highlight the selected point and plot its series below."""
        if self.lastind is None:
            return
        dataind = self.lastind
        ax2.cla()
        ax2.plot(X[dataind])
        ax2.text(0.05, 0.9, 'mu=%1.3f\nsigma=%1.3f' % (xs[dataind], ys[dataind]),
                 transform=ax2.transAxes, va='top')
        ax2.set_ylim(-0.5, 1.5)
        self.selected.set_visible(True)
        self.selected.set_data(xs[dataind], ys[dataind])
        self.text.set_text('selected: %d' % dataind)
        fig.canvas.draw()
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    # 100 random series of 200 samples; scatter their means against stds.
    # These globals (X, xs, ys, fig, ax, ax2, line) are read by PointBrowser.
    X = np.random.rand(100, 200)
    xs = np.mean(X, axis=1)
    ys = np.std(X, axis=1)
    fig, (ax, ax2) = plt.subplots(2, 1)
    ax.set_title('click on point to plot time series')
    line, = ax.plot(xs, ys, 'o', picker=5)  # 5 points tolerance
    browser = PointBrowser()
    # Wire the browser's handlers into matplotlib's event loop.
    fig.canvas.mpl_connect('pick_event', browser.onpick)
    fig.canvas.mpl_connect('key_press_event', browser.onpress)
    plt.show()
# license: mit
# repo: nwillemse/misc-scripts -- ib-downloader/ib-downloader2.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
ib-downloader.py
Created on Tue Jul 5 15:53:45 2016
@author: nwillemse
"""
import click
import time
import pandas as pd
from datetime import datetime
from ib.ext.Contract import Contract
from ib.opt import Connection
class Downloader:
def __init__(
self, tickers, exchange, ticker_type, expiry, barsize,
start_date, end_date, ib_client_id, ib_port
):
self.tickers = tickers
self.exchange = exchange
self.ticker_type = ticker_type
self.expiry = expiry
self.barsize = barsize
self.client_id = ib_client_id
self.order_id = 1
self.port = ib_port
self.currency = 'USD'
self.tws_conn = None
self.curr_ohlc = pd.DataFrame(
columns=['open', 'high', 'low', 'close', 'volume', 'open_interest']
)
self.no_data_error = False
self.got_hist_data = False
self.dates_list = self._get_trade_dates(start_date, end_date)
self.what_to_show = 'MIDPOINT' if ticker_type=='CASH' else 'TRADES'
    def _get_trade_dates(self, start_dt=None, end_dt=None):
        """Build the list of end-dates to request history for, newest first.

        For CASH/FUT every calendar day between start and end is used;
        otherwise NYSE trading days are read from nyse_dates.txt.
        Returns formatted date strings sorted descending.
        """
        if self.ticker_type in ['CASH', 'FUT']:
            dates = pd.date_range(start_dt, end_dt).tolist()
            # NOTE(review): map(...).sort() only works on Python 2, where map
            # returns a list -- consistent with this file's print statements.
            res = map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'), dates)
            res.sort(reverse=True)
            print res
        else:
            fn = 'nyse_dates.txt'
            print "Loading trading days from %s..." % fn
            a = pd.read_csv(fn, parse_dates=['trade_date'])
            # Keep trading days within [start_dt, end_dt], newest first.
            sub = a[a.trade_date >= start_dt].trade_date
            sub = sub[sub <= end_dt]
            sub.sort_values(ascending=False, inplace=True)
            res = sub.apply(lambda x: x.strftime('%Y-%m-%d')).values.tolist()
            print "Loaded %s days from %s to %s" % (len(res), res[-1], res[0])
            print res
        return res
    def error_handler(self, msg):
        """IB error callback: log server errors; error code 162 = no data."""
        if msg.typeName == "error": # and msg.id != -1:
            print "Server Error:", msg
            if msg.errorCode == 162:
                # Flag checked by the download loop to skip this date.
                self.no_data_error = True
    def server_handler(self, msg):
        """Dispatch incoming IB server messages to the matching handler."""
        if msg.typeName == "nextValidId":
            self.order_id = msg.orderId
        elif msg.typeName == "managedAccounts":
            self.account_code = msg.accountsList
            print self.account_code
        elif msg.typeName == "historicalData":
            self.historical_data_event(msg)
        elif msg.typeName == "error" and msg.id != -1:
            # Errors are handled separately by error_handler.
            return
        # else:
        #     print msg.typeName, msg
def create_contract(self, symbol, sec_type, exch, curr, expiry):
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = sec_type
contract.m_exchange = exch
contract.m_currency = curr
contract.m_expiry = expiry
if sec_type=='FUT':
contract.m_includeExpired = 1
print "symbol:%s secType:%s exchange:%s currency:%s expiry:%s" % (
contract.m_symbol, contract.m_secType, contract.m_exchange,
contract.m_currency, contract.m_expiry
)
return contract
def historical_data_event(self, msg):
if msg.date.find('finished') == -1:
try:
date = datetime.strptime(msg.date, '%Y%m%d %H:%M:%S')
except Exception, e:
date = datetime.strptime(msg.date, '%Y%m%d')
self.curr_ohlc.loc[date] = msg.open, msg.high, msg.low, msg.close, \
msg.volume, msg.count
else:
self.got_hist_data = True
def connect_to_tws(self):
self.tws_conn = Connection.create(host='localhost',
port=self.port,
clientId=self.client_id)
self.tws_conn.connect()
time.sleep(2)
if not self.tws_conn.isConnected():
raise Exception("Unable to connect to TWS. Make sure the Gateway or TWS has been started. Port=%s ClientId=%s" % (self.port, self.client_id))
def disconnect_from_tws(self):
if self.tws_conn is not None:
self.tws_conn.disconnect()
def register_callback_functions(self):
print "Registering callback functions..."
# Assign server messages handling function.
self.tws_conn.registerAll(self.server_handler)
# Assign error handling function.
self.tws_conn.register(self.error_handler, 'Error')
def request_historical_data(self, symbol_id, symbol):
contract = self.create_contract(symbol,
self.ticker_type,
self.exchange,
self.currency,
self.expiry)
for dt in self.dates_list:
self.got_hist_data = False
self.no_data_error = False
req_dt = datetime.strptime(dt, '%Y-%m-%d %H:%M:%S')
end_dt = req_dt.strftime('%Y%m%d %H:%M:%S')
print "Requesting history for %s on %s..." % (symbol, req_dt)
self.tws_conn.reqHistoricalData(symbol_id,
contract,
endDateTime=end_dt,
durationStr='1 D',
barSizeSetting=self.barsize,
whatToShow=self.what_to_show,
useRTH=0,
formatDate=1)
while not self.got_hist_data and not self.no_data_error:
time.sleep(1)
if self.no_data_error:
self.no_data_error = False
print "no data found for this day, continuing..."
continue
time.sleep(8)
def start(self):
try:
print "Connecing to tws..."
self.connect_to_tws()
self.register_callback_functions()
for ticker in self.tickers:
print "Request historical data for %s" % ticker
self.request_historical_data(1, ticker)
self.curr_ohlc.sort_index(ascending=False, inplace=True)
self.curr_ohlc.index.name = 'datetime'
if self.ticker_type=='CASH':
filename = ticker + '.' + self.currency + '.csv'
else:
filename = ticker + '.csv'
self.curr_ohlc.to_csv('data/' + filename)
except Exception, e:
print "Error:", e
finally:
print "disconnected"
self.disconnect_from_tws()
@click.command()
@click.option('--tickers', '-t', default='SPY',
              help='Comma separated list of tickers. Default="SPY"')
@click.option('--exchange', '-x', default='GLOBEX',
              help='Comma separated list of tickers. Default="SPY"')
@click.option('--tickertype', '-tt', default='STK',
              help='Type of tickers (STK, FUT or CASH). Defaul="STK"')
@click.option('--expiry', '-e',
              help='The expiry when FUT ticker type. Default=None')
@click.option('--barsize', '-bs', default='15 mins',
              help='Barsize of downloaded data. Default="15 mins"')
@click.option('--startdate', '-sd', default='2015-04-20',
              help='Starting date for data download (YYYY-MM-DD).')
@click.option('--enddate', '-ed', default='2015-05-04',
              help='Ending date for data download (YYYY-MM-DD).')
@click.option('--ib_client_id', '-c', default=200,
              help='IB Client Id.')
@click.option('--ib_port', '-p', default=4001,
              help='IB API Port.')
def main(tickers, exchange, tickertype, expiry, barsize, startdate,
         enddate, ib_client_id, ib_port
         ):
    """
    IB Downloader downloads data from Interactive Brokers for the specified
    list of tickers.
    """
    # NOTE(review): the --exchange help text appears copy-pasted from
    # --tickers; the option actually selects the IB exchange code.
    # Anchor both endpoints at 16:00 (exchange close).
    start_dt = datetime.strptime(startdate + ' 16:00:00', '%Y-%m-%d %H:%M:%S')
    end_dt = datetime.strptime(enddate + ' 16:00:00', '%Y-%m-%d %H:%M:%S')
    # Python 2: click yields unicode; force plain ASCII byte strings.
    tickers = tickers.encode('ascii', 'ignore').split(',')
    exchange = exchange.encode('ascii', 'ignore')
    ticker_type = tickertype.encode('ascii', 'ignore')
    barsize = barsize.encode('ascii', 'ignore')
    if ticker_type == 'FUT':
        expiry = expiry.encode('ascii', 'ignore')
    print 'Tickers: %s' % tickers

    system = Downloader(
        tickers, exchange, ticker_type, expiry, barsize,
        start_dt, end_dt, ib_client_id, ib_port
    )
    system.start()


if __name__ == '__main__':
    main()
| mit |
huzq/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 23 | 1405 | """
==============
SGD: Penalties
==============
Contours of where the penalty is equal to 1
for the three penalties L1, L2 and elastic-net.
All of the above are supported by :class:`~sklearn.linear_model.SGDClassifier`
and :class:`~sklearn.linear_model.SGDRegressor`.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

# Contour colors for each penalty.
l1_color = "navy"
l2_color = "c"
elastic_net_color = "darkorange"

# Dense grid over [-1.5, 1.5]^2 on which each penalty is evaluated.
line = np.linspace(-1.5, 1.5, 1001)
xx, yy = np.meshgrid(line, line)
l2 = xx ** 2 + yy ** 2                  # squared L2 norm
l1 = np.abs(xx) + np.abs(yy)            # L1 norm
rho = 0.5                               # elastic-net mixing parameter
elastic_net = rho * l1 + (1 - rho) * l2

plt.figure(figsize=(10, 10), dpi=100)
ax = plt.gca()

# Draw the unit level set (penalty == 1) of each penalty.
elastic_net_contour = plt.contour(xx, yy, elastic_net, levels=[1],
                                  colors=elastic_net_color)
l2_contour = plt.contour(xx, yy, l2, levels=[1], colors=l2_color)
l1_contour = plt.contour(xx, yy, l1, levels=[1], colors=l1_color)
ax.set_aspect("equal")
# Center the spines at the origin so the plot reads like a math diagram.
ax.spines['left'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('center')
ax.spines['top'].set_color('none')

# Label each contour at a fixed position instead of interactively.
plt.clabel(elastic_net_contour, inline=1, fontsize=18,
           fmt={1.0: 'elastic-net'}, manual=[(-1, -1)])
plt.clabel(l2_contour, inline=1, fontsize=18,
           fmt={1.0: 'L2'}, manual=[(-1, -1)])
plt.clabel(l1_contour, inline=1, fontsize=18,
           fmt={1.0: 'L1'}, manual=[(-1, -1)])

plt.tight_layout()
plt.show()
| bsd-3-clause |
cl4rke/scikit-learn | examples/model_selection/grid_search_digits.py | 227 | 2665 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This examples shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function

from sklearn import datasets
# NOTE(review): sklearn.cross_validation and sklearn.grid_search are the
# pre-0.18 module locations (removed in scikit-learn 0.20); modern code
# imports train_test_split / GridSearchCV from sklearn.model_selection.
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC

print(__doc__)

# Loading the Digits dataset
digits = datasets.load_digits()

# To apply an classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target

# Split the dataset in two equal parts: one for the grid search
# (development set), one held out for the final evaluation.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.5, random_state=0)

# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
                     'C': [1, 10, 100, 1000]},
                    {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]

# Hyper-parameters are optimized separately for each of these scorers.
scores = ['precision', 'recall']

for score in scores:
    print("# Tuning hyper-parameters for %s" % score)
    print()

    # 5-fold cross-validated grid search on the development set only.
    clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
                       scoring='%s_weighted' % score)
    clf.fit(X_train, y_train)

    print("Best parameters set found on development set:")
    print()
    print(clf.best_params_)
    print()
    print("Grid scores on development set:")
    print()
    # NOTE(review): the loop variable ``scores`` shadows the outer list of
    # scorer names; harmless (the outer iterator is already bound) but
    # confusing.
    for params, mean_score, scores in clf.grid_scores_:
        print("%0.3f (+/-%0.03f) for %r"
              % (mean_score, scores.std() * 2, params))
    print()

    print("Detailed classification report:")
    print()
    print("The model is trained on the full development set.")
    print("The scores are computed on the full evaluation set.")
    print()
    y_true, y_pred = y_test, clf.predict(X_test)
    print(classification_report(y_true, y_pred))
    print()

# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
intel-analytics/analytics-zoo | pyzoo/zoo/chronos/model/tcmf/local_model.py | 1 | 24645 | # Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is adapted from the DeepGlo Project. https://github.com/rajatsen91/deepglo
#
# Note: This license has also been called the "New BSD License" or "Modified BSD License". See also
# the 2-clause BSD License.
#
# Copyright (c) 2019 The DeepGLO Project.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.nn.utils import weight_norm
from zoo.chronos.model.tcmf.data_loader import TCMFDataLoader
from zoo.chronos.model.tcmf.time import TimeCovariates
import logging
logger = logging.getLogger(__name__)
class Chomp1d(nn.Module):
    """Trim trailing time steps introduced by causal-convolution padding.

    Drops the last ``chomp_size`` entries along the final (time) axis so
    the output length matches the convolution's input length.
    """

    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        # Negative slice on the time dimension removes the padded tail.
        return x[..., : -self.chomp_size].contiguous()
class TemporalBlock(nn.Module):
    """One residual block of the temporal convolution network.

    Two weight-normalized causal 1-D convolutions, each followed by a
    Chomp1d (trims the causal padding), ReLU and dropout, plus a residual
    connection.  A 1x1 convolution adapts the residual when the input and
    output channel counts differ.  The block output is ReLU(conv(x) + res).
    """

    def __init__(
        self,
        n_inputs,
        n_outputs,
        kernel_size,
        stride,
        dilation,
        padding,
        dropout=0.1,
        init=True,
    ):
        super(TemporalBlock, self).__init__()
        self.kernel_size = kernel_size
        self.conv1 = weight_norm(
            nn.Conv1d(
                n_inputs,
                n_outputs,
                kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
            )
        )
        self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)
        self.conv2 = weight_norm(
            nn.Conv1d(
                n_outputs,
                n_outputs,
                kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
            )
        )
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)
        # Main branch: conv -> chomp -> relu -> dropout, twice.
        self.net = nn.Sequential(
            self.conv1,
            self.chomp1,
            self.relu1,
            self.dropout1,
            self.conv2,
            self.chomp2,
            self.relu2,
            self.dropout2,
        )
        # 1x1 conv matches channel counts on the residual path when needed.
        self.downsample = (
            nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        )
        self.init = init
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        # init=True: near-moving-average start -- tiny Gaussian weights
        # plus a 1/kernel_size offset; init=False: Xavier uniform.
        if self.init:
            nn.init.normal_(self.conv1.weight, std=1e-3)
            nn.init.normal_(self.conv2.weight, std=1e-3)
            self.conv1.weight[:, 0, :] += (
                1.0 / self.kernel_size
            )  # new initialization scheme
            self.conv2.weight += 1.0 / self.kernel_size  # new initialization scheme
            nn.init.normal_(self.conv1.bias, std=1e-6)
            nn.init.normal_(self.conv2.bias, std=1e-6)
        else:
            nn.init.xavier_uniform_(self.conv1.weight)
            nn.init.xavier_uniform_(self.conv2.weight)
        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.1)

    def forward(self, x):
        out = self.net(x)
        res = x if self.downsample is None else self.downsample(x)
        return self.relu(out + res)
class TemporalBlockLast(nn.Module):
    """Final residual block of the temporal convolution network.

    Same layout as TemporalBlock, except the main branch omits the ReLUs
    (relu1/relu2 are constructed but not placed in ``net``) and the
    residual sum is returned without a final activation, giving the
    network a linear output layer.  Default dropout is 0.2 (vs 0.1).
    """

    def __init__(
        self,
        n_inputs,
        n_outputs,
        kernel_size,
        stride,
        dilation,
        padding,
        dropout=0.2,
        init=True,
    ):
        super(TemporalBlockLast, self).__init__()
        self.kernel_size = kernel_size
        self.conv1 = weight_norm(
            nn.Conv1d(
                n_inputs,
                n_outputs,
                kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
            )
        )
        self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.ReLU()  # NOTE: defined but intentionally unused below
        self.dropout1 = nn.Dropout(dropout)
        self.conv2 = weight_norm(
            nn.Conv1d(
                n_outputs,
                n_outputs,
                kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
            )
        )
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()  # NOTE: defined but intentionally unused below
        self.dropout2 = nn.Dropout(dropout)
        # Main branch without activations: conv -> chomp -> dropout, twice.
        self.net = nn.Sequential(
            self.conv1,
            self.chomp1,
            self.dropout1,
            self.conv2,
            self.chomp2,
            self.dropout2,
        )
        # 1x1 conv matches channel counts on the residual path when needed.
        self.downsample = (
            nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        )
        self.init = init
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        # Same initialization scheme as TemporalBlock.
        if self.init:
            nn.init.normal_(self.conv1.weight, std=1e-3)
            nn.init.normal_(self.conv2.weight, std=1e-3)
            self.conv1.weight[:, 0, :] += (
                1.0 / self.kernel_size
            )  # new initialization scheme
            self.conv2.weight += 1.0 / self.kernel_size  # new initialization scheme
            nn.init.normal_(self.conv1.bias, std=1e-6)
            nn.init.normal_(self.conv2.bias, std=1e-6)
        else:
            nn.init.xavier_uniform_(self.conv1.weight)
            nn.init.xavier_uniform_(self.conv2.weight)
        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.1)

    def forward(self, x):
        out = self.net(x)
        res = x if self.downsample is None else self.downsample(x)
        # No output activation: this is the network's linear head.
        return out + res
class TemporalConvNet(nn.Module):
    """Stack of dilated causal temporal-convolution residual blocks.

    The dilation doubles at each level.  Every level but the last uses
    ``TemporalBlock`` (with ReLU activations); the final level uses
    ``TemporalBlockLast``, which has no output activation.
    """

    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.1, init=True):
        super(TemporalConvNet, self).__init__()
        self.num_channels = num_channels
        self.num_inputs = num_inputs
        self.kernel_size = kernel_size
        self.dropout = dropout

        blocks = []
        last_level = len(num_channels) - 1
        for level, out_channels in enumerate(num_channels):
            dilation = 2 ** level
            in_channels = num_inputs if level == 0 else num_channels[level - 1]
            block_cls = TemporalBlockLast if level == last_level else TemporalBlock
            blocks.append(
                block_cls(
                    in_channels,
                    out_channels,
                    kernel_size,
                    stride=1,
                    dilation=dilation,
                    # Causal padding; Chomp1d inside the block trims it.
                    padding=(kernel_size - 1) * dilation,
                    dropout=dropout,
                    init=init,
                )
            )
        self.network = nn.Sequential(*blocks)

    def forward(self, x):
        return self.network(x)
class LocalModel(object):
    """Temporal-convolution ("Tconv") local model used by TCMF.

    Wraps a TemporalConvNet together with its data loader, normalization
    state and the train / predict / rolling-validation routines.
    """

    def __init__(
        self,
        Ymat,
        num_inputs=1,
        num_channels=[32, 32, 32, 32, 32, 1],
        kernel_size=7,
        dropout=0.2,
        vbsize=300,
        hbsize=128,
        lr=0.0005,
        val_len=10,
        test=True,
        end_index=120,
        normalize=False,
        start_date="2016-1-1",
        freq="H",
        covariates=None,
        use_time=False,
        dti=None,
        Ycov=None,
    ):
        """
        Arguments:
        Ymat: input time-series n*T
        num_inputs: always set to 1
        num_channels: list containing channel progression of temporal convolution network
        kernel_size: kernel size of temporal convolution filters
        dropout: dropout rate for each layer
        vbsize: vertical batch size
        hbsize: horizontal batch size
        lr: learning rate
        val_len: validation length
        test: always set to True
        end_index: no data is touched for training or validation beyond end_index
        normalize: normalize dataset before training or not
        start_date: start date in YYYY-MM-DD format (give a random date if unknown)
        freq: "H" hourly, "D": daily and for rest see here:
            https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
            # timeseries-offset-aliases
        covariates: global covariates common for all time series r*T,
            where r is the number of covariates
        Ycov: per time-series covariates n*l*T, l such covariates per time-series
        use_time: if false, default time-covariates are not used
        dti: date time object can be explicitly supplied here, leave None if default options are
            to be used
        """
        self.start_date = start_date
        # Optionally generate calendar/time covariates and stack them on
        # top of any user-supplied global covariates.
        if use_time:
            self.time = TimeCovariates(
                start_date=start_date, freq=freq, normalized=True, num_ts=Ymat.shape[1]
            )
            if dti is not None:
                self.time.dti = dti
            time_covariates = self.time.get_covariates()
            if covariates is None:
                self.covariates = time_covariates
            else:
                self.covariates = np.vstack([time_covariates, covariates])
        else:
            self.covariates = covariates
        self.Ycov = Ycov
        self.freq = freq
        self.vbsize = vbsize
        self.hbsize = hbsize
        self.num_inputs = num_inputs
        self.num_channels = num_channels
        self.lr = lr
        self.val_len = val_len
        self.Ymat = Ymat
        self.end_index = end_index
        self.normalize = normalize
        self.kernel_size = kernel_size
        self.dropout = dropout
        if normalize:
            # Standardize each series using training-region statistics
            # only, then shift so all values are non-negative.
            Y = Ymat
            m = np.mean(Y[:, 0: self.end_index], axis=1)
            s = np.std(Y[:, 0: self.end_index], axis=1)
            # s[s == 0] = 1.0
            s += 1.0  # guards against (near-)zero standard deviations
            Y = (Y - m[:, None]) / s[:, None]
            mini = np.abs(np.min(Y))
            self.Ymat = Y + mini
            # Saved so predictions can be mapped back to original scale.
            self.m = m
            self.s = s
            self.mini = mini

        # Every per-series covariate and every global covariate becomes an
        # extra input channel of the network.
        if self.Ycov is not None:
            self.num_inputs += self.Ycov.shape[1]
        if self.covariates is not None:
            self.num_inputs += self.covariates.shape[0]

        self.seq = TemporalConvNet(
            num_inputs=self.num_inputs,
            num_channels=num_channels,
            kernel_size=kernel_size,
            dropout=dropout,
            init=True,
        )
        self.seq = self.seq.float()
        self.D = TCMFDataLoader(
            Ymat=self.Ymat,
            vbsize=vbsize,
            hbsize=hbsize,
            end_index=end_index,
            val_len=val_len,
            covariates=self.covariates,
            Ycov=self.Ycov,
        )
        self.val_len = val_len  # NOTE: redundant re-assignment (set above)
    def train_model(self, num_epochs=300,
                    num_workers=1,
                    early_stop=False, tenacity=10):
        """Train the temporal convolution network.

        Trains locally when num_workers == 1; otherwise trains with
        Horovod-on-Ray across ``num_workers`` workers (requires an active
        RayContext).  Returns the final validation loss.
        """
        if num_workers == 1:
            return self.train_model_local(num_epochs=num_epochs,
                                          early_stop=early_stop,
                                          tenacity=tenacity)
        else:
            from zoo.chronos.model.tcmf.local_model_distributed_trainer import\
                train_yseq_hvd
            import ray

            # check whether there has been an activate ray context yet.
            from zoo.ray import RayContext
            ray_ctx = RayContext.get()
            # Put the large arrays into the Ray object store once so all
            # workers share them instead of receiving per-task copies.
            Ymat_id = ray.put(self.Ymat)
            covariates_id = ray.put(self.covariates)
            Ycov_id = ray.put(self.Ycov)
            trainer_config_keys = ["vbsize", "hbsize", "end_index", "val_len", "lr",
                                   "num_inputs", "num_channels", "kernel_size", "dropout"]
            trainer_config = {k: self.__dict__[k] for k in trainer_config_keys}
            model, val_loss = train_yseq_hvd(epochs=num_epochs,
                                             workers_per_node=num_workers // ray_ctx.num_ray_nodes,
                                             Ymat_id=Ymat_id,
                                             covariates_id=covariates_id,
                                             Ycov_id=Ycov_id,
                                             **trainer_config)
            # Adopt the distributed-trained network for later prediction.
            self.seq = model
            return val_loss
@staticmethod
def loss(out, target):
criterion = nn.L1Loss()
return criterion(out, target) / torch.abs(target.data).mean()
    def train_model_local(self, num_epochs=300, early_stop=False, tenacity=10):
        """Train the network in this process with Adam.

        early_stop: set true for using early stop
        tenacity: patience (in epochs without improvement) for early_stop
        Returns the last computed validation loss.
        """
        print("Training Local Model(Tconv)")
        optimizer = optim.Adam(params=self.seq.parameters(), lr=self.lr)
        iter_count = 0
        loss_all = []
        min_val_loss = float("inf")
        scount = 0  # epochs since the last validation improvement
        val_loss = 0
        # Fixed validation batch, reused at every epoch boundary.
        inp_test, out_target_test, _, _ = self.D.supply_test()
        while self.D.epoch < num_epochs:
            last_epoch = self.D.epoch
            inp, out_target, _, _ = self.D.next_batch()
            current_epoch = self.D.epoch
            inp = Variable(inp)
            out_target = Variable(out_target)
            optimizer.zero_grad()
            out = self.seq(inp)
            loss = LocalModel.loss(out, out_target)
            iter_count = iter_count + 1
            for p in self.seq.parameters():
                p.requires_grad = True
            loss.backward()
            # Clamp gradients elementwise to stabilize training.
            for p in self.seq.parameters():
                p.grad.data.clamp_(max=1e5, min=-1e5)
            optimizer.step()
            loss_all = loss_all + [loss.item()]
            if current_epoch > last_epoch:
                # The data loader rolled over into a new epoch: validate.
                inp_test = Variable(inp_test)
                out_target_test = Variable(out_target_test)
                out_test = self.seq(inp_test)
                val_loss = LocalModel.loss(out_test, out_target_test).item()
                print("Entering Epoch:{}".format(current_epoch))
                print("Train Loss:{}".format(np.mean(loss_all)))
                print("Validation Loss:{}".format(val_loss))
                if val_loss <= min_val_loss:
                    min_val_loss = val_loss
                    scount = 0
                    # Snapshot the best model via a pickle round-trip
                    # (acts as a deep copy).
                    self.saved_seq = pickle.loads(pickle.dumps(self.seq))
                else:
                    scount += 1
                    if scount > tenacity and early_stop:
                        # Restore the best snapshot and stop.
                        self.seq = self.saved_seq
                        break
        return val_loss
@staticmethod
def convert_to_input(data):
n, m = data.shape
inp = torch.from_numpy(data).view(1, n, m)
inp = inp.transpose(0, 1).float()
return inp
@staticmethod
def convert_covariates(data, covs):
nd, td = data.shape
rcovs = np.repeat(
covs.reshape(1, covs.shape[0], covs.shape[1]), repeats=nd, axis=0
)
rcovs = torch.from_numpy(rcovs).float()
return rcovs
@staticmethod
def convert_ycovs(data, ycovs):
ycovs = torch.from_numpy(ycovs).float()
return ycovs
@staticmethod
def convert_from_output(T):
out = T.view(T.size(0), T.size(2))
return np.array(out.detach())
@staticmethod
def predict_future_batch(
data, covariates=None, ycovs=None, future=10, model=None,
):
# init inp, cov, ycovs for Local model
valid_cov = covariates is not None
inp = LocalModel.convert_to_input(data)
if valid_cov:
cov = LocalModel.convert_covariates(data, covariates)
inp = torch.cat((inp, cov[:, :, 0: inp.size(2)]), 1)
if ycovs is not None:
ycovs = LocalModel.convert_ycovs(data, ycovs)
inp = torch.cat((inp, ycovs[:, :, 0: inp.size(2)]), 1)
ci = inp.size(2)
for i in range(future):
out = model(inp)
output = out[:, :, out.size(2) - 1].view(out.size(0), out.size(1), 1)
if valid_cov:
output = torch.cat(
(output, cov[:, :, ci].view(cov.size(0), cov.size(1), 1)), 1
)
if ycovs is not None:
output = torch.cat(
(output, ycovs[:, :, ci].view(ycovs.size(0), ycovs.size(1), 1)), 1
)
out = torch.cat((inp, output), dim=2)
inp = out
ci += 1
out = out[:, 0, :].view(out.size(0), 1, out.size(2))
y = LocalModel.convert_from_output(out)
return y
@staticmethod
def _predict_future(data, ycovs, covariates, model, future, I):
out = None
for i in range(len(I) - 1):
bdata = data[range(I[i], I[i + 1]), :]
batch_ycovs = ycovs[range(I[i], I[i + 1]), :, :] \
if ycovs is not None else None
cur_out = LocalModel.predict_future_batch(
bdata, covariates, batch_ycovs, future, model,
)
out = np.vstack([out, cur_out]) if out is not None else cur_out
return out
    def predict_future(
        self,
        data_in,
        covariates=None,
        ycovs=None,
        future=10,
        bsize=40,
        normalize=False,
        num_workers=1,
    ):
        """
        data_in: input past data in same format of Ymat
        covariates: input past covariates
        ycovs: input past individual covariates
        future: number of time-points to predict
        bsize: batch size for processing (determine according to gpu memory limits)
        normalize: should be set according to the normalization used in the class initialization
        num_workers: number of workers to run prediction. if num_workers > 1, then prediction will
            run in distributed mode and there has to be an activate RayContext.

        Returns an (n, T + future) numpy array; when normalize=True the
        result is mapped back to the original data scale.
        """
        with torch.no_grad():
            # Apply the same affine normalization used at training time.
            if normalize:
                data = (data_in - self.m[:, None]) / self.s[:, None]
                data += self.mini
            else:
                data = data_in

            n, T = data.shape
            # Row boundaries of the prediction batches: [0, bsize, ..., n].
            I = list(np.arange(0, n, bsize))
            I.append(n)
            model = self.seq
            if num_workers > 1:
                import ray
                import math
                # Split the batch boundaries into roughly equal per-worker
                # slices; consecutive slices share one boundary element.
                batch_num_per_worker = math.ceil(len(I) / num_workers)
                indexes = [I[i:i + batch_num_per_worker + 1] for i in
                           range(0, len(I) - 1, batch_num_per_worker)]
                logger.info(f"actual number of workers used in prediction is {len(indexes)}")
                # Share the large inputs through the Ray object store.
                data_id = ray.put(data)
                covariates_id = ray.put(covariates)
                ycovs_id = ray.put(ycovs)
                model_id = ray.put(model)

                @ray.remote
                def predict_future_worker(I):
                    # Runs on a Ray worker; fetches the shared objects and
                    # predicts its assigned row ranges.
                    data = ray.get(data_id)
                    covariates = ray.get(covariates_id)
                    ycovs = ray.get(ycovs_id)
                    model = ray.get(model_id)
                    out = LocalModel._predict_future(data, ycovs, covariates, model, future, I)
                    return out

                remote_out = ray.get([predict_future_worker
                                      .remote(index)
                                      for index in indexes])
                # Workers processed disjoint row ranges in order.
                out = np.concatenate(remote_out, axis=0)
            else:
                out = LocalModel._predict_future(data, ycovs, covariates, model, future, I)

            # Undo the normalization on the way out.
            if normalize:
                temp = (out - self.mini) * self.s[:, None] + self.m[:, None]
                out = temp

        return out
    def rolling_validation(self, Ymat, tau=24, n=7, bsize=90, alpha=0.3):
        """Rolling-origin evaluation over the last ``n`` windows of ``tau`` steps.

        Repeatedly predicts ``tau`` steps ahead from the network's receptive
        field, rolls the forecast origin forward by ``tau``, and aggregates
        error metrics over all windows.

        NOTE(review): the metric helpers wape/mape/smape are neither defined
        nor imported in the visible part of this module -- verify they are
        in scope at runtime.  ``alpha`` appears to be unused.

        Returns a dict of metrics plus a naive previous-window baseline.
        """
        last_step = Ymat.shape[1] - tau * n
        # Receptive field of the TCN: 1 + 2*(kernel-1)*2^(levels-1).
        rg = 1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels) - 1)
        self.seq = self.seq.eval()
        # Slice covariates to cover the input window plus the horizon.
        if self.covariates is not None:
            covs = self.covariates[:, last_step - rg: last_step + tau]
        else:
            covs = None
        if self.Ycov is not None:
            ycovs = self.Ycov[:, :, last_step - rg: last_step + tau]
        else:
            ycovs = None
        data_in = Ymat[:, last_step - rg: last_step]
        out = self.predict_future(
            data_in,
            covariates=covs,
            ycovs=ycovs,
            future=tau,
            bsize=bsize,
            normalize=self.normalize,
        )
        predicted_values = []
        actual_values = []
        # Keep only the tau newly predicted columns of the window.
        S = out[:, -tau::]
        predicted_values += [S]
        R = Ymat[:, last_step: last_step + tau]
        actual_values += [R]
        print("Current window wape:{}".format(wape(S, R)))

        # Roll the origin forward and repeat for the remaining windows.
        for i in range(n - 1):
            last_step += tau
            rg = 1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels) - 1)
            if self.covariates is not None:
                covs = self.covariates[:, last_step - rg: last_step + tau]
            else:
                covs = None
            if self.Ycov is not None:
                ycovs = self.Ycov[:, :, last_step - rg: last_step + tau]
            else:
                ycovs = None
            data_in = Ymat[:, last_step - rg: last_step]
            out = self.predict_future(
                data_in,
                covariates=covs,
                ycovs=ycovs,
                future=tau,
                bsize=bsize,
                normalize=self.normalize,
            )
            S = out[:, -tau::]
            predicted_values += [S]
            R = Ymat[:, last_step: last_step + tau]
            actual_values += [R]
            print("Current window wape:{}".format(wape(S, R)))

        predicted = np.hstack(predicted_values)
        actual = np.hstack(actual_values)
        dic = {}
        dic["wape"] = wape(predicted, actual)
        dic["mape"] = mape(predicted, actual)
        dic["smape"] = smape(predicted, actual)
        dic["mae"] = np.abs(predicted - actual).mean()
        dic["rmse"] = np.sqrt(((predicted - actual) ** 2).mean())
        dic["nrmse"] = dic["rmse"] / np.sqrt(((actual) ** 2).mean())
        # Naive baseline: the window immediately preceding each target window.
        baseline = Ymat[:, Ymat.shape[1] - n * tau - tau: Ymat.shape[1] - tau]
        dic["baseline_wape"] = wape(baseline, actual)
        dic["baseline_mape"] = mape(baseline, actual)
        dic["baseline_smape"] = smape(baseline, actual)
        return dic
| apache-2.0 |
tawsifkhan/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted for chance measure such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
                             fixed_n_classes=None, n_runs=5, seed=42):
    """Compute score for 2 random uniform cluster labelings.

    Both random labelings have the same number of clusters for each
    possible value in ``n_clusters_range``.

    When ``fixed_n_classes`` is not None the first labeling is considered a
    ground truth class assignment with a fixed number of classes.

    Parameters
    ----------
    score_func : callable ``(labels_a, labels_b) -> float``
    n_samples : int, number of samples per labeling
    n_clusters_range : sequence of int, cluster counts to evaluate
    fixed_n_classes : int or None, size of the fixed ground-truth label set
    n_runs : int, independent repetitions per cluster count
    seed : int, RNG seed (fixed so results are reproducible)

    Returns
    -------
    ndarray of shape (len(n_clusters_range), n_runs)
    """
    # randint draws from the half-open interval [low, high), so ``high`` is
    # the cluster count itself.  The previous implementation used
    # RandomState.random_integers (inclusive upper bound, high=k-1), which
    # has been deprecated since NumPy 1.11 and later removed.
    random_labels = np.random.RandomState(seed).randint
    scores = np.zeros((len(n_clusters_range), n_runs))

    if fixed_n_classes is not None:
        # One fixed "ground truth" assignment reused for every run.
        labels_a = random_labels(low=0, high=fixed_n_classes, size=n_samples)

    for i, k in enumerate(n_clusters_range):
        for j in range(n_runs):
            if fixed_n_classes is None:
                labels_a = random_labels(low=0, high=k, size=n_samples)
            labels_b = random_labels(low=0, high=k, size=n_samples)
            scores[i, j] = score_func(labels_a, labels_b)
    return scores
score_funcs = [
    metrics.adjusted_rand_score,
    metrics.v_measure_score,
    metrics.adjusted_mutual_info_score,
    metrics.mutual_info_score,
]

# Part 1: two independent random labelings with equal cluster number.
n_samples = 100
# ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; it was an
# alias for the builtin ``int``, which is the documented replacement.
n_clusters_range = np.linspace(2, n_samples, 10).astype(int)

plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
    print("Computing %s for %d values of n_clusters and n_samples=%d"
          % (score_func.__name__, len(n_clusters_range), n_samples))

    t0 = time()
    scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
    print("done in %0.3fs" % (time() - t0))
    # Median score with one-std error bars per cluster count.
    plots.append(plt.errorbar(
        n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
    names.append(score_func.__name__)

plt.title("Clustering measures for 2 random uniform labelings\n"
          "with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
# The ``ymin``/``ymax`` keyword aliases were removed in Matplotlib 3.1;
# ``bottom``/``top`` are the documented parameter names.
plt.ylim(bottom=-0.05, top=1.05)

# Part 2: random labelings with varying n_clusters scored against ground
# class labels with a fixed number of classes.
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(int)
n_classes = 10

plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
    print("Computing %s for %d values of n_clusters and n_samples=%d"
          % (score_func.__name__, len(n_clusters_range), n_samples))

    t0 = time()
    scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
                                      fixed_n_classes=n_classes)
    print("done in %0.3fs" % (time() - t0))
    # Mean score with one-std error bars per cluster count.
    plots.append(plt.errorbar(
        n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
    names.append(score_func.__name__)

plt.title("Clustering measures for random uniform labeling\n"
          "against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(bottom=-0.05, top=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
indhub/mxnet | docs/mxdoc.py | 2 | 13330 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A sphnix-doc plugin to build mxnet docs"""
import subprocess
import re
import os
import json
import sys
from recommonmark import transform
import pypandoc
# import StringIO from io for python3 compatibility
from io import StringIO
import contextlib
# White list of doc paths whose code-block output is evaluated during the
# build, such as ['tutorials/gluon'].  Empty means no evaluation.
_EVAL_WHILTELIST = []
# Start or end of a fenced code block: captures the leading indent and the
# (possibly empty) language tag.  FIX: raw string avoids the invalid '\w'
# escape-sequence warning on Python 3.12+.
_CODE_MARK = re.compile(r'^([ ]*)```([\w]*)')
# Supported language names mapped to (file extension, comment symbol).
_LANGS = {'python' : ('py', '#'),
          'r' : ('R','#'),
          'scala' : ('scala', '#'),
          'julia' : ('jl', '#'),
          'perl' : ('pl', '#'),
          'cpp' : ('cc', '//'),
          'bash' : ('sh', '#')}
# Markers replaced in the markdown source with generated HTML buttons.
_LANG_SELECTION_MARK = 'INSERT SELECTION BUTTONS'
_SRC_DOWNLOAD_MARK = 'INSERT SOURCE DOWNLOAD BUTTONS'
def _run_cmd(cmds):
    """Run a shell command string, raising CalledProcessError on failure.

    NOTE(review): a list input is concatenated with no separator, which only
    makes sense if the pieces already carry their own delimiters -- confirm.
    """
    command = cmds if isinstance(cmds, str) else "".join(cmds)
    print("Execute \"%s\"" % command)
    try:
        subprocess.check_call(command, shell=True)
    except subprocess.CalledProcessError as err:
        print(err)
        raise err
def generate_doxygen(app):
    """Run the doxygen make commands"""
    # Build the C++ API docs in the source tree, then copy the generated
    # HTML into the sphinx output directory.
    _run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir)
    _run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir)
def build_mxnet(app):
    """Build mxnet .so lib, seeding the default make config on a fresh checkout."""
    have_config = os.path.exists(os.path.join(app.builder.srcdir, '..', 'config.mk'))
    if have_config:
        _run_cmd("cd %s/.. && make -j$(nproc) DEBUG=1" %
                 app.builder.srcdir)
    else:
        _run_cmd("cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) DEBUG=1" %
                 app.builder.srcdir)
def build_r_docs(app):
    """Build the R reference-manual PDF and move it into the output dir."""
    r_root = app.builder.srcdir + '/../R-package'
    # BUG FIX: 'root_path' was never defined in this function (NameError at
    # runtime); derive the repo root from the sphinx source dir instead.
    root_path = app.builder.srcdir + '/..'
    pdf_path = root_path + '/docs/api/r/mxnet-r-reference-manual.pdf'
    _run_cmd('cd ' + r_root +
             '; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path)
    dest_path = app.builder.outdir + '/api/r/'
    _run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path)
def build_scala_docs(app):
    """Generate scaladoc and relocate the generated files into the outdir."""
    scala_path = app.builder.srcdir + '/../scala-package'
    # scaldoc fails on some apis, so exit 0 to pass the check
    _run_cmd('cd ..; make scalapkg')
    _run_cmd('cd ' + scala_path + '; scaladoc `find . -type f -name "*.scala" | egrep \"\/core|\/infer\" | egrep -v \"Suite\"`; exit 0')
    dest_path = app.builder.outdir + '/api/scala/docs'
    _run_cmd('rm -rf ' + dest_path)
    _run_cmd('mkdir -p ' + dest_path)
    # Move each generated artifact into the destination one by one.
    for doc_file in ('index', 'index.html', 'org', 'lib', 'index.js', 'package.html'):
        _run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path)
def _convert_md_table_to_rst(table):
    """Convert a markdown table (list of '|'-delimited rows) to rst.

    Returns the rst list-table text, or '' when the input is not a valid
    markdown table: fewer than 3 rows, inconsistent column counts, or a
    malformed '---' separator row.
    """
    if len(table) < 3:
        return ''
    out = '```eval_rst\n.. list-table::\n :header-rows: 1\n\n'
    ncol = 0
    for i, l in enumerate(table):
        cols = l.split('|')[1:-1]
        if i == 0:
            ncol = len(cols)
        elif len(cols) != ncol:
            # Every row must have the header's column count.
            return ''
        if i == 1:
            # Second row must be the '---' separator row.
            for c in cols:
                # BUG FIX: 'len(c) is not 0' compared identity, not value.
                if len(c) != 0 and '---' not in c:
                    return ''
        else:
            # Header (i == 0) and data rows (i >= 2) are converted cell by cell.
            for j, c in enumerate(cols):
                out += ' * - ' if j == 0 else ' - '
                out += pypandoc.convert_text(
                    c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n'
    out += '```\n'
    return out
def convert_table(app, docname, source):
    """Find markdown tables in each source chunk and convert them to rst.

    Mutates *source* in place, as sphinx 'source-read' handlers do.
    """
    num_tables = 0
    for i, j in enumerate(source):
        table = []
        output = ''
        in_table = False
        for l in j.split('\n'):
            r = l.strip()
            if r.startswith('|'):
                # Accumulate contiguous '|' rows as one table.
                table.append(r)
                in_table = True
            else:
                if in_table is True:
                    converted = _convert_md_table_to_rst(table)
                    # BUG FIX: "converted is ''" compared identity, not value.
                    if converted == '':
                        print("Failed to convert the markdown table")
                        print(table)
                    else:
                        num_tables += 1
                        output += converted
                    in_table = False
                    table = []
                output += l + '\n'
        source[i] = output
    if num_tables > 0:
        print('Converted %d tables in %s' % (num_tables, docname))
def _parse_code_lines(lines):
    """Iterate over lines, annotating each with fenced-code-block state.

    Yields
    ------
    tuple of (str, bool, str, int)
        - the line itself
        - whether the line is inside (or delimits) a code block
        - the code block language (None outside code blocks)
        - the code block indent (None outside code blocks)
    """
    in_code = False
    lang = None
    indent = None
    for line in lines:
        match = _CODE_MARK.match(line)
        if match is None:
            # Plain line: report the current state unchanged.
            yield (line, in_code, lang, indent)
            continue
        if in_code:
            # Closing fence: still reported as code, then reset the state.
            yield (line, in_code, lang, indent)
            lang = None
            indent = None
            in_code = False
        else:
            # Opening fence: only fences with a known language start a block.
            if match.groups()[1].lower() in _LANGS:
                lang = match.groups()[1].lower()
                indent = len(match.groups()[0])
                in_code = True
            yield (line, in_code, lang, indent)
def _get_lang_selection_btn(langs):
    """Return HTML for a row of language-selection buttons.

    The first language in *langs* is rendered as the active button; labels
    are capitalized (e.g. 'python' -> 'Python').
    """
    parts = ['<div class="text-center">\n<div class="btn-group opt-group" role="group">']
    for pos, lang in enumerate(langs):
        css = 'active' if pos == 0 else ''
        label = lang[0].upper() + lang[1:].lower()
        parts.append('<button type="button" class="btn btn-default opt %s">%s</button>\n' % (css, label))
    parts.append('</div>\n</div> <script type="text/javascript" src="../../_static/js/options.js"></script>')
    return ''.join(parts)
def _get_blocks(lines):
    """Split lines into alternating code and non-code blocks.

    Yields
    ------
    tuple of (bool, str, list of str)
        - whether the block is a code block
        - the block's source language (None for non-code)
        - the block's lines; code blocks have their fences and surrounding
          blank lines stripped
    """
    def _trim(block):
        # Drop the ``` fences plus blank lines at either end.
        block = block[1:-1]
        while block and len(block[0]) == 0:
            block.pop(0)
        while block and len(block[-1]) == 0:
            block.pop()
        return block

    cur_block = []
    pre_lang = None
    pre_in_code = None
    for (line, in_code, cur_lang, _) in _parse_code_lines(lines):
        if in_code != pre_in_code:
            # State flipped: emit whatever has accumulated so far.
            if pre_in_code and len(cur_block) >= 2:
                cur_block = _trim(cur_block)
            if len(cur_block):
                yield (pre_in_code, pre_lang, cur_block)
            cur_block = []
        cur_block.append(line)
        pre_lang = cur_lang
        pre_in_code = in_code
    if len(cur_block):
        yield (pre_in_code, pre_lang, cur_block)
def _get_mk_code_block(src, lang):
    """Wrap *src* in a fenced markdown code block tagged with *lang*.

    A None language produces an untagged fence.  Trailing whitespace on
    *src* is stripped before wrapping.
    """
    tag = '' if lang is None else lang
    return '```%s\n%s\n```\n' % (tag, src.rstrip())
@contextlib.contextmanager
def _string_io():
    """Temporarily redirect stdout/stderr into a StringIO buffer."""
    oldout = sys.stdout
    olderr = sys.stderr
    # BUG FIX: StringIO is imported via 'from io import StringIO', so the
    # old 'StringIO.StringIO()' raised AttributeError.
    strio = StringIO()
    sys.stdout = strio
    sys.stderr = strio
    try:
        yield strio
    finally:
        # Restore the real streams even if the body raised.
        sys.stdout = oldout
        sys.stderr = olderr
def _get_python_block_output(src, global_dict, local_dict):
    """Evaluate python source code, capturing stdout/stderr.

    Returns
    -------
    (bool, str)
        success flag, and the captured output (with the error message
        appended on failure).
    """
    # Strip IPython magics and blocking plt.show() calls before executing.
    runnable = '\n'.join(l for l in src.split('\n')
                         if not l.startswith('%') and 'plt.show()' not in l)
    ok = True
    err = ''
    with _string_io() as captured:
        try:
            # global_dict is passed as both namespaces on purpose, so
            # definitions persist across blocks of the same document.
            exec(runnable, global_dict, global_dict)
        except Exception as exc:
            err = str(exc)
            ok = False
    return (ok, captured.getvalue() + err)
def _get_jupyter_notebook(lang, all_lines):
    """Assemble a Jupyter-notebook dict from markdown lines.

    Blocks written in *lang* become code cells; everything else (including
    code blocks of other languages) becomes markdown cells.
    """
    # Lines tagged <!--notebook-skip-line--> are dropped entirely.
    kept = [line for line in all_lines if "<!--notebook-skip-line-->" not in line]
    cells = []
    for in_code, blk_lang, lines in _get_blocks(kept):
        is_code = in_code and blk_lang == lang
        cell = {
            "cell_type": "code" if is_code else "markdown",
            "metadata": {},
            "source": '\n'.join(lines)
        }
        if is_code:
            cell["outputs"] = []
            cell["execution_count"] = None
        cells.append(cell)
    return {"nbformat" : 4,
            "nbformat_minor" : 2,
            "metadata" : {"language":lang, "display_name":'', "name":''},
            "cells" : cells}
def _get_source(lang, lines):
    """Render markdown lines as a *lang* source file.

    Code-block lines are kept verbatim; other lines are emitted as comments
    using the language's comment symbol.  HTML scaffolding and matplotlib
    magics are dropped.
    """
    cmt = _LANGS[lang][1] + ' '
    out = []
    # BUG FIX: _get_blocks() takes only the lines and yields 3-tuples
    # (in_code, lang, lines); the old call passed lang as an extra argument
    # and unpacked pairs, which raised a TypeError at runtime.
    for in_code, _, blk in _get_blocks(lines):
        if in_code:
            out.append('')
        for l in blk:
            if in_code:
                if '%matplotlib' not in l:
                    out.append(l)
            else:
                if ('<div>' in l or '</div>' in l or
                    '<script>' in l or '</script>' in l or
                    '<!--' in l or '-->' in l or
                    '%matplotlib' in l ):
                    continue
                out.append(cmt+l)
        if in_code:
            out.append('')
    return out
def _get_src_download_btn(out_prefix, langs, lines):
    """Write a notebook per language to disk and return download-button HTML."""
    btn = '<div class="btn-group" role="group">\n'
    for lang in langs:
        # Python keeps the bare prefix name; other languages get a suffix.
        suffix = '.ipynb' if lang == 'python' else '_' + lang + '.ipynb'
        ipynb = out_prefix + suffix
        with open(ipynb, 'w') as f:
            json.dump(_get_jupyter_notebook(lang, lines), f)
        fname = ipynb.split('/')[-1]
        btn += '<div class="download-btn"><a href="%s" download="%s">' \
               '<span class="glyphicon glyphicon-download-alt"></span> %s</a></div>' % (fname, fname, fname)
    btn += '</div>\n'
    return btn
def add_buttons(app, docname, source):
    """Insert source-download buttons and re-render code blocks per page.

    Optionally evaluates python blocks of whitelisted docs and appends their
    captured output.  Mutates *source* in place, as sphinx 'source-read'
    handlers do.
    """
    out_prefix = app.builder.outdir + '/' + docname
    dirname = os.path.dirname(out_prefix)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    for i,j in enumerate(source):
        local_dict = {}
        global_dict = {}
        lines = j.split('\n')
        # Languages actually present in this page's code blocks.
        langs = set([l for (_, _, l, _) in _parse_code_lines(lines)
                     if l is not None and l in _LANGS])
        # first convert
        for k,l in enumerate(lines):
            if _SRC_DOWNLOAD_MARK in l:
                lines[k] = _get_src_download_btn(
                    out_prefix, langs, lines)
        # # then add lang buttons
        # for k,l in enumerate(lines):
        #     if _LANG_SELECTION_MARK in l:
        #         lines[k] = _get_lang_selection_btn(langs)
        output = ''
        # NOTE: the loop variable rebinds 'lines'; the original list has
        # already been handed to _get_blocks by then.
        for in_code, lang, lines in _get_blocks(lines):
            src = '\n'.join(lines)+'\n'
            if in_code:
                output += _get_mk_code_block(src, lang)
                # Evaluate whitelisted python blocks and show their output.
                if lang == 'python' and any([w in docname for w in _EVAL_WHILTELIST]):
                    status, blk_out = _get_python_block_output(src, global_dict, local_dict)
                    if len(blk_out):
                        output += '<div class=\"cell-results-header\">Output:</div>\n\n'
                        output += _get_mk_code_block(blk_out, 'results')
            else:
                output += src
        source[i] = output
        # source[i] = '\n'.join(lines)
def setup(app):
    """Sphinx extension entry point: register build hooks and transforms."""
    # If MXNET_DOCS_BUILD_MXNET is set something different than 1
    # Skip the build step
    if os.getenv('MXNET_DOCS_BUILD_MXNET', '1') == '1':
        app.connect("builder-inited", build_mxnet)
        app.connect("builder-inited", generate_doxygen)
        app.connect("builder-inited", build_scala_docs)
    # skipped to build r, it requires to install latex, which is kinds of too heavy
    # app.connect("builder-inited", build_r_docs)
    app.connect('source-read', convert_table)
    app.connect('source-read', add_buttons)
    app.add_config_value('recommonmark_config', {
        'url_resolver': lambda url: 'http://mxnet.io/' + url,
        'enable_eval_rst': True,
    }, True)
    app.add_transform(transform.AutoStructify)
| apache-2.0 |
bo-yang/stock_analysis | symbol.py | 1 | 53489 | from stock_analysis.utils import *
# conda install -c conda-forge selenium=3.0.1
from selenium import webdriver
def parse_google_financial_table(tables, keyword=None):
    """
    Parse a Google Finance financial table into a DataFrame.

    tables  - iterable of selenium WebElements (anything exposing .text)
    keyword - when given, only a table whose text contains it is parsed

    Returns a DataFrame indexed by 'Entries' with one column per quarter
    date found in the header row, or an empty DataFrame when no suitable
    table is found.
    """
    tbl = None
    for t in tables:
        if len(t.text) <= 1:
            continue
        # BUG FIX: comparisons against None use 'is', not '=='.
        if keyword is not None and keyword not in t.text:
            continue
        tbl = t
        break
    if tbl is None:
        return DataFrame()
    lines = tbl.text.strip().splitlines()
    # Get quaters from the first row, e.g.
    # 'In Millions of USD (except for per share items) 3 months ending 2016-10-31 3 months ending 2016-07-31 ...'
    quarters = re.findall(r'([0-9]+-[0-9]+-[0-9]+)', lines[0])
    # Each following row is '<entry name> <v1> <v2> ...': the trailing
    # len(quarters) tokens are values, the rest is the entry name.
    rows = list()
    for line in lines[1:]:
        l = line.strip().split(' ')
        values = l[-len(quarters):] # the right part
        key = ' '.join(l[:-len(quarters)]) # the left part
        rows.append([key] + values)
    colstr = ['Entries'] + quarters
    fin_df = DataFrame(rows, columns=colstr)
    fin_df.drop_duplicates(inplace=True)
    fin_df.set_index('Entries', inplace=True)
    return fin_df
class Symbol:
"""
Class of a stock symbol.
"""
    def __init__(self, sym, name=None, start=DEFAULT_START_DATE, end=None, datapath='./data', loaddata=True):
        """
        Initialize a stock symbol.

        sym      - ticker symbol, e.g. 'AAPL'
        name     - optional display name, also used as the cache sub-directory
        start    - start date (parsed by parse_start_end_date)
        end      - end date; semantics of None are defined by parse_start_end_date
        datapath - root directory for per-symbol cached CSV files
        loaddata - if True, immediately load cached data from disk
        """
        self.sym = sym # ticker symbol, e.g. 'AAPL'
        self.cik = str() # CIK
        self.exch = None # stock exchange symbol, e.g. NMS, NYQ
        self.quotes = DataFrame()
        self.stats = DataFrame()
        self.earnings = DataFrame()
        self.income = DataFrame() # Income Statement
        self.balance = DataFrame() # Balance Sheet
        self.cashflow = DataFrame() # Cash Flow
        self.name = name
        # Cache directory: prefer the display name, fall back to the ticker.
        if name != None:
            self.datapath = os.path.normpath(datapath+'/'+name)
        else:
            self.datapath = os.path.normpath(datapath+'/'+sym)
        self.edgarpath = os.path.normpath(self.datapath+'/edgar')
        # Per-category CSV cache files under datapath.
        self.files = {'quotes':self.datapath + '/quotes.csv',
                      'stats':self.datapath + '/stats.csv',
                      'income':self.datapath + '/income.csv',
                      'balance':self.datapath + '/balance.csv',
                      'cashflow':self.datapath + '/cashflow.csv'}
        [self.start_date, self.end_date] = parse_start_end_date(start, end)
        if loaddata:
            self.load_data(from_file=True)
def _handle_start_end_dates(self, start, end):
if start == None and end == None:
return [self.start_date, self.end_date]
else:
return parse_start_end_date(start, end)
def _adj_close(self):
if 'Adj Close' in self.quotes.columns:
return 'Adj Close'
elif 'Close' in self.quotes.columns:
return 'Close'
else:
return 'NA'
def get_quotes(self, start=None, end=None):
"""
Download history quotes from Yahoo or Google Finance.
Return Pandas DataFrame in the format of
Open High Low Close Volume Adj Close
Date
2004-06-23 15.000000 17.299999 14.75 17.200001 43574400 4.30
2004-06-24 17.549999 17.690001 16.50 16.760000 8887200 4.19
2004-06-25 16.510000 16.750000 15.79 15.800000 6710000 3.95
2004-06-28 16.000000 16.209999 15.44 16.000000 2270800 4.00
2004-06-29 16.000000 16.700001 15.83 16.400000 2112000 4.10
"""
[start_date, end_date] = self._handle_start_end_dates(start, end)
sym = self.sym
for n_tries in range(0,5): # try at most 5 times
try:
self.quotes = web.DataReader(sym, 'yahoo', start_date, end_date)
break
except:
print('Error: %s: failed to download historical quotes from Yahoo Finance, try Google Finance...' %sym)
try:
self.quotes = web.DataReader(sym, 'google', start_date, end_date)
break
except:
print('Error: %s: failed to download historical quotes from Google Finance.' %sym)
if self.quotes.empty:
if os.path.isfile(self.files['quotes']):
print('%s: loading quotes from %s' %(sym, self.files['quotes']))
self.quotes = pd.read_csv(self.files['quotes'], index_col='Date') # quotes manually downloaded
else:
print('!!!Error: %s: failed to download historical quotes!!!' %sym)
return None
# remove possible strings and convert to numbers
if self.quotes[self._adj_close()].dtypes != np.dtype('float64'):
m = self.quotes != 'null'
self.quotes = self.quotes.where(m, np.nan).dropna(how='any').astype(float)
self.start_date = to_date(self.quotes.first_valid_index()) # update start date
return self.quotes
    def get_realtime_quote(self):
        """ Get current price
        Return the current price of this symbol.
        """
        # NOTE(review): 'yf' is expected to be the yahoofinancials module,
        # pulled in via the wildcard import in this file -- confirm.
        ticker = yf.YahooFinancials(self.sym)
        return ticker.get_current_price()
def get_financials(self, exchange=None, browser=None):
"""
Download financial data from Google Finance.
The financial data are stored in *reversed* time order from left to right.
"""
if exchange == None:
if self.exch == None:
if 'Exchange' in self.stats.columns:
self.exch = self.stats['Exchange'][self.sym]
else:
# get exchange from Yahoo Finance
self.exch = get_symbol_exchange(self.sym)
exchange = get_exchange_by_sym(self.exch)
if exchange == None:
exchange = "NASDAQ" # Final resort, just a guess
# e.g. https://www.google.com/finance?q=NYSE%3ACRM&fstype=ii
site='https://www.google.com/finance?q=' + exchange + '%3A' + self.sym + '&fstype=ii'
close_browser = False
if browser == None:
browser=webdriver.Chrome()
close_browser = True
# set timeout
browser.set_page_load_timeout(75)
browser.set_script_timeout(60)
# Income Statement
try:
browser.get(site)
except:
print("%s: Download financial failed, try again..." %self.sym)
#time.sleep(1)
try:
browser.get(site)
except:
print("Error: %s: failed to get link: %s." %(self.sym, site))
if close_browser:
browser.quit()
return
try:
tables=browser.find_elements_by_id('fs-table')
except:
print('Error: timeout when finding \'fs-table\' for %s, exchange %s' %(self.sym, exchange))
if close_browser:
browser.quit()
return
if len(tables) < 1:
# Make sure the current page is the Financial page
try:
link=browser.find_element_by_link_text('Financials')
link.click()
tables=browser.find_elements_by_id('fs-table')
except:
print("Error: Financials link not found for %s, exchange %s" %(self.sym, exchange))
if close_browser:
browser.quit()
return
if len(tables) < 1:
print('Error: %s: failed to find income statement, exchange %s.' %(self.sym, exchange))
if close_browser:
browser.quit()
return
else:
self.income = parse_google_financial_table(tables, 'Revenue')
# Balance Sheet
link=browser.find_element_by_link_text('Balance Sheet')
link.click()
tables=browser.find_elements_by_id('fs-table')
tables=browser.find_elements_by_id('fs-table')
if len(tables) < 1:
print('Error: %s: failed to find balance sheet.' %self.sym)
if close_browser:
browser.quit()
return
else:
self.balance = parse_google_financial_table(tables, 'Total Assets')
# Cash Flow
link=browser.find_element_by_link_text('Cash Flow')
link.click()
tables=browser.find_elements_by_id('fs-table')
if len(tables) < 1:
print('Error: %s: failed to find cash flow.' %self.sym)
if close_browser:
browser.quit()
return
else:
self.cashflow = parse_google_financial_table(tables, 'Amortization')
if close_browser:
browser.quit()
return
def download_earning(self, baseurl, fname, form='10-Q', force_update=False):
"""
Download the specified earning report from given URL and save it to edgarpath/form/.
"""
form_path = self.edgarpath+'/'+form
if not os.path.isdir(form_path):
os.makedirs(form_path)
report = form_path+'/'+fname
if force_update or not os.path.isfile(report):
# download the data
url = baseurl+'/'+fname
try:
r = requests.get(url)
except requests.exceptions.RequestException as e:
print('%s: request failure:' %self.sym)
print(e)
return
if r.status_code != requests.codes.ok:
print('%s: earning download failure, status %d, link %s' %(self.sym, r.status_code, url))
# save the result anyway
with open(report, 'w') as f:
f.write(r.text)
f.close()
def load_earnings(self, form='10-Q'):
"""
Load all earning reports from dir edgarpath/form/.
"""
form_path = self.edgarpath+'/'+form
if not os.path.isdir(form_path):
print('Error: path %s not found' %form_path)
return DataFrame()
for form in os.listdir(form_path):
f = os.path.normpath(form_path+'/'+form)
earning = XBRL(f).fields
earning = DataFrame(list(earning.items()), columns=['Entries', earning['BalanceSheetDate']])
earning.set_index('Entries', inplace=True)
if self.earnings.empty:
self.earnings = earning
else:
self.earnings = self.earnings.join(earning)
return self.earnings
def load_data(self, from_file=True):
"""
Get stock data from file or web.
"""
if from_file:
if os.path.isfile(self.files['quotes']):
self.quotes = pd.read_csv(self.files['quotes'])
self.quotes.set_index('Date', inplace=True)
if os.path.isfile(self.files['stats']):
self.stats = pd.read_csv(self.files['stats'])
self.stats.set_index('Symbol', inplace=True)
else:
self.get_quotes()
self.get_stats()
#self.get_financials()
self.load_financial_data(from_file)
def load_financial_data(self, from_file=True):
"""
Load financial data from file or web.
"""
if from_file:
if os.path.isfile(self.files['income']):
self.income = pd.read_csv(self.files['income'])
self.income.set_index('Entries', inplace=True)
if os.path.isfile(self.files['balance']):
self.balance = pd.read_csv(self.files['balance'])
self.balance.set_index('Entries', inplace=True)
if os.path.isfile(self.files['cashflow']):
self.cashflow = pd.read_csv(self.files['cashflow'])
self.cashflow.set_index('Entries', inplace=True)
else:
self.get_financials()
def save_data(self):
"""
Save stock data into files.
"""
if not os.path.isdir(self.datapath):
os.makedirs(self.datapath)
if len(self.quotes) > 0:
self.quotes.to_csv(self.files['quotes'])
if len(self.stats) > 0:
self.stats.to_csv(self.files['stats'])
self.save_financial_data()
def save_financial_data(self):
"""
Save financial data.
"""
if not os.path.isdir(self.datapath):
os.makedirs(self.datapath)
if len(self.income) > 0:
self.income.to_csv(self.files['income'])
if len(self.balance) > 0:
self.balance.to_csv(self.files['balance'])
if len(self.cashflow) > 0:
self.cashflow.to_csv(self.files['cashflow'])
def return_on_investment(self, start=None, end=None, exclude_dividend=True):
"""
Calculate stock Return On Investiment(ROI, or Rate Of Return) for a given period.
Total Stock Return = ((P1 - P0) + D) / P0
where
P0 = Initial Stock Price
P1 = Ending Stock Price
D = Dividends
"""
if self.quotes.empty:
self.get_quotes()
if self.quotes.empty:
return 0
[start_date, end_date] = self._handle_start_end_dates(start, end)
adj_close = self.quotes.loc[start_date.strftime('%Y-%m-%d'):end_date.strftime('%Y-%m-%d'), self._adj_close()]
if self.quotes.empty or len(adj_close) < 1:
return -99999999
no_dividend = ('DividendYield' not in self.stats.columns) or np.isnan(self.stats['DividendYield'][self.sym])
if exclude_dividend or no_dividend:
dividend = 0
else:
# For simplicity, suppose the dividend yield is calculated as
# Dividend Yield = (Annual Dividends Per Share) / (Avg Price Per Share)
# This is not accurate and need to be enhanced.
dividend = self.stats['DividendYield'][self.sym] * adj_close.mean() / 100 # yearly dividend
dividend = dividend / 365 * (end_date-start_date).days # dividend in the range
start_price = adj_close[0]
end_price = adj_close[-1]
if type(start_price) == str:
start_price = str2num(start_price)
if type(end_price) == str:
end_price = str2num(end_price)
if start_price == 0 or np.isnan(start_price):
start_price = 0.00001
if end_price == 0 or np.isnan(end_price):
end_price == 0.00001
roi = (end_price - start_price + dividend) / start_price
return roi
def return_periodic(self, periods=6, freq='365D'):
"""
Calculate periodic average/median returns.
periods and freq are parameters passed to Pandas date_range().
"""
if self.quotes.empty:
self.get_quotes()
if self.quotes.empty:
return [np.nan, np.nan]
returns = []
start_date = self.quotes.first_valid_index()
end_date = self.quotes.last_valid_index()
[start_date, end_date] = self._handle_start_end_dates(start_date, end_date)
days = pd.date_range(end=end_date, periods=periods, freq=freq)[::-1] # The past (periods-1) periods in reverse order
for i in range(1, len(days)):
if days[i].date() < start_date:
break # out of boundary
#print('yearly: %s - %s' %(days[i].ctime(), days[i-1].ctime())) # FIXME: TEST
returns.append(self.return_on_investment(days[i], days[i-1], exclude_dividend=True))
if len(returns) > 0:
ret_avg = np.mean(returns)
ret_median = np.median(returns)
else:
print('Error: %s: failed to calculate periodic(%s) returns.' %(self.sym, freq))
ret_avg = np.nan
ret_median = np.nan
return [ret_avg, ret_median]
    def growth_stats(self, exclude_dividend=False):
        """
        Additional stats that calculated based on history price.

        Builds a one-row DataFrame indexed by Symbol with trailing returns
        over several horizons, periodic average/median returns, the current
        price's position in the 52-week range, and absolute price growth.
        When quotes are unavailable every column (including the Symbol
        placeholder) holds the -99999999 sentinel.
        """
        labels = ['Symbol', 'LastWeekReturn', 'LastMonthReturn', 'LastQuarterReturn', 'HalfYearReturn', '1YearReturn', '2YearReturn', '3YearReturn', 'AvgMonthlyReturn', 'MedianMonthlyReturn', 'AvgQuarterlyReturn', 'MedianQuarterlyReturn', 'AvgYearlyReturn', 'MedianYearlyReturn', 'PriceIn52weekRange', 'LastQuarterGrowth', 'LastYearGrowth']
        if self.quotes.empty:
            self.get_quotes()
        if self.quotes.empty:
            # Failed to get history quotes, insert position holders.
            st = np.zeros(len(labels)) - 99999999
            stats = DataFrame([st.tolist()], columns=labels)
            return stats
        [end_date, one_week_ago, one_month_ago, three_month_ago, half_year_ago, one_year_ago, two_year_ago, three_year_ago, five_year_ago] = get_stats_intervals(self.end_date)
        # Trailing returns over fixed horizons ending today.
        last_week_return = self.return_on_investment(one_week_ago, end_date, exclude_dividend)
        last_month_return = self.return_on_investment(one_month_ago, end_date, exclude_dividend)
        quarter_return = self.return_on_investment(three_month_ago, end_date, exclude_dividend)
        half_year_return = self.return_on_investment(half_year_ago, end_date, exclude_dividend)
        one_year_return = self.return_on_investment(one_year_ago, end_date, exclude_dividend)
        two_year_return = self.return_on_investment(two_year_ago, end_date, exclude_dividend)
        three_year_return = self.return_on_investment(three_year_ago, end_date, exclude_dividend)
        # Periodic average/median returns over rolling windows.
        [yearly_ret_avg, yearly_ret_median] = self.return_periodic(periods=6, freq='365D') # yearly returns in the past 5 years
        [quart_ret_avg, quart_ret_median] = self.return_periodic(periods=13, freq='90D') # quarterly returns in the past 3 years
        [monthly_ret_avg, monthly_ret_median] = self.return_periodic(periods=25, freq='30D') # monthly returns in the past 2 years
        adj_close = self.quotes.loc[one_year_ago.strftime('%Y-%m-%d'):end_date.strftime('%Y-%m-%d'), self._adj_close()].dropna(how='all')
        if not adj_close.empty and len(adj_close) > 0:
            current = adj_close[-1]
            last_year_growth = current - adj_close[0]
            # Current price in 52-week range should between [0, 1] - larger number means more expensive.
            pos_in_range = (current - adj_close.min()) / (adj_close.max() - adj_close.min())
            adj_close = self.quotes.loc[three_month_ago.strftime('%Y-%m-%d'):end_date.strftime('%Y-%m-%d'), self._adj_close()].dropna(how='all')
            if not adj_close.empty and len(adj_close) > 0:
                last_quarter_growth = adj_close[-1] - adj_close[0]
            else:
                last_quarter_growth = np.nan
        else:
            pos_in_range = np.nan
            last_year_growth = np.nan
            last_quarter_growth = np.nan
        st = [[self.sym, last_week_return, last_month_return, quarter_return, half_year_return, one_year_return, two_year_return,
               three_year_return, monthly_ret_avg, monthly_ret_median, quart_ret_avg, quart_ret_median, yearly_ret_avg, yearly_ret_median,
               pos_in_range, last_quarter_growth, last_year_growth]]
        stats = DataFrame(st, columns=labels)
        stats.drop_duplicates(inplace=True)
        stats.set_index('Symbol', inplace=True)
        return stats
def sma(self, n=20, start=None, end=None):
"""
Calculate the Simple Moving Average.
Return - pandas Series.
"""
[start_date, end_date] = self._handle_start_end_dates(start, end)
stock = self.quotes[self._adj_close()]
move_avg = pd.Series(moving_average(stock, n, type='simple'), index=stock.index)
return move_avg[start_date.strftime('%Y-%m-%d'):end_date.strftime('%Y-%m-%d')].dropna(how='all')
def ema(self, n=10, start=None, end=None):
"""
Exponential Moving Average(EMA)
Return - pandas Series.
"""
if self.quotes.empty:
self.get_quotes()
if self.quotes.empty:
return pd.Series()
[start_date, end_date] = self._handle_start_end_dates(start, end)
# EMA is start date sensitive
tmp_start = start_date - BDay(n) # The first n days are used for init, so go back for n business days
stock = self.quotes[self._adj_close()][tmp_start.strftime('%Y-%m-%d'):end_date.strftime('%Y-%m-%d')]
avg = pd.Series(moving_average(stock, n, type='exponential'), index=stock.index)
return avg[start_date.strftime('%Y-%m-%d'):end_date.strftime('%Y-%m-%d')].dropna(how='all')
def diverge_to_index(self, index, n=10, start=None, end=None):
"""
Calculate the diverge between this symbol and the given index.
Exponential moving average is used for smoothing the prices.
Inputs:
index - Symbol of index(e.g. sp500)
n - window passed to EMA
Return - mean diverages
"""
if self.quotes.empty:
self.get_quotes()
if index.quotes.empty:
index.get_quotes()
if self.quotes.empty or index.quotes.empty:
return np.nan
[start_date, end_date] = self._handle_start_end_dates(start, end)
# use the latest available starting date
start_date = max(to_date(self.quotes.first_valid_index()), to_date(index.quotes.first_valid_index()), start_date)
move_avg_index = index.ema(n, start_date, end_date).dropna(how='all')
move_avg_symbol = self.ema(n, start_date, end_date).dropna(how='all')
if move_avg_symbol.empty or move_avg_index.empty:
return np.nan
move_avg_index /= move_avg_index[0] # normalization
move_avg_symbol /= move_avg_symbol[0] # normalization
diff = move_avg_symbol - move_avg_index
if diff.empty:
return np.nan
else:
return diff.mean()
def relative_growth(self, index, start=None, end=None):
"""
Percentage of price change relative to the given index.
"""
if self.quotes.empty:
self.get_quotes()
if index.quotes.empty:
index.get_quotes()
if self.quotes.empty or index.quotes.empty:
return np.nan
[start_date, end_date] = self._handle_start_end_dates(start, end)
# use the latest available starting date
start_date = max(to_date(self.quotes.first_valid_index()), to_date(index.quotes.first_valid_index()), start_date)
stock_quote = self.quotes[self._adj_close()][start_date.strftime('%Y-%m-%d'):end_date.strftime('%Y-%m-%d')].dropna(how='all')
index_quote = index.quotes[self._adj_close()][start_date.strftime('%Y-%m-%d'):end_date.strftime('%Y-%m-%d')].dropna(how='all')
if stock_quote.empty or index_quote.empty:
return np.nan
stock_growth = str2num(stock_quote[-1]) / str2num(stock_quote[0])
index_growth = str2num(index_quote[-1]) / str2num(index_quote[0])
return stock_growth / index_growth
def _relative_average_periodic(self, index, start_date, end_date, periods, freq, cb, median=True):
"""
Average of relative growth for given periods
"""
growths = []
days = pd.date_range(end=end_date, periods=periods, freq=freq)[::-1] # in reverse order
for i in range(1, len(days)):
if days[i].date() < start_date:
break # out of boundary
diff = cb(index, start=days[i], end=days[i-1])
if np.isnan(diff):
break
else:
growths.append(diff)
if median:
return np.median(growths)
else:
return np.mean(growths)
    def relative_growth_stats(self, index=None):
        """
        Calculate stats of relative growth to S&P 500.
        index: a Symbol class of index, e.g. S&P 500.

        Returns a one-row DataFrame indexed by Symbol with relative growth
        over fixed horizons plus periodic (weekly/monthly/quarterly/yearly)
        aggregates; empty DataFrame when quotes are unavailable.
        """
        if index == None:
            index = Symbol('^GSPC', name='SP500', loaddata=False) # S&P500
        if self.quotes.empty:
            self.get_quotes()
        if index.quotes.empty:
            index.get_quotes()
        if self.quotes.empty or index.quotes.empty:
            return DataFrame()
        labels = ['Symbol', 'RelativeGrowthLastWeek', 'RelativeGrowthLastMonth', 'RelativeGrowthLastQuarter', 'RelativeGrowthHalfYear', 'RelativeGrowthLastYear', 'RelativeGrowthLast2Years', 'RelativeGrowthLast3Years', 'WeeklyRelativeGrowth', 'MonthlyRelativeGrowth', 'QuarterlyRelativeGrowth', 'YearlyRelativeGrowth']
        [end_date, one_week_ago, one_month_ago, three_month_ago, half_year_ago, one_year_ago, two_year_ago, three_year_ago, five_year_ago] = get_stats_intervals(self.end_date)
        # Relative growth over fixed horizons ending today.
        relative_growth_one_week = self.relative_growth(index, start=one_week_ago, end=end_date)
        relative_growth_one_month = self.relative_growth(index, start=one_month_ago, end=end_date)
        relative_growth_last_quarter = self.relative_growth(index, start=three_month_ago, end=end_date)
        relative_growth_half_year = self.relative_growth(index, start=half_year_ago, end=end_date)
        relative_growth_last_year = self.relative_growth(index, start=one_year_ago, end=end_date)
        relative_growth_two_year = self.relative_growth(index, start=two_year_ago, end=end_date)
        relative_growth_three_year = self.relative_growth(index, start=three_year_ago, end=end_date)
        # periodic stats
        start_date = max(to_date(self.quotes.first_valid_index()), to_date(index.quotes.first_valid_index()) )
        weekly_rel_growth = self._relative_average_periodic(index, start_date, end_date, 13, '7D', self.relative_growth) # The past 3 months
        monthly_rel_growth = self._relative_average_periodic(index, start_date, end_date, 13, '30D', self.relative_growth) # The past 12 months
        quarterly_rel_growth = self._relative_average_periodic(index, start_date, end_date, 13, '90D', self.relative_growth) # The past 3 years
        yearly_rel_growth = self._relative_average_periodic(index, start_date, end_date, 7, '365D', self.relative_growth) # The past 6 years
        # 'QuarterlyDivergeIndex' and 'YearlyDivergeIndex'
        #quarterly_diverge = self._relative_average_periodic(index, start_date, end_date, 13, '90D', self.diverge_to_index) # The past 3 years
        #yearly_diverge = self._relative_average_periodic(index, start_date, end_date, 6, '365D', self.diverge_to_index) # The past 5 years
        stats = [[self.sym, relative_growth_one_week, relative_growth_one_month, relative_growth_last_quarter, relative_growth_half_year,
                  relative_growth_last_year, relative_growth_two_year, relative_growth_three_year, weekly_rel_growth, monthly_rel_growth,
                  quarterly_rel_growth, yearly_rel_growth]]
        stats_df = DataFrame(stats, columns=labels)
        stats_df.drop_duplicates(inplace=True)
        stats_df.set_index('Symbol', inplace=True)
        return stats_df
def trend_stats(self):
    """
    Get all the technical details of trend.

    Builds a one-row DataFrame (indexed by symbol) of momentum
    indicators computed over the last 90 calendar days: latest ROC,
    RSI, MACD histogram, fast/slow stochastic oscillators, average
    fast stochastic over the past month and quarter, 7/14-day ROC
    trend values, and the week-over-week trading-volume ratio.

    Returns an empty DataFrame when history quotes are unavailable.
    """
    if self.quotes.empty:
        self.get_quotes()
    if self.quotes.empty:
        # Quotes could not be downloaded - no stats possible.
        print('Error: %s: history quotes are not available.' %self.sym)
        return DataFrame()
    end_date = dt.date.today()
    start_date = end_date - dt.timedelta(days=90)
    one_month_ago = end_date - dt.timedelta(days=30)
    labels = ['Symbol', 'ROC', 'ROC Trend 7D', 'ROC Trend 14D', 'RSI', 'MACD Diff', 'FSTO', 'SSTO', 'AvgFSTOLastMonth', 'AvgFSTOLastQuarter', 'VolumeChange']
    # Latest rate-of-change; NaN when not enough history.
    roc = self.roc(start=start_date, end=end_date)
    if roc.empty or len(roc) < 1:
        roc_stat = np.nan
    else:
        roc_stat = roc[-1]
    # Latest relative strength index.
    rsi = self.rsi(start=start_date, end=end_date)
    if rsi.empty or len(rsi) < 1:
        rsi_stat = np.nan
    else:
        rsi_stat = rsi[-1]
    # MACD histogram (MACD line minus signal line), latest value.
    [macd, signal, diff] = self.macd(start=start_date, end=end_date)
    if diff.empty or len(diff) < 1:
        macd_stat = np.nan
    else:
        macd_stat = diff[-1]
    # Stochastic oscillators: K is the fast line, D the slow (smoothed) line.
    [K,D] = self.stochastic(start=start_date, end=end_date)
    if K.empty or len(K) < 1:
        fsto_stat = np.nan
        avg_fsto_past_month = np.nan
        avg_fsto_past_quarter = np.nan
    else:
        fsto_stat = K[-1]
        avg_fsto_past_month = K[one_month_ago.strftime('%Y-%m-%d'):end_date.strftime('%Y-%m-%d')].mean()
        avg_fsto_past_quarter = K.mean()  # K already spans the 90-day window
    if D.empty or len(D) < 1:
        ssto_stat = np.nan
    else:
        ssto_stat = D[-1]
    # ROC Trend: trend of the ROC series over the past 7 and 14 days.
    seven_days_ago = end_date - dt.timedelta(days=7)
    forteen_days_ago = end_date - dt.timedelta(days=14)
    roc_trend_7d = find_trend(roc[seven_days_ago.strftime('%Y-%m-%d'):end_date.strftime('%Y-%m-%d')])
    roc_trend_14d = find_trend(roc[forteen_days_ago.strftime('%Y-%m-%d'):end_date.strftime('%Y-%m-%d')])
    # Volume changes in the past two weeks: mean volume this week vs last week.
    this_week_start = end_date - dt.timedelta(days=7)
    last_week_end = end_date - dt.timedelta(days=8)
    last_week_start = end_date - dt.timedelta(days=15)
    volume_this_week = self.quotes['Volume'].loc[this_week_start.strftime('%Y-%m-%d'):].mean()
    volume_last_week = self.quotes['Volume'].loc[last_week_start.strftime('%Y-%m-%d') : last_week_end.strftime('%Y-%m-%d')].mean()
    if volume_last_week != 0:
        volume_change = volume_this_week / volume_last_week
    else:
        volume_change = 999999  # sentinel for division by zero
    stats = [[self.sym, roc_stat, roc_trend_7d, roc_trend_14d, rsi_stat, macd_stat, fsto_stat, ssto_stat, avg_fsto_past_month, avg_fsto_past_quarter, volume_change]]
    stats_df = DataFrame(stats, columns=labels)
    stats_df.drop_duplicates(inplace=True)
    stats_df.set_index('Symbol', inplace=True)
    return stats_df
def financial_stats(self, exchange=None, browser=None, update=False):
    """
    Calculate financial stats.

    Derives momentum/margin metrics (revenue momentum, profit and
    operating margins, asset and debt momentum, cash-flow momentum)
    from the income statement, balance sheet and cash-flow statement
    and returns them as a one-row DataFrame indexed by symbol.

    exchange: string of stock exchange, e.g. NASDAQ or NYSE
    browser: selenium webdriver
    update: force to update financial data from web
    """
    if update: #FIXME: or self.income.empty or self.balance.empty or self.cashflow.empty:
        self.get_financials(exchange=exchange, browser=browser)
        self.save_financial_data()
    else:
        print('Loading financials under %s .' %self.datapath) # FIXME
        self.load_financial_data()
    labels = ['Symbol', 'RevenueMomentum', 'ProfitMargin', 'AvgProfitMargin', 'ProfitMarginMomentum', 'OperatingMargin', 'AvgOperatingMargin', 'OperatingMarginMomentum', 'AssetMomentum', 'Debt/Assets', 'Avg Debt/Assets', 'Debt/Assets Momentum', 'OperatingCashMomentum', 'InvestingCashMomentum', 'FinancingCashMomentum']
    # Default to empty Series so downstream arithmetic still runs when a
    # financial statement is missing.
    net_income = pd.Series()
    operate_income = pd.Series()
    revenue = pd.Series()
    total_assets = pd.Series()
    total_debt = pd.Series()
    total_liabilities = pd.Series()
    total_liab_equity = pd.Series()
    total_equity = pd.Series()
    cash_change = pd.Series()
    cash_operating = pd.Series()
    cash_investing = pd.Series()
    cash_financing = pd.Series()
    if not self.income.empty:
        # The top-line row is labelled either 'Revenue' or 'Total Revenue'
        # depending on the data source; '-' appears to mark missing values
        # (TODO confirm against the scraper output).
        if '-' not in self.income.loc['Revenue'].tolist():
            revenue = financial_fmt(self.income.loc['Revenue'])
        else:
            revenue = financial_fmt(self.income.loc['Total Revenue'])
        net_income = financial_fmt(self.income.loc['Net Income'])
        operate_income = financial_fmt(self.income.loc['Operating Income'])
    if not self.balance.empty:
        total_assets = financial_fmt(self.balance.loc['Total Assets'])
        total_debt = financial_fmt(self.balance.loc['Total Debt'])
        total_liabilities = financial_fmt(self.balance.loc['Total Liabilities'])
        total_liab_equity = financial_fmt(self.balance.loc['Total Liabilities & Shareholders\' Equity'])
    if not self.cashflow.empty:
        cash_change = financial_fmt(self.cashflow.loc['Net Change in Cash'])
        cash_operating = financial_fmt(self.cashflow.loc['Cash from Operating Activities'])
        cash_investing = financial_fmt(self.cashflow.loc['Cash from Investing Activities'])
        cash_financing = financial_fmt(self.cashflow.loc['Cash from Financing Activities'])
    # Income-statement derived momenta; zeros when no income data.
    if len(revenue) > 0:
        revenue_momentum = find_trend(revenue, fit_poly=False)
        profit_margins = net_income / revenue
        profit_margin_moment = find_trend(profit_margins.dropna(how='all'), fit_poly=False)
        operating_margins = operate_income / revenue
        operate_margin_moment = find_trend(operating_margins.dropna(how='all'), fit_poly=False)
    else:
        revenue_momentum = 0
        profit_margins = np.zeros(4)
        profit_margin_moment = 0
        operating_margins = np.zeros(4)
        operate_margin_moment = 0
    # Balance-sheet derived momenta; zeros when no balance data.
    if len(total_assets) > 0:
        asset_momentum = find_trend(total_assets.dropna(how='all'), fit_poly=False)
        debt_to_assets = total_debt / total_assets
        debt_assets_moment = find_trend(debt_to_assets.dropna(how='all'), fit_poly=False)
    else:
        asset_momentum = 0
        debt_to_assets = np.zeros(4)
        debt_assets_moment = 0
    # NOTE(review): unlike the branches above, the cash-flow momenta are not
    # guarded - find_trend receives an empty Series when self.cashflow is
    # empty; verify find_trend handles that gracefully.
    cash_operate_moment = find_trend(cash_operating.dropna(how='all'), fit_poly=False)
    cash_invest_moment = find_trend(cash_investing.dropna(how='all'), fit_poly=False)
    cash_finance_moment = find_trend(cash_financing.dropna(how='all'), fit_poly=False)
    stats = [[self.sym, revenue_momentum, profit_margins[-1], profit_margins.mean(), profit_margin_moment, operating_margins[-1], operating_margins.mean(), operate_margin_moment, asset_momentum, debt_to_assets[-1], debt_to_assets.mean(), debt_assets_moment, cash_operate_moment, cash_invest_moment, cash_finance_moment]]
    stats_df = DataFrame(stats, columns=labels)
    stats_df.drop_duplicates(inplace=True)
    stats_df.set_index('Symbol', inplace=True)
    return stats_df
def additional_stats(self):
    """
    Additional stats

    Computes valuation and efficiency ratios (EPS growth, forward P/E,
    earnings yield, ROIC, receivables/inventory turnover, asset
    utilization, operating profit margin) from the cached Yahoo stats
    and the three financial statements. Ratios that cannot be computed
    default to 0. Returns a one-row DataFrame indexed by symbol, or an
    empty DataFrame when quotes are unavailable.
    """
    if self.quotes.empty:
        self.get_quotes()
    if self.quotes.empty:
        return DataFrame()
    labels = ['Symbol', 'EPSGrowth', 'Forward P/E', 'EarningsYield', 'ReturnOnCapital', 'ReceivablesTurnover', 'InventoryTurnover', 'AssetUtilization', 'OperatingProfitMargin']
    # EPS growth in percent; NOTE(review): divides by trailing EPS, which
    # may be zero - verify upstream data never reports EPS == 0.
    eps_growth = (self.stats['EPSEstimateCurrentYear'][self.sym] - self.stats['EPS'][self.sym]) / self.stats['EPS'][self.sym] * 100 # percent
    # Forward P/E = (current price / EPS estimate next year)
    forward_pe = self.quotes[self._adj_close()][-1] / self.stats['EPSEstimateCurrentYear'][self.sym]
    # Earnings Yield = (EPS last year) / (Share Price)
    # Greenblatt's updated version:
    #   Earnings Yield = (EBIT + Depreciation - CapEx) / Enterprise Value,
    #   where Enterprise Value = (Market Value + Debt - Cash)
    earnings_yield = 0
    if (not self.balance.empty) and (self.stats['MarketCap'][0] > 0) and (self.stats['EBITDA'][0] > 0):
        total_debt = financial_fmt(self.balance.loc['Total Debt'])[0] # in million
        ent_value = self.stats['MarketCap'][0]*1000 + total_debt # TODO: minus cash
        earnings_yield = self.stats['EBITDA'][0] / ent_value
    if earnings_yield == 0: # try a different definition
        earnings_yield = self.stats['EPS'][self.sym] / self.quotes[self._adj_close()][-1]
    # Return on capital:
    #   ROIC = (NetOperatingProfit - AdjustedTaxes) / InvestedCapital
    # or
    #   ROIC = (Net Income - Dividends) / TotalCapital
    # where,
    #   InvestedCapital = FixedAssets + IntangibleAssets + CurrentAssets - CurrentLiabilities - Cash
    # Receivables Turnover:
    #   Receivables Turnover = (12-month sales) / (4-quarter average receivables)
    #   a high ratio indicates that the company efficiently collects its accounts receivables or has quality customers.
    # Inventory Turnover:
    #   Inventory Turnover = (12-month cost of goods sold (COGS)) / (a 4-quarter average inventory)
    #   a high value of the ratio indicates a low level of inventory relative to COGS, while a low ratio
    #   signals that the company has excess inventory.
    # Asset Utilization = (12-month sales) / (12-month average of total assets)
    #   the higher the ratio, the greater is the chance that the company is utilizing its assets efficiently.
    # Operating profit margin = (12-month operating income) / (12-month sales)
    #   indicates how well a company is controlling its operating expenses.
    roic = 0
    receivables_turnover = 0
    inventory_turnover = 0
    asset_utilization = 0
    operating_profit_margin = 0
    # All three statements are needed for the ratio calculations below.
    if not (self.income.empty or self.balance.empty or self.cashflow.empty):
        #net_income = financial_fmt(self.income.loc['Net Income'])
        operating_income = financial_fmt(self.income.loc['Operating Income']) # TODO: need a reliable way to cover negative operting income
        adjusted_tax = financial_fmt(self.income.loc['Income Before Tax']) - financial_fmt(self.income.loc['Income After Tax'])
        total_assets = financial_fmt(self.balance.loc['Total Assets'])
        total_liabilities = financial_fmt(self.balance.loc['Total Liabilities'])
        #cash_from_operating = financial_fmt(self.cashflow.loc['Cash from Operating Activities'])
        total_receivables = financial_fmt(self.balance.loc['Total Receivables, Net'])
        total_inventory = financial_fmt(self.balance.loc['Total Inventory'])
        # '-' appears to mark missing values; fall back between the two
        # possible top-line labels (TODO confirm against data source).
        if '-' not in self.income.loc['Total Revenue'].tolist():
            total_sales = financial_fmt(self.income.loc['Total Revenue'])
        else:
            total_sales = financial_fmt(self.income.loc['Revenue'])
        total_cost = financial_fmt(self.income.loc['Cost of Revenue, Total'])
        cash = 0 # TODO: calculate total cash
        # Truncate all series to the shortest common length before summing.
        l = min(len(operating_income), len(total_assets), len(total_liabilities))
        if l > 0:
            invested_capital = sum(total_assets[:l]) - sum(total_liabilities[:l]) - cash
            #roic = np.divide(net_income[:l], invested_capital) # TODO: minus dividends
            roic = np.divide((sum(operating_income[:l]) - sum(adjusted_tax[:l])), invested_capital)
        l = min(len(total_sales), len(total_receivables), 4)
        if l > 0:
            receivables_turnover = np.divide(sum(total_sales[:l]), sum(total_receivables[:l]))
        l = min(len(total_cost), len(total_inventory), 4)
        if l > 0:
            inventory_turnover = np.divide(sum(total_cost[:l]), sum(total_inventory[:l]))
        l = min(len(total_sales), len(total_assets), 4)
        if l > 0:
            asset_utilization = np.divide(sum(total_sales[:l]), sum(total_assets[:l]))
        l = min(len(operating_income), len(total_sales), 4)
        if l > 0:
            operating_profit_margin = np.divide(sum(operating_income[:l]), sum(total_sales[:l]))
    stat = [[self.sym, eps_growth, forward_pe, earnings_yield, roic, receivables_turnover, inventory_turnover, asset_utilization, operating_profit_margin]]
    stat = DataFrame(stat, columns=labels)
    stat.drop_duplicates(inplace=True)
    stat.set_index('Symbol', inplace=True)
    return stat
def get_stats(self, index=None, exclude_name=False, exclude_dividend=False):
    """
    Calculate all stats.

    Downloads quotes and Yahoo statistics as needed, then joins the
    growth, relative-growth, trend, financial and additional stat
    tables into ``self.stats``.

    index: Symbol of index (benchmark) used for relative-growth stats
    exclude_name: passed through to get_symbol_yahoo_stats()
    exclude_dividend: passed through to growth_stats()

    Returns the combined stats transposed (for the sake of display),
    or an empty DataFrame when quotes cannot be obtained.
    """
    # BUG FIX: the original checked `self.quotes is None` in an elif
    # *after* touching `self.quotes.empty`, so a None value raised
    # AttributeError before the guard could fire. Check None first.
    if self.quotes is None:
        print("%s: ERROR - invalid quotes." %self.sym)
        return DataFrame()
    if self.quotes.empty:
        self.get_quotes()
    if self.quotes is None or self.quotes.empty:
        print("%s: ERROR - cannot download quotes, no statistics available." %self.sym)
        return DataFrame()
    # make sure quotes are numbers - some sources report 'null' strings
    if self.quotes[self._adj_close()].dtypes != np.dtype('float64'):
        m = (self.quotes != 'null')
        self.quotes = self.quotes.where(m, np.nan).dropna(how='any').astype(float)
    # Yahoo Finance statistics - it must be downloaded before other stats
    self.stats = get_symbol_yahoo_stats([self.sym], exclude_name=exclude_name)
    if 'Exchange' in self.stats.columns:
        self.exch = self.stats['Exchange'][self.sym]
    # stats of return based on history quotes
    growth_stats = self.growth_stats(exclude_dividend=exclude_dividend)
    self.stats = self.stats.join(growth_stats)
    # diverge to index stats
    relative_growth_stats = self.relative_growth_stats(index)
    self.stats = self.stats.join(relative_growth_stats)
    # trend & momentum
    trend_stats = self.trend_stats()
    self.stats = self.stats.join(trend_stats)
    # financial stats
    financial_stats = self.financial_stats(exchange=self.exch)
    self.stats = self.stats.join(financial_stats)
    # additional stats
    add_stats = self.additional_stats()
    self.stats = self.stats.join(add_stats)
    return self.stats.transpose() # transpose for the sake of display
### Momentum ###
def momentum(self, n=2, start=None, end=None):
    """
    Momentum, defined as
        Momentum = Today's closing price - Closing price X days ago
    Return - pandas Series of price differences
    """
    if self.quotes.empty:
        self.get_quotes()
    if self.quotes.empty:
        return pd.Series()
    start_date, end_date = self._handle_start_end_dates(start, end)
    # Compute over the full history, then slice to the requested range.
    prices = self.quotes[self._adj_close()]
    diffs = prices.rolling(window=n, center=False).apply(lambda w: w[-1] - w[0])
    diffs = diffs.dropna()
    return diffs[start_date.strftime('%Y-%m-%d'):end_date.strftime('%Y-%m-%d')]
def roc(self, n=10, start=None, end=None):
    """
    Rate of Change(ROC), defined as
        ROC = ((current value / previous value) - 1) x 100
    Return - pandas Series with dates as index.
    """
    if self.quotes.empty:
        self.get_quotes()
    if self.quotes.empty:
        return pd.Series()
    start_date, end_date = self._handle_start_end_dates(start, end)
    prices = self.quotes[self._adj_close()]
    # Percentage change across each n-wide window, over all history.
    pct = lambda w: (w[-1] / w[0] - 1) * 100
    roc_series = prices.rolling(window=n, center=False).apply(pct).dropna()
    return roc_series[start_date.strftime('%Y-%m-%d'):end_date.strftime('%Y-%m-%d')]
def macd(self, start=None, end=None):
    """
    Moving Average Convergence/Divergence(MACD)

    The MACD oscillator is three related time series derived from the
    price history, using the conventional MACD(12,26,9) parameters:
        MACD Line = 12-period EMA - 26-period EMA
        Signal Line = 9-period EMA of the MACD line
        Histogram = MACD Line - Signal Line
    Return: list of [MACD Line, Signal Line, Histogram], all in pandas
    Series format, restricted to the requested date range.
    """
    start_date, end_date = self._handle_start_end_dates(start, end)
    dates = pd.date_range(start=start_date, end=end_date, freq='D')
    ema_fast = self.ema(n=12)
    ema_slow = self.ema(n=26)
    macd_line = (ema_fast - ema_slow).dropna()
    signal_line = pd.Series(
        moving_average(macd_line, n=9, type='exponential'),
        index=macd_line.index)
    histogram = macd_line - signal_line
    return [macd_line[dates].dropna(),
            signal_line[dates].dropna(),
            histogram[dates].dropna()]
def rsi(self, n=14, start=None, end=None):
    """
    Relative Strenth Index(RSI)
    Return a Pandas Series of RSI.
    The standard algorithm of calculating RSI is:
                        100
        RSI = 100 - --------
                     1 + RS
        RS = Average Gain / Average Loss
    The very first calculations for average gain and average loss are simple 14 period averages.
        First Average Gain = Sum of Gains over the past 14 periods / 14.
        First Average Loss = Sum of Losses over the past 14 periods / 14.
    The second, and subsequent, calculations are based on the prior averages and the current gain loss:
        Average Gain = [(previous Average Gain) x 13 + current Gain] / 14.
        Average Loss = [(previous Average Loss) x 13 + current Loss] / 14.
    """
    if self.quotes.empty:
        self.get_quotes()
    if self.quotes.empty:
        return pd.Series()
    # RSI is start date sensitive
    [start_date, end_date] = self._handle_start_end_dates(start, end)
    tmp_start = start_date - BDay(n) # The first n days are used for init, so go back for n business days
    prices = self.quotes[self._adj_close()][tmp_start.strftime('%Y-%m-%d'):end_date.strftime('%Y-%m-%d')]
    m = np.diff(prices)  # day-over-day price changes
    # initialization: simple n-period averages of gains/losses
    seed = m[:n+1] # cause the diff is 1 shorter
    up = seed[seed>=0].sum()/n
    down = -seed[seed<0].sum()/n # losses should be positive
    # NOTE(review): if the seed window has no losses, down == 0 and the
    # division below relies on numpy inf semantics - verify.
    rsi = np.zeros_like(prices)
    rsi[:n] = 100. - 100./(1. + up/down)
    # subsequent calculations: Wilder-style smoothed averages
    for i in np.arange(n, len(prices)):
        d = m[i-1]
        if d > 0:
            gain = d
            loss = 0
        else:
            gain = 0
            loss = -d # losses should be positive
        up = (up*(n - 1) + gain)/n
        down = (down*(n - 1) + loss)/n
        rsi[i] = 100. - 100/(1. + up/down)
    rsi = pd.Series(rsi, index=prices.index) # price diff drops the fist date
    return rsi[start_date.strftime('%Y-%m-%d'):end_date.strftime('%Y-%m-%d')].dropna()
def stochastic(self, nK=14, nD=3, start=None, end=None):
    """
    Stochastic Oscillator
    Inputs:
        nK - window of fast stochastic oscillator %K
        nD - window of slow stochastic oscillator %D
    Return both fast and slow stochastic oscillators, as Pandas Series.
    They are calculated by:
        Stochastic Oscillator(%K) = (Close Price - Lowest Low) / (Highest High - Lowest Low) * 100
        Fast %D = 3-day SMA of %K
        Slow %D = 3-day SMA of fast %D
    where typical values for N are 5, 9, or 14 periods.
    """
    if self.quotes.empty:
        self.get_quotes()
    if self.quotes.empty:
        return [pd.Series(), pd.Series()]
    close = self.quotes[self._adj_close()]
    if len(close) <= nK:
        # Not enough history for a single %K window.
        return [pd.Series(), pd.Series()]
    # Scale High/Low by the adjusted/raw close ratio to get adjusted values.
    # NOTE(review): pandas element-wise division does not raise
    # ZeroDivisionError (it yields inf/NaN), so this except branch
    # likely never triggers - verify intent.
    try:
        ratio = self.quotes[self._adj_close()] / self.quotes['Close']
    except ZeroDivisionError:
        ratio = 1
    high = self.quotes['High'] * ratio # adjusted high
    low = self.quotes['Low'] * ratio # adjusted low
    sto = np.zeros_like(close)
    for i in np.arange(nK, len(close)+1):
        # Rolling nK-wide window ending at position i-1.
        s = close[i-nK : i]
        h = high[i-nK : i]
        l = low[i-nK : i]
        if max(h) != min(l):
            denominator = max(h) - min(l)
        else:
            # avoid devide-by-zero error
            denominator = random.random() / 1000
        sto[i-1] = (s[-1]-min(l))/denominator * 100
    # Backfill the warm-up region with the first computed value.
    sto[:nK-1] = sto[nK-1]
    K = pd.Series(sto, index=close.index)
    D = pd.Series(moving_average(K, n=nD, type='simple'), index=K.index)
    [start_date, end_date] = self._handle_start_end_dates(start, end)
    rng = pd.date_range(start=start_date, end=end_date, freq='D')
    return [K[rng].dropna(), D[rng].dropna()]
def plot(self, start=None, end=None):
    """
    Plot price changes and related indicators.

    Draws a 6-row figure sharing the date axis: price with EMA(10)/
    EMA(30) overlays (rows 1-2), ROC (row 3), RSI (row 4), MACD
    (row 5) and the fast/slow stochastic oscillators (row 6).

    start, end: optional date range; defaults to the last 180 days.
    """
    if start == None and end == None:
        # set default range to 180 days
        end_date = dt.datetime.today().date()
        start_date = end_date - dt.timedelta(days=180)
    else:
        [start_date, end_date] = self._handle_start_end_dates(start, end)
    if self.name != None:
        ticker = self.name
    else:
        ticker = self.sym
    fillcolor_gold = 'darkgoldenrod'
    fillcolor_red = 'lightsalmon'
    fillcolor_green = 'lightgreen'
    nrows = 6
    fig, ax = plt.subplots(nrows, 1, sharex=True)
    # plot price, volume and EMA
    ema10 = self.ema(n=10, start=start_date, end=end_date)
    ema30 = self.ema(n=30, start=start_date, end=end_date)
    price = self.quotes[self._adj_close()][start_date.strftime('%Y-%m-%d'):end_date.strftime('%Y-%m-%d')]
    ax_ema = plt.subplot(nrows, 1, (1, 2))  # price panel spans two rows
    ax_ema.fill_between(np.asarray(price.index), price.min(), np.asarray(price), facecolor='lightskyblue', linewidth=0.0)
    ema10.plot(grid=True, label='EMA(10)', color='red')
    ema30.plot(grid=True, label='EMA(30)', color='darkgreen')
    plt.legend(fontsize='xx-small', loc='upper left')
    ax_ema.set_ylim(bottom=price.min()) # change bottom scale
    ax_ema.set_ylabel('Price')
    ax_ema.set_xticklabels([]) # hide x-axis labels
    # plot ROC
    window = 10
    roc = self.roc(n=window, start=start_date, end=end_date)
    ax_roc = plt.subplot(nrows, 1, 3)
    roc.plot(grid=True, label='ROC(%d)'%window)
    bottom, top = ax_roc.get_ylim()
    ax_roc.set_yticks(np.round(np.linspace(bottom, top, num=4), decimals=0)) # reduce y-axis ticks
    if top >= 0 and bottom <= 0:
        # Mark the zero line when it is inside the visible range.
        ax_roc.axhline(0, color=fillcolor_gold)
    plt.legend(fontsize='xx-small', loc='upper left')
    ax_roc.set_ylabel('ROC')
    ax_roc.set_xticklabels([]) # hide x-axis labels
    # plot RSI
    window = 14
    rsi = self.rsi(n=window, start=start_date, end=end_date)
    ax_rsi = plt.subplot(nrows, 1, 4)
    rsi.plot(grid=True, label='RSI(%d)'%window)
    ax_rsi.set_ylim(0, 100)
    bottom, top = ax_rsi.get_ylim()
    ax_rsi.set_yticks(np.round(np.linspace(bottom, top, num=4), decimals=0)) # reduce y-axis ticks
    # Shade the conventional overbought (>70) and oversold (<30) bands.
    ax_rsi.fill_between(np.asarray(rsi.index), 70, 100, facecolor=fillcolor_red, alpha=0.5, linewidth=0.0)
    ax_rsi.fill_between(np.asarray(rsi.index), 0, 30, facecolor=fillcolor_green, alpha=0.5, linewidth=0.0)
    plt.legend(fontsize='xx-small', loc='upper left')
    ax_rsi.set_ylabel('RSI')
    ax_rsi.set_xticklabels([]) # hide x-axis labels
    # plot MACD
    [macd, signal, hist] = self.macd(start=start_date, end=end_date)
    ax_macd = plt.subplot(nrows, 1, 5)
    ax_macd.bar(np.asarray(hist.index), np.asarray(hist), width=0.1, color=fillcolor_gold)
    ax_macd.fill_between(np.asarray(hist.index), np.asarray(hist), 0, facecolor=fillcolor_gold, edgecolor=fillcolor_gold)
    macd.plot(grid=True, label='MACD(12,26)', color='red')
    signal.plot(grid=True, label='EMA(9)', color='darkgreen')
    plt.legend(fontsize='xx-small', loc='upper left')
    bottom, top = ax_macd.get_ylim()
    if top >= 0 and bottom <= 0:
        # BUG FIX: this zero line was drawn on ax_roc (copy-paste from the
        # ROC panel); it belongs on the MACD panel.
        ax_macd.axhline(0, color=fillcolor_gold)
    ax_macd.set_yticks(np.round(np.linspace(bottom, top, num=4), decimals=1)) # reduce y-axis ticks
    ax_macd.set_ylabel('MACD')
    ax_macd.set_xticklabels([]) # hide x-axis labels
    # plot Stochastic Oscilator
    n_k = 14
    n_d = 3
    K, D = self.stochastic(nK=n_k, nD=n_d, start=start_date, end=end_date)
    ax_sto = plt.subplot(nrows, 1, 6)
    K.plot(grid=True, label='%'+'K(%d)'%n_k, color='red')
    D.plot(grid=True, label='%'+'D(%d)'%n_d, color='darkgreen')
    bottom, top = ax_sto.get_ylim()
    ax_sto.set_yticks(np.round(np.linspace(bottom, top, num=4), decimals=0)) # reduce y-axis ticks
    # Shade overbought (>80) and oversold (<20) bands.
    ax_sto.fill_between(np.asarray(K.index), 80, 100, facecolor=fillcolor_red, alpha=0.5, linewidth=0.0)
    ax_sto.fill_between(np.asarray(K.index), 0, 20, facecolor=fillcolor_green, alpha=0.5, linewidth=0.0)
    plt.legend(fontsize='xx-small', loc='upper left')
    ax_sto.set_ylabel('FSTO')
    fig.suptitle(ticker)
    fig.autofmt_xdate()
    fig.show()
    return
### Insider Trade ###
def get_insider_trade(self):
    """
    NOT IMPLEMENTED YET
    """
    # Candidate data sources for insider-trade history:
    #   http://insidertrading.org/
    #   Insider sale:     http://openinsider.com/screener?s=AMD&xs=1
    #   Insider purchase: http://openinsider.com/screener?s=OPK&xp=1
    # TODO: download insider trade history
    return
| mit |
ual/urbansim | urbansim/models/tests/test_dcm.py | 6 | 22108 | import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
import os
import tempfile
import yaml
from pandas.util import testing as pdt
from ...utils import testing
from .. import dcm
@pytest.fixture
def seed(request):
    """Pin numpy's global RNG to seed 0, restoring the prior state after."""
    saved_state = np.random.get_state()

    def restore():
        np.random.set_state(saved_state)

    request.addfinalizer(restore)
    np.random.seed(0)
@pytest.fixture
def choosers():
    """Five choosers with one covariate and a preferred alternative id."""
    return pd.DataFrame({
        'var1': list(range(5, 10)),
        'thing_id': list('acegi'),
    })
@pytest.fixture
def grouped_choosers(choosers):
    """The choosers frame with a 'group' segmentation column added in place."""
    groups = ['x', 'y', 'x', 'x', 'y']
    choosers['group'] = groups
    return choosers
@pytest.fixture
def alternatives():
    """Ten alternatives 'a'..'j' with two numeric covariates."""
    idx = pd.Index(list('abcdefghij'), name='thing_id')
    return pd.DataFrame(
        {'var2': range(10, 20),
         'var3': range(20, 30)},
        index=idx)
@pytest.fixture
def basic_dcm():
    """An unfitted MNL model with the standard test configuration."""
    return dcm.MNLDiscreteChoiceModel(
        'var2 + var1:var3',   # model expression
        5,                    # sample size
        'full_product',       # probability mode
        'individual',         # choice mode
        ['var1 != 5'],        # choosers fit filters
        ['var1 != 7'],        # choosers predict filters
        ['var3 != 15'],       # alts fit filters
        ['var2 != 14'],       # alts predict filters
        None,                 # interaction predict filters
        None,                 # estimation sample size
        None,                 # prediction sample size
        None,                 # choice column
        'Test LCM')           # name
@pytest.fixture
def basic_dcm_fit(basic_dcm, choosers, alternatives):
    """The basic model, already fit against the test choosers/alternatives."""
    basic_dcm.fit(choosers, alternatives, choosers.thing_id)
    return basic_dcm
def test_unit_choice_uniform(choosers, alternatives):
    """With uniform weights every chooser must receive some alternative."""
    weights = [1] * len(alternatives)
    result = dcm.unit_choice(choosers.index, alternatives.index, weights)
    npt.assert_array_equal(result.index, choosers.index)
    assert result.isin(alternatives.index).all()
def test_unit_choice_some_zero(choosers, alternatives):
    """Zero-probability alternatives must never be chosen."""
    weights = [0, 1, 0, 1, 1, 0, 1, 0, 0, 1]
    result = dcm.unit_choice(choosers.index, alternatives.index, weights)
    npt.assert_array_equal(result.index, choosers.index)
    # Exactly the positive-weight alternatives get assigned.
    npt.assert_array_equal(sorted(result.values), ['b', 'd', 'e', 'g', 'j'])
def test_unit_choice_not_enough(choosers, alternatives):
    """Two viable alternatives for five choosers: three stay unmatched."""
    weights = [0, 0, 0, 0, 0, 1, 0, 1, 0, 0]
    result = dcm.unit_choice(choosers.index, alternatives.index, weights)
    npt.assert_array_equal(result.index, choosers.index)
    assert result.isnull().sum() == 3
    npt.assert_array_equal(sorted(result[~result.isnull()]), ['f', 'h'])
def test_unit_choice_none_available(choosers, alternatives):
    """All-zero probabilities leave every chooser unmatched (all NaN)."""
    weights = [0] * len(alternatives)
    result = dcm.unit_choice(choosers.index, alternatives.index, weights)
    npt.assert_array_equal(result.index, choosers.index)
    assert result.isnull().all()
def test_mnl_dcm_prob_choice_mode_compat(basic_dcm):
    """Incompatible probability/choice mode pairs are rejected up front."""
    expr = basic_dcm.model_expression
    size = basic_dcm.sample_size
    with pytest.raises(ValueError):
        dcm.MNLDiscreteChoiceModel(
            expr, size,
            probability_mode='single_chooser', choice_mode='individual')
    with pytest.raises(ValueError):
        dcm.MNLDiscreteChoiceModel(
            expr, size,
            probability_mode='full_product', choice_mode='aggregate')
def test_mnl_dcm_prob_mode_interaction_compat(basic_dcm):
    """Interaction predict filters are invalid in full_product mode."""
    expr = basic_dcm.model_expression
    size = basic_dcm.sample_size
    with pytest.raises(ValueError):
        dcm.MNLDiscreteChoiceModel(
            expr, size,
            probability_mode='full_product', choice_mode='individual',
            interaction_predict_filters=['var1 > 9000'])
def test_mnl_dcm(seed, basic_dcm, choosers, alternatives):
    """End-to-end smoke test: column bookkeeping, fit, probabilities,
    prediction and YAML round-trip for the basic MNL model."""
    # Column bookkeeping should reflect the model expression and filters.
    assert basic_dcm.choosers_columns_used() == ['var1']
    assert set(basic_dcm.alts_columns_used()) == {'var2', 'var3'}
    assert set(basic_dcm.interaction_columns_used()) == \
        {'var1', 'var2', 'var3'}
    assert set(basic_dcm.columns_used()) == {'var1', 'var2', 'var3'}
    loglik = basic_dcm.fit(choosers, alternatives, choosers.thing_id)
    basic_dcm.report_fit()
    # hard to test things exactly because there's some randomness
    # involved, but can at least do a smoke test.
    assert len(loglik) == 3
    assert len(basic_dcm.fit_parameters) == 2
    assert len(basic_dcm.fit_parameters.columns) == 3
    filtered_choosers, filtered_alts = basic_dcm.apply_predict_filters(
        choosers, alternatives)
    # full_product mode: one probability per (chooser, alternative) pair.
    probs = basic_dcm.probabilities(choosers, alternatives)
    assert len(probs) == len(filtered_choosers) * len(filtered_alts)
    # Summed probabilities collapse to one value per alternative and
    # must total the number of choosers.
    sprobs = basic_dcm.summed_probabilities(choosers, alternatives)
    assert len(sprobs) == len(filtered_alts)
    pdt.assert_index_equal(
        sprobs.index, filtered_alts.index, check_names=False)
    npt.assert_allclose(sprobs.sum(), len(filtered_choosers))
    # Predictions are deterministic given the seeded RNG.
    choices = basic_dcm.predict(choosers.iloc[1:], alternatives)
    pdt.assert_series_equal(
        choices,
        pd.Series(
            ['h', 'c', 'f'], index=pd.Index([1, 3, 4], name='chooser_id')))
    # check that we can do a YAML round-trip
    yaml_str = basic_dcm.to_yaml()
    new_model = dcm.MNLDiscreteChoiceModel.from_yaml(yaml_str)
    assert new_model.fitted
    testing.assert_frames_equal(
        basic_dcm.fit_parameters, new_model.fit_parameters)
def test_mnl_dcm_repeated_alts(basic_dcm, choosers, alternatives):
    """Aggregate choice mode with a choice column must handle alternatives
    whose ids repeat in the prediction table."""
    interaction_predict_filters = ['var1 * var2 > 50']
    choice_column = 'thing_id'
    # Reconfigure the fixture model for single_chooser/aggregate mode.
    basic_dcm.probability_mode = 'single_chooser'
    basic_dcm.choice_mode = 'aggregate'
    basic_dcm.interaction_predict_filters = interaction_predict_filters
    basic_dcm.choice_column = choice_column
    loglik = basic_dcm.fit(choosers, alternatives, 'thing_id')
    basic_dcm.report_fit()
    # hard to test things exactly because there's some randomness
    # involved, but can at least do a smoke test.
    assert len(loglik) == 3
    assert len(basic_dcm.fit_parameters) == 2
    assert len(basic_dcm.fit_parameters.columns) == 3
    # Duplicate each alternative a different number of times.
    repeated_index = alternatives.index.repeat([1, 2, 3, 2, 4, 3, 2, 1, 5, 8])
    repeated_alts = alternatives.loc[repeated_index].reset_index()
    choices = basic_dcm.predict(choosers, repeated_alts)
    pdt.assert_index_equal(choices.index, pd.Index([0, 1, 3, 4]))
    assert choices.isin(repeated_alts.index).all()
def test_mnl_dcm_yaml(basic_dcm, choosers, alternatives):
    """Round-trip the model config through YAML, before and after fitting."""
    expected_dict = {
        'model_type': 'discretechoice',
        'model_expression': basic_dcm.model_expression,
        'sample_size': basic_dcm.sample_size,
        'name': basic_dcm.name,
        'probability_mode': basic_dcm.probability_mode,
        'choice_mode': basic_dcm.choice_mode,
        'choosers_fit_filters': basic_dcm.choosers_fit_filters,
        'choosers_predict_filters': basic_dcm.choosers_predict_filters,
        'alts_fit_filters': basic_dcm.alts_fit_filters,
        'alts_predict_filters': basic_dcm.alts_predict_filters,
        'interaction_predict_filters': basic_dcm.interaction_predict_filters,
        'estimation_sample_size': basic_dcm.estimation_sample_size,
        'prediction_sample_size': basic_dcm.prediction_sample_size,
        'choice_column': basic_dcm.choice_column,
        'fitted': False,
        'log_likelihoods': None,
        'fit_parameters': None
    }
    # BUG FIX: use yaml.safe_load - yaml.load without an explicit Loader
    # is unsafe and raises TypeError under PyYAML >= 6. The serialized
    # model config contains only basic types, so safe_load suffices.
    assert yaml.safe_load(basic_dcm.to_yaml()) == expected_dict
    new_mod = dcm.MNLDiscreteChoiceModel.from_yaml(basic_dcm.to_yaml())
    assert yaml.safe_load(new_mod.to_yaml()) == expected_dict
    basic_dcm.fit(choosers, alternatives, 'thing_id')
    expected_dict['fitted'] = True
    # Fit results are dicts with unpredictable values; compare separately.
    del expected_dict['log_likelihoods']
    del expected_dict['fit_parameters']
    actual_dict = yaml.safe_load(basic_dcm.to_yaml())
    assert isinstance(actual_dict.pop('log_likelihoods'), dict)
    assert isinstance(actual_dict.pop('fit_parameters'), dict)
    assert actual_dict == expected_dict
    new_mod = dcm.MNLDiscreteChoiceModel.from_yaml(basic_dcm.to_yaml())
    assert new_mod.fitted is True
def test_mnl_dcm_prob_mode_single(seed, basic_dcm_fit, choosers, alternatives):
    """single_chooser mode: probabilities are computed for one chooser
    against all alternatives."""
    basic_dcm_fit.probability_mode = 'single_chooser'
    filtered_choosers, filtered_alts = basic_dcm_fit.apply_predict_filters(
        choosers, alternatives)
    # With the seeded RNG the probability values are deterministic.
    probs = basic_dcm_fit.probabilities(choosers.iloc[1:], alternatives)
    pdt.assert_series_equal(
        probs,
        pd.Series(
            [0.25666709612190147,
             0.20225620916965448,
             0.15937989234214262,
             0.1255929308043417,
             0.077988133629030815,
             0.061455420294827229,
             0.04842747874412457,
             0.038161332007195688,
             0.030071506886781514],
            index=pd.MultiIndex.from_product(
                [[1], filtered_alts.index.values],
                names=['chooser_id', 'alternative_id'])))
    # Summed probabilities still total the number of (filtered) choosers.
    sprobs = basic_dcm_fit.summed_probabilities(choosers, alternatives)
    pdt.assert_index_equal(
        sprobs.index, filtered_alts.index, check_names=False)
    npt.assert_allclose(sprobs.sum(), len(filtered_choosers))
def test_mnl_dcm_prob_mode_single_prediction_sample_size(
        seed, basic_dcm_fit, choosers, alternatives):
    """single_chooser mode with sampled alternatives: only the sampled
    subset receives probabilities."""
    basic_dcm_fit.probability_mode = 'single_chooser'
    basic_dcm_fit.prediction_sample_size = 5
    filtered_choosers, filtered_alts = basic_dcm_fit.apply_predict_filters(
        choosers, alternatives)
    # The sampled alternative ids and values are deterministic under seed.
    probs = basic_dcm_fit.probabilities(choosers.iloc[1:], alternatives)
    pdt.assert_series_equal(
        probs,
        pd.Series(
            [0.11137766,
             0.05449957,
             0.14134044,
             0.22761617,
             0.46516616],
            index=pd.MultiIndex.from_product(
                [[1], ['g', 'j', 'f', 'd', 'a']],
                names=['chooser_id', 'alternative_id'])))
    # Summed probabilities cover the sampled ids (duplicates possible)
    # and still total the number of filtered choosers.
    sprobs = basic_dcm_fit.summed_probabilities(choosers, alternatives)
    pdt.assert_index_equal(
        sprobs.index,
        pd.Index(['d', 'g', 'a', 'c', 'd'], name='alternative_id'))
    npt.assert_allclose(sprobs.sum(), len(filtered_choosers))
def test_mnl_dcm_prob_mode_full_prediction_sample_size(
        seed, basic_dcm_fit, choosers, alternatives):
    """full_product mode with sampled alternatives: each chooser gets
    exactly sample-size probabilities that sum to one per chooser."""
    basic_dcm_fit.probability_mode = 'full_product'
    basic_dcm_fit.prediction_sample_size = 5
    filtered_choosers, filtered_alts = basic_dcm_fit.apply_predict_filters(
        choosers, alternatives)
    # One chooser is excluded by .iloc[1:], hence the -1 terms.
    probs = basic_dcm_fit.probabilities(choosers.iloc[1:], alternatives)
    assert len(probs) == (len(filtered_choosers) - 1) * 5
    npt.assert_allclose(probs.sum(), len(filtered_choosers) - 1)
    sprobs = basic_dcm_fit.summed_probabilities(choosers, alternatives)
    pdt.assert_index_equal(
        sprobs.index, filtered_alts.index, check_names=False)
    npt.assert_allclose(sprobs.sum(), len(filtered_choosers))
def test_mnl_dcm_choice_mode_agg(seed, basic_dcm_fit, choosers, alternatives):
    """Aggregate choice mode yields a deterministic assignment under seed."""
    basic_dcm_fit.probability_mode = 'single_chooser'
    basic_dcm_fit.choice_mode = 'aggregate'
    filtered_choosers, filtered_alts = basic_dcm_fit.apply_predict_filters(
        choosers, alternatives)
    predicted = basic_dcm_fit.predict(choosers, alternatives)
    expected = pd.Series(['f', 'a', 'd', 'c'], index=[0, 1, 3, 4])
    pdt.assert_series_equal(predicted, expected)
def test_mnl_dcm_group(seed, grouped_choosers, alternatives):
    """End-to-end check of MNLDiscreteChoiceModelGroup.

    Covers column bookkeeping, fit, per-group probabilities, summed
    probabilities, and prediction with and without removing already-chosen
    alternatives. Predicted choices are golden values tied to the `seed`
    fixture and the exact order of random draws, so statements must not be
    reordered.
    """
    model_exp = 'var2 + var1:var3'
    sample_size = 4
    choosers_predict_filters = ['var1 != 7']
    alts_predict_filters = ['var2 != 14']
    group = dcm.MNLDiscreteChoiceModelGroup('group')
    group.add_model_from_params(
        'x', model_exp, sample_size,
        choosers_predict_filters=choosers_predict_filters)
    group.add_model_from_params(
        'y', model_exp, sample_size, alts_predict_filters=alts_predict_filters)
    assert group.choosers_columns_used() == ['var1']
    assert group.alts_columns_used() == ['var2']
    assert set(group.interaction_columns_used()) == {'var1', 'var2', 'var3'}
    assert set(group.columns_used()) == {'var1', 'var2', 'var3'}
    assert group.fitted is False
    logliks = group.fit(grouped_choosers, alternatives, 'thing_id')
    assert group.fitted is True
    assert 'x' in logliks and 'y' in logliks
    assert isinstance(logliks['x'], dict) and isinstance(logliks['y'], dict)
    probs = group.probabilities(grouped_choosers, alternatives)
    # each group's probability frame covers its filtered choosers x alts
    for name, df in grouped_choosers.groupby('group'):
        assert name in probs
        filtered_choosers, filtered_alts = \
            group.models[name].apply_predict_filters(df, alternatives)
        assert len(probs[name]) == len(filtered_choosers) * len(filtered_alts)
    filtered_choosers, filtered_alts = group.apply_predict_filters(
        grouped_choosers, alternatives)
    sprobs = group.summed_probabilities(grouped_choosers, alternatives)
    assert len(sprobs) == len(filtered_alts)
    pdt.assert_index_equal(
        sprobs.index, filtered_alts.index, check_names=False)
    # save the RNG state so the second prediction replays identical draws
    choice_state = np.random.get_state()
    choices = group.predict(grouped_choosers, alternatives)
    pdt.assert_series_equal(
        choices,
        pd.Series(
            ['c', 'a', 'a', 'g'],
            index=pd.Index([0, 3, 1, 4], name='chooser_id')))
    # check that we don't get the same alt twice if they are removed
    # make sure we're starting from the same random state as the last draw
    np.random.set_state(choice_state)
    group.remove_alts = True
    choices = group.predict(grouped_choosers, alternatives)
    pdt.assert_series_equal(
        choices,
        pd.Series(
            ['c', 'a', 'b', 'g'],
            index=pd.Index([0, 3, 1, 4], name='chooser_id')))
def test_mnl_dcm_segmented_raises():
    """Adding a segment with no model expression must raise ValueError
    (presumably because no default expression was configured either)."""
    seg_model = dcm.SegmentedMNLDiscreteChoiceModel('group', 2)
    with pytest.raises(ValueError):
        seg_model.add_segment('x')
def test_mnl_dcm_segmented_prob_choice_mode_compat():
    """Incompatible probability_mode/choice_mode pairs are rejected at
    construction time."""
    bad_mode_pairs = (
        ('single_chooser', 'individual'),
        ('full_product', 'aggregate'),
    )
    for prob_mode, choice_mode in bad_mode_pairs:
        with pytest.raises(ValueError):
            dcm.SegmentedMNLDiscreteChoiceModel(
                'group', 10,
                probability_mode=prob_mode, choice_mode=choice_mode)
def test_mnl_dcm_segmented_prob_mode_interaction_compat():
    """interaction_predict_filters cannot be combined with the
    'full_product' probability mode."""
    incompatible_kwargs = dict(
        probability_mode='full_product', choice_mode='individual',
        interaction_predict_filters=['var1 > 9000'])
    with pytest.raises(ValueError):
        dcm.SegmentedMNLDiscreteChoiceModel('group', 10, **incompatible_kwargs)
def test_mnl_dcm_segmented(seed, grouped_choosers, alternatives):
    """End-to-end check of SegmentedMNLDiscreteChoiceModel.

    Covers segment setup, fit, probabilities and prediction. Predicted
    choices are golden values tied to the `seed` fixture and the exact
    order of random draws, so statements must not be reordered.
    """
    model_exp = 'var2 + var1:var3'
    sample_size = 4
    group = dcm.SegmentedMNLDiscreteChoiceModel(
        'group', sample_size, default_model_expr=model_exp)
    group.add_segment('x')
    # segment 'y' overrides the default model expression
    group.add_segment('y', 'var3 + var1:var2')
    assert group.choosers_columns_used() == []
    assert group.alts_columns_used() == []
    assert set(group.interaction_columns_used()) == {'var1', 'var2', 'var3'}
    # the segmentation column itself counts as a used column
    assert set(group.columns_used()) == {'group', 'var1', 'var2', 'var3'}
    assert group.fitted is False
    logliks = group.fit(grouped_choosers, alternatives, 'thing_id')
    assert group.fitted is True
    assert 'x' in logliks and 'y' in logliks
    assert isinstance(logliks['x'], dict) and isinstance(logliks['y'], dict)
    probs = group.probabilities(grouped_choosers, alternatives)
    for name, df in grouped_choosers.groupby('group'):
        assert name in probs
        assert len(probs[name]) == len(df) * len(alternatives)
    sprobs = group.summed_probabilities(grouped_choosers, alternatives)
    assert len(sprobs) == len(alternatives)
    pdt.assert_index_equal(
        sprobs.index, alternatives.index, check_names=False)
    # save the RNG state so the second prediction replays identical draws
    choice_state = np.random.get_state()
    choices = group.predict(grouped_choosers, alternatives)
    pdt.assert_series_equal(
        choices,
        pd.Series(
            ['c', 'a', 'b', 'a', 'j'],
            index=pd.Index([0, 2, 3, 1, 4], name='chooser_id')))
    # check that we don't get the same alt twice if they are removed
    # make sure we're starting from the same random state as the last draw
    np.random.set_state(choice_state)
    group._group.remove_alts = True
    choices = group.predict(grouped_choosers, alternatives)
    pdt.assert_series_equal(
        choices,
        pd.Series(
            ['c', 'a', 'b', 'd', 'j'],
            index=pd.Index([0, 2, 3, 1, 4], name='chooser_id')))
def test_mnl_dcm_segmented_yaml(grouped_choosers, alternatives):
    """Round-trip a segmented model through YAML before and after fitting.

    Checks the serialized dict exactly, and that modes/sample sizes are
    propagated down to the per-segment models on deserialization.
    """
    model_exp = 'var2 + var1:var3'
    sample_size = 4
    group = dcm.SegmentedMNLDiscreteChoiceModel(
        'group', sample_size, default_model_expr=model_exp, name='test_seg',
        probability_mode='single_chooser', choice_mode='aggregate',
        estimation_sample_size=20, prediction_sample_size=30)
    group.add_segment('x')
    group.add_segment('y', 'var3 + var1:var2')
    expected_dict = {
        'model_type': 'segmented_discretechoice',
        'name': 'test_seg',
        'segmentation_col': 'group',
        'sample_size': sample_size,
        'probability_mode': 'single_chooser',
        'choice_mode': 'aggregate',
        'choosers_fit_filters': None,
        'choosers_predict_filters': None,
        'alts_fit_filters': None,
        'alts_predict_filters': None,
        'interaction_predict_filters': None,
        'estimation_sample_size': 20,
        'prediction_sample_size': 30,
        'choice_column': None,
        'default_config': {
            'model_expression': model_exp,
        },
        'remove_alts': False,
        'fitted': False,
        'models': {
            'x': {
                # segment 'x' inherits the default model expression
                'name': 'x',
                'fitted': False,
                'log_likelihoods': None,
                'fit_parameters': None
            },
            'y': {
                'name': 'y',
                'model_expression': 'var3 + var1:var2',
                'fitted': False,
                'log_likelihoods': None,
                'fit_parameters': None
            }
        }
    }
    assert yaml.load(group.to_yaml()) == expected_dict
    new_seg = dcm.SegmentedMNLDiscreteChoiceModel.from_yaml(group.to_yaml())
    assert yaml.load(new_seg.to_yaml()) == expected_dict
    group.fit(grouped_choosers, alternatives, 'thing_id')
    expected_dict['fitted'] = True
    expected_dict['models']['x']['fitted'] = True
    expected_dict['models']['y']['fitted'] = True
    # fitted parameters/log-likelihoods are data dependent, so drop them
    # from the expected dict and only type-check them below
    del expected_dict['models']['x']['fit_parameters']
    del expected_dict['models']['x']['log_likelihoods']
    del expected_dict['models']['y']['fit_parameters']
    del expected_dict['models']['y']['log_likelihoods']
    actual_dict = yaml.load(group.to_yaml())
    assert isinstance(actual_dict['models']['x'].pop('fit_parameters'), dict)
    assert isinstance(actual_dict['models']['x'].pop('log_likelihoods'), dict)
    assert isinstance(actual_dict['models']['y'].pop('fit_parameters'), dict)
    assert isinstance(actual_dict['models']['y'].pop('log_likelihoods'), dict)
    assert actual_dict == expected_dict
    new_seg = dcm.SegmentedMNLDiscreteChoiceModel.from_yaml(group.to_yaml())
    assert new_seg.fitted is True
    # check that the segmented model's probability mode and choice mode
    # are propogated to individual segments' models
    assert (
        new_seg._group.models['x'].probability_mode ==
        expected_dict['probability_mode'])
    assert (
        new_seg._group.models['y'].choice_mode ==
        expected_dict['choice_mode'])
    assert (
        new_seg._group.models['x'].estimation_sample_size ==
        expected_dict['estimation_sample_size'])
    assert (
        new_seg._group.models['y'].prediction_sample_size ==
        expected_dict['prediction_sample_size'])
def test_segmented_dcm_removes_old_models(grouped_choosers, alternatives):
    """Fitting drops segments absent from the data ('a', 'b', 'c'); only
    the data's actual groups ('x', 'y') remain afterwards."""
    seg = dcm.SegmentedMNLDiscreteChoiceModel(
        'group', 4, default_model_expr='var2 + var1:var3')
    for stale_segment in ('a', 'b', 'c'):
        seg.add_segment(stale_segment)
    seg.fit(grouped_choosers, alternatives, 'thing_id')
    assert sorted(seg._group.models.keys()) == ['x', 'y']
def test_fit_from_cfg(basic_dcm, choosers, alternatives):
    """Round-trip a model through a YAML config file: fit and predict
    directly from the saved configuration, then clean up."""
    config_path = tempfile.NamedTemporaryFile(suffix='.yaml').name
    basic_dcm.to_yaml(config_path)
    dcm.MNLDiscreteChoiceModel.fit_from_cfg(
        choosers, "thing_id", alternatives, config_path)
    # predict twice: with defaults and with an extra positional argument
    dcm.MNLDiscreteChoiceModel.predict_from_cfg(
        choosers, alternatives, config_path)
    dcm.MNLDiscreteChoiceModel.predict_from_cfg(
        choosers, alternatives, config_path, .2)
    os.remove(config_path)
def test_fit_from_cfg_segmented(grouped_choosers, alternatives):
    """Same YAML config round-trip as test_fit_from_cfg, but for a
    segmented model with two segments."""
    seg = dcm.SegmentedMNLDiscreteChoiceModel(
        'group', 4, default_model_expr='var2 + var1:var3')
    seg.add_segment('x')
    seg.add_segment('y', 'var3 + var1:var2')
    config_path = tempfile.NamedTemporaryFile(suffix='.yaml').name
    seg.to_yaml(config_path)
    dcm.SegmentedMNLDiscreteChoiceModel.fit_from_cfg(
        grouped_choosers, "thing_id", alternatives, config_path)
    # predict twice: with defaults and with an extra positional argument
    dcm.SegmentedMNLDiscreteChoiceModel.predict_from_cfg(
        grouped_choosers, alternatives, config_path)
    dcm.SegmentedMNLDiscreteChoiceModel.predict_from_cfg(
        grouped_choosers, alternatives, config_path, .8)
    os.remove(config_path)
| bsd-3-clause |
OpenSourcePolicyCenter/dynamic | ogusa/get_micro_data.py | 1 | 10394 | '''
------------------------------------------------------------------------
This program extracts tax rate and income data from the microsimulation
model (Tax-Calculator).
------------------------------------------------------------------------
'''
from taxcalc import Records, Calculator, Policy
from pandas import DataFrame
from dask import delayed, compute
import dask.multiprocessing
import numpy as np
import pickle
import pkg_resources
from ogusa.constants import DEFAULT_START_YEAR, TC_LAST_YEAR, PUF_START_YEAR
def get_calculator(baseline, calculator_start_year, reform=None,
                   data=None, gfactors=None, weights=None,
                   records_start_year=PUF_START_YEAR):
    '''
    This function creates the tax calculator object with the policy
    specified in reform and the data specified with the data kwarg.

    Args:
        baseline (boolean): True if baseline tax policy
        calculator_start_year (int): first year of budget window
        reform (dictionary): IIT policy reform parameters, None if
            baseline
        data (DataFrame or str): DataFrame or path to datafile for
            Records object; if the string contains "cps", CPS data with
            imputed capital gains is used
        gfactors (Tax-Calculator GrowthFactors object): growth factors
            to use to extrapolate data over budget window
        weights (DataFrame): weights for Records object
        records_start_year (int): the start year for the data and
            weights dfs (default is set to the PUF start year as defined
            in the Tax-Calculator project)

    Returns:
        calc1 (Tax-Calculator Calculator object): Calulator object with
            current_year equal to calculator_start_year

    '''
    # create a calculator
    policy1 = Policy()
    if data is not None and "cps" in data:
        records1 = Records.cps_constructor()
        # impute short and long term capital gains if using CPS data
        # in 2012 SOI data 6.587% of CG as short-term gains
        records1.p22250 = 0.06587 * records1.e01100
        records1.p23250 = (1 - 0.06587) * records1.e01100
        # set total capital gains to zero
        records1.e01100 = np.zeros(records1.e01100.shape[0])
    elif data is not None:  # pragma: no cover
        records1 = Records(data=data, gfactors=gfactors, weights=weights,
                           start_year=records_start_year)  # pragma: no cover
    else:  # pragma: no cover
        records1 = Records()  # pragma: no cover
    if baseline:
        # NOTE(review): a non-empty baseline reform is only printed here
        # and is never passed to policy1.implement_reform() -- confirm
        # whether that is intentional
        if not reform:
            print("Running current law policy baseline")
        else:
            print("Baseline policy is: ", reform)
    else:
        if not reform:
            print("Running with current law as reform")
        else:
            print("Reform policy is: ", reform)
            print("TYPE", type(reform))
            policy1.implement_reform(reform)
    # the default set up increments year to 2013
    calc1 = Calculator(records=records1, policy=policy1)
    # Check that start_year is appropriate
    if calculator_start_year > TC_LAST_YEAR:
        raise RuntimeError("Start year is beyond data extrapolation.")
    return calc1
def get_data(baseline=False, start_year=DEFAULT_START_YEAR, reform=None,
             data=None, client=None, num_workers=1):
    '''
    This function creates dataframes of micro data with marginal tax
    rates and information to compute effective tax rates from the
    Tax-Calculator output. The resulting dictionary of dataframes is
    returned and saved to disk in a pickle file.

    Args:
        baseline (boolean): True if baseline tax policy
        start_year (int): first year of budget window
        reform (dictionary): IIT policy reform parameters, None or an
            empty dict if baseline
        data (DataFrame or str): DataFrame or path to datafile for
            Records object
        client (Dask Client object): client for Dask multiprocessing
        num_workers (int): number of workers to use for Dask
            multiprocessing

    Returns:
        micro_data_dict (dict): dict of Pandas Dataframe, one for each
            year from start_year to the maximum year Tax-Calculator can
            analyze
        taxcalc_version (str): version of Tax-Calculator used

    '''
    # Normalize here rather than using a mutable default argument
    # (reform={}), which would be shared across calls
    if reform is None:
        reform = {}
    # Compute MTRs and taxes for each year, but not beyond TC_LAST_YEAR
    lazy_values = []
    for year in range(start_year, TC_LAST_YEAR + 1):
        lazy_values.append(
            delayed(taxcalc_advance)(baseline, start_year, reform,
                                     data, year))
    if client:  # pragma: no cover
        futures = client.compute(lazy_values, num_workers=num_workers)
        results = client.gather(futures)
    else:
        # (fixed the duplicated "results = results =" assignment)
        results = compute(
            *lazy_values, scheduler=dask.multiprocessing.get,
            num_workers=num_workers)
    # dictionary of data frames to return
    micro_data_dict = {}
    for i, result in enumerate(results):
        year = start_year + i
        micro_data_dict[str(year)] = DataFrame(result)
    if baseline:
        pkl_path = "micro_data_baseline.pkl"
    else:
        pkl_path = "micro_data_policy.pkl"
    with open(pkl_path, "wb") as f:
        pickle.dump(micro_data_dict, f)
    # Do some garbage collection
    del results
    # Pull Tax-Calc version for reference
    taxcalc_version = pkg_resources.get_distribution("taxcalc").version
    return micro_data_dict, taxcalc_version
def taxcalc_advance(baseline, start_year, reform, data, year):
    '''
    This function advances the year used in Tax-Calculator, computes
    taxes and marginal tax rates, and returns the results in a
    dictionary.

    Args:
        baseline (boolean): True if baseline tax policy
        start_year (int): first year of budget window (used to build the
            Calculator object)
        reform (dictionary): IIT policy reform parameters, None if
            baseline
        data (DataFrame or str): DataFrame or path to datafile for
            Records object
        year (int): year to advance the Calculator to before computing

    Returns:
        tax_dict (dict): a dictionary of microdata with marginal tax
            rates and other information computed in TC

    '''
    calc1 = get_calculator(baseline=baseline,
                           calculator_start_year=start_year,
                           reform=reform, data=data)
    calc1.advance_to_year(year)
    calc1.calc_all()
    print('Year: ', str(calc1.current_year))
    # define market income - taking expanded_income and excluding gov't
    # transfer benefits found in the Tax-Calculator expanded income
    market_income = (calc1.array('expanded_income') -
                     calc1.array('benefit_value_total'))
    # Compute mtr on capital income
    mtr_combined_capinc = cap_inc_mtr(calc1)
    # Compute weighted avg mtr for labor income
    # Note the index [2] in the mtr results means that we are pulling
    # the combined mtr from the IIT + FICA taxes
    mtr_combined_labinc = ((
        calc1.mtr('e00200p')[2] * np.abs(calc1.array('e00200')) +
        calc1.mtr('e00900p')[2] * np.abs(calc1.array('sey'))) /
        (np.abs(calc1.array('sey')) + np.abs(calc1.array('e00200'))))
    # Put MTRs, income, tax liability, and other variables in dict
    length = len(calc1.array('s006'))
    tax_dict = {
        'mtr_labinc': mtr_combined_labinc,
        'mtr_capinc': mtr_combined_capinc,
        'age': calc1.array('age_head'),
        'total_labinc': calc1.array('sey') + calc1.array('e00200'),
        # capital income = market income net of all labor income
        # (sey + e00200).  BUG FIX: was "market_income - sey + e00200",
        # which *added* wage income back instead of subtracting it.
        'total_capinc': (market_income -
                         (calc1.array('sey') + calc1.array('e00200'))),
        'market_income': market_income,
        'total_tax_liab': calc1.array('combined'),
        'payroll_tax_liab': calc1.array('payrolltax'),
        'etr': ((calc1.array('combined') - calc1.array('ubi')) /
                market_income),
        'year': calc1.current_year * np.ones(length),
        'weight': calc1.array('s006')}
    # garbage collection
    del calc1
    return tax_dict
def cap_inc_mtr(calc1):  # pragma: no cover
    '''
    This function computes the marginal tax rate on capital income,
    which is calculated as a weighted average of the marginal tax rates
    on different sources of capital income (weights are the absolute
    amounts of income from each source).

    Args:
        calc1 (Tax-Calculator Calculator object): TC calculator

    Returns:
        mtr_combined_capinc (Numpy array): array with marginal tax rates
            for each observation in the TC Records object

    '''
    # Note: PUF does not have variable for non-taxable IRA distributions
    # Exclude Sch E income (e02000) from this list since we'll compute
    # MTRs for this income in two parts - one for overall Sch C and one
    # for S Corp and Partnerhsip income (e26270) (note that TaxCalc
    # doesn't allow for an MTR on rents and royalties alone)
    # e00300 = interest income
    # e00400 = nontaxable interest income
    # e00600 = ordinary dividend income
    # e00650 = qualified dividend income
    # e01400 = taxable IRA distributions
    # e01700 = pension and annuity income
    # p22250 = short term cap gain/loss
    # p23250 = long term cap gain/loss
    # e26270 = partnership and s corp income/loss
    # e02000 = Sch E income (includes e26270)
    capital_income_sources = (
        'e00300', 'e00400', 'e00600', 'e00650', 'e01400', 'e01700',
        'p22250', 'p23250', 'e26270')
    rent_royalty_inc = np.abs(
        calc1.array('e02000') - calc1.array('e26270'))
    # assign overall Sch E mtr to rent and royalities since TC can't do
    # this component separately
    rent_royalty_mtr = calc1.mtr('e02000')[2]
    # calculating MTRs separately - can skip items with zero tax
    all_mtrs = {income_source: calc1.mtr(income_source) for
                income_source in capital_income_sources}
    # Get each column of income sources, to include non-taxable income
    record_columns = [calc1.array(x) for x in capital_income_sources]
    # Compute weighted average of all those MTRs
    # first find total capital income
    total_cap_inc = (sum(map(abs, record_columns)) + rent_royalty_inc)
    # Note that all_mtrs gives fica (0), iit (1), and combined (2) mtrs
    # We'll use the combined - hence all_mtrs[source][2]
    capital_mtr = [abs(col) * all_mtrs[source][2] for col, source in
                   zip(record_columns, capital_income_sources)]
    # BUG FIX: the previous expression
    #   sum(capital_mtr + rent_royalty_mtr * rent_royalty_inc)
    # broadcast the rent/royalty term onto *every* row of capital_mtr
    # before summing, counting it len(capital_income_sources) times.
    # Sum the per-source terms first, then add the rent/royalty term once.
    weighted_mtr_sum = (sum(capital_mtr) +
                        rent_royalty_mtr * rent_royalty_inc)
    mtr_combined_capinc = np.zeros_like(total_cap_inc)
    mtr_combined_capinc[total_cap_inc != 0] = (
        weighted_mtr_sum[total_cap_inc != 0] /
        total_cap_inc[total_cap_inc != 0])
    # with no capital income at all, fall back on the interest income MTR
    mtr_combined_capinc[total_cap_inc == 0] = (
        all_mtrs['e00300'][2][total_cap_inc == 0])
    return mtr_combined_capinc
| mit |
DiCarloLab-Delft/PycQED_py3 | pycqed/simulations/cz_superoperator_simulation_new2.py | 1 | 31649 | import adaptive
from pycqed.measurement import measurement_control as mc
from pycqed.instrument_drivers.meta_instrument.LutMans import flux_lutman as flm
from pycqed.instrument_drivers.virtual_instruments import sim_control_CZ as scCZ
from pycqed.simulations import cz_superoperator_simulation_new_functions as czf
import numpy as np
from pycqed.measurement import detector_functions as det
import matplotlib.pyplot as plt
from qcodes import Instrument
from pycqed.measurement.waveform_control_CC import waveforms_flux as wfl
from scipy.interpolate import interp1d
import qutip as qtp
import cma
np.set_printoptions(threshold=np.inf)  # never truncate arrays when printing (debug aid)
import logging
log = logging.getLogger(__name__)  # module-level logger
def f_to_parallelize_new(arglist):
    """
    Entry point for running one CZ-gate simulation job (e.g. as a single
    task on a cluster).

    Creates a private MeasurementControl plus lutman/sim-control
    instruments (names suffixed with the job number so parallel jobs do
    not collide), runs the sweep selected by
    ``arglist['adaptive_pars']['mode']`` ('adaptive', '1D' or
    'cma_optimizer'), and closes the instruments afterwards.

    Args:
        arglist (dict): keys fitted_stepresponse_ty, fluxlutman_args,
            fluxlutman_static_args, sim_control_CZ_args, number,
            adaptive_pars, live_plot_enabled, which_gate.
    """
    # cluster wants a list as an argument.
    # Below the various list items are assigned to their own variable
    fitted_stepresponse_ty = arglist['fitted_stepresponse_ty']
    fluxlutman_args = arglist['fluxlutman_args']  # see function return_instrument_args in czf
    fluxlutman_static_args = arglist['fluxlutman_static_args']  # see function return_instrument_args in czf
    sim_control_CZ_args = arglist['sim_control_CZ_args']  # see function return_instrument_args in czf
    number = arglist['number']
    adaptive_pars = arglist['adaptive_pars']
    live_plot_enabled = arglist['live_plot_enabled']
    which_gate = arglist['which_gate']

    # Reuse a MeasurementControl left over from a previous job with the
    # same number; otherwise create one and register it in a new station
    try:
        MC = Instrument.find_instrument('MC'+'{}'.format(number))
    except KeyError:
        MC = mc.MeasurementControl('MC'+'{}'.format(number),
                                   live_plot_enabled=live_plot_enabled)
    from qcodes import station
    station = station.Station()
    station.add_component(MC)
    MC.station = station

    fluxlutman = flm.HDAWG_Flux_LutMan('fluxlutman'+'{}'.format(number))
    station.add_component(fluxlutman)
    fluxlutman_static = flm.HDAWG_Flux_LutMan('fluxlutman_static'+'{}'.format(number))
    station.add_component(fluxlutman_static)
    sim_control_CZ = scCZ.SimControlCZ('sim_control_CZ'+'{}'.format(number))
    station.add_component(sim_control_CZ)

    # Load the serialized instrument parameters into the fresh instruments
    fluxlutman, sim_control_CZ, fluxlutman_static = czf.return_instrument_from_arglist(
        fluxlutman,
        fluxlutman_args,
        sim_control_CZ,
        sim_control_CZ_args,
        fluxlutman_static,
        fluxlutman_static_args,
        which_gate=which_gate)

    # Parameters reused in measurement names / metadata below
    cz_length = fluxlutman.get('cz_length_{}'.format(which_gate))
    czd_double_sided = fluxlutman.get('czd_double_sided_{}'.format(which_gate))

    # BUG FIX: the detector was previously constructed without the
    # required fluxlutman_static argument
    d = CZ_trajectory_superoperator(
        fluxlutman=fluxlutman, sim_control_CZ=sim_control_CZ,
        fluxlutman_static=fluxlutman_static,
        fitted_stepresponse_ty=fitted_stepresponse_ty,
        qois=adaptive_pars.get('qois', 'all'))
    MC.set_detector_function(d)

    exp_metadata = {'double sided': czd_double_sided,
                    'length': cz_length,
                    'distortions': sim_control_CZ.distortions(),
                    'T2_scaling': sim_control_CZ.T2_scaling(),
                    'sigma_q1': sim_control_CZ.sigma_q1(),
                    'sigma_q0': sim_control_CZ.sigma_q0()}

    if adaptive_pars['mode'] == 'adaptive':
        MC.set_sweep_functions([fluxlutman['cz_theta_f_{}'.format(which_gate)],
                                fluxlutman['cz_lambda_2_{}'.format(which_gate)]])
        if adaptive_pars['uniform']:
            loss_per_triangle = adaptive.learner.learner2D.uniform_loss
        else:
            loss_per_triangle = None
        MC.set_adaptive_function_parameters(
            {'adaptive_function': adaptive.Learner2D,
             'loss_per_triangle': loss_per_triangle,
             'goal': lambda l: l.npoints > adaptive_pars['n_points'],
             'bounds': [(adaptive_pars['theta_f_min'], adaptive_pars['theta_f_max']),
                        (adaptive_pars['lambda2_min'], adaptive_pars['lambda2_max'])]})

        if sim_control_CZ.cluster():
            dat = MC.run('2D simulation_new_cluster2 double sided {} - length {:.1f} - distortions {} - waiting {:.2f} - T2_scaling {:.2f} - sigma_q1 {:.0f}, sigma_q0 {:.0f}'.format(czd_double_sided,
                cz_length*1e9, sim_control_CZ.distortions(), sim_control_CZ.waiting_at_sweetspot(), sim_control_CZ.T2_scaling(), sim_control_CZ.sigma_q1()*1e6, sim_control_CZ.sigma_q0()*1e6),
                mode='adaptive', exp_metadata=exp_metadata)
        else:
            if adaptive_pars['long_name']:
                dat = MC.run('2D simulation_new_2 double sided {} - length {:.1f} - distortions {} - T2_scaling {:.1f} - sigma_q1 {:.0f}, sigma_q0 {:.0f}'.format(czd_double_sided,
                    cz_length*1e9, sim_control_CZ.distortions(), sim_control_CZ.T2_scaling(), sim_control_CZ.sigma_q1()*1e6, sim_control_CZ.sigma_q0()*1e6),
                    mode='adaptive', exp_metadata=exp_metadata)
            else:
                dat = MC.run('2D simulation_new_2',
                             exp_metadata=exp_metadata,
                             mode='adaptive')

    elif adaptive_pars['mode'] == '1D':
        # BUG FIX: was "MC.MC.set_sweep_functions(...)" (AttributeError)
        MC.set_sweep_functions([fluxlutman['cz_theta_f_{}'.format(which_gate)]])
        MC.set_sweep_points(np.linspace(adaptive_pars['theta_f_min'],
                                        adaptive_pars['theta_f_max'],
                                        adaptive_pars['n_points']))

        if sim_control_CZ.cluster():
            dat = MC.run('1D simulation_new_cluster2 double sided {} - length {:.1f} - distortions {} - T2_scaling {:.1f} - sigma_q1 {:.0f}, sigma_q0 {:.0f}'.format(czd_double_sided,
                cz_length*1e9, sim_control_CZ.distortions(), sim_control_CZ.T2_scaling(), sim_control_CZ.sigma_q1()*1e6, sim_control_CZ.sigma_q0()*1e6),
                mode='1D', exp_metadata=exp_metadata)
        else:
            if adaptive_pars['long_name']:
                dat = MC.run('1D simulation_new_2 double sided {} - length {:.1f} - distortions {} - T2_scaling {:.1f} - sigma_q1 {:.0f}, sigma_q0 {:.0f}'.format(czd_double_sided,
                    cz_length*1e9, sim_control_CZ.distortions(), sim_control_CZ.T2_scaling(), sim_control_CZ.sigma_q1()*1e6, sim_control_CZ.sigma_q0()*1e6),
                    mode='1D', exp_metadata=exp_metadata)
            else:
                dat = MC.run('1D simulation_new_2',
                             exp_metadata=exp_metadata,
                             mode='1D')

    elif adaptive_pars['mode'] == 'cma_optimizer':
        # BUG FIX: the sweep functions were previously wrapped in a nested
        # MC.set_sweep_functions(MC.set_sweep_functions([...])) call, which
        # passed that method's return value instead of the function list
        MC.set_sweep_functions([fluxlutman['cz_theta_f_{}'.format(which_gate)],
                                fluxlutman['cz_lambda_2_{}'.format(which_gate)]])
        MC.set_adaptive_function_parameters(
            {'adaptive_function': cma.fmin,
             'x0': adaptive_pars['x0'], 'sigma0': adaptive_pars['sigma0'],
             # options for the CMA algorithm can be found using
             # "cma.CMAOptions()"
             'options': {'maxfevals': adaptive_pars['n_points'],  # maximum function cals
                         # Scaling for individual sigma's
                         'cma_stds': [5, 6, 3],
                         'ftarget': 0.005},  # Target function value
             })

        if sim_control_CZ.cluster():
            dat = MC.run('2D simulation_new_cluster2 double sided {} - length {:.1f} - waiting {:.2f} - T2_scaling {:.2f} - sigma_q1 {:.0f}, sigma_q0 {:.0f}'.format(czd_double_sided,
                cz_length*1e9, sim_control_CZ.waiting_at_sweetspot(), sim_control_CZ.T2_scaling(), sim_control_CZ.sigma_q1()*1e6, sim_control_CZ.sigma_q0()*1e6),
                mode='adaptive', exp_metadata=exp_metadata)
        else:
            if adaptive_pars['long_name']:
                dat = MC.run('2D simulation_new_2 double sided {} - length {:.1f} - distortions {} - T2_scaling {:.1f} - sigma_q1 {:.0f}, sigma_q0 {:.0f}'.format(czd_double_sided,
                    cz_length*1e9, sim_control_CZ.distortions(), sim_control_CZ.T2_scaling(), sim_control_CZ.sigma_q1()*1e6, sim_control_CZ.sigma_q0()*1e6),
                    mode='adaptive', exp_metadata=exp_metadata)
            else:
                dat = MC.run('2D simulation_new_2',
                             exp_metadata=exp_metadata,
                             mode='adaptive')

    # Tear down the per-job instruments so the names can be reused
    fluxlutman.close()
    fluxlutman_static.close()
    sim_control_CZ.close()
    MC.close()
def compute_propagator(arglist):
    """
    Build the CZ flux-pulse waveform (including voltage scaling,
    distortions, flux-noise shifts, optional single-qubit-rotation and
    idle segments, and gate repetitions) and compute the corresponding
    time-evolution propagator.

    Args:
        arglist (dict): keys fluxbias_q0, fluxbias_q1 (flux offsets),
            fitted_stepresponse_ty (step response used by the distortion
            filter), fluxlutman, fluxlutman_static, sim_control_CZ
            (instrument objects).

    Returns:
        list: [U_final, t_final] where U_final is whatever
        czf.time_evolution_new returns for this pulse and t_final is the
        total gate duration in seconds.
    """
    # I was parallelizing this function in the cluster, then I changed but the list as an argument remains.
    # Below each list item is assigned to its own variable
    fluxbias_q0 = arglist['fluxbias_q0']
    fluxbias_q1 = arglist['fluxbias_q1']
    fitted_stepresponse_ty = arglist['fitted_stepresponse_ty']
    fluxlutman = arglist['fluxlutman']
    fluxlutman_static = arglist['fluxlutman_static']
    sim_control_CZ = arglist['sim_control_CZ']

    which_gate = sim_control_CZ.which_gate()
    gates_num = int(sim_control_CZ.gates_num())  # repeat the same gate this number of times
    gates_interval = sim_control_CZ.gates_interval()  # idle time between repeated gates

    # gate-specific lutman parameters
    q_J2 = fluxlutman.get('q_J2_{}'.format(which_gate))
    czd_double_sided = fluxlutman.get('czd_double_sided_{}'.format(which_gate))
    cz_length = fluxlutman.get('cz_length_{}'.format(which_gate))
    cz_lambda_2 = fluxlutman.get('cz_lambda_2_{}'.format(which_gate))
    cz_lambda_3 = fluxlutman.get('cz_lambda_3_{}'.format(which_gate))
    cz_theta_f = fluxlutman.get('cz_theta_f_{}'.format(which_gate))

    sim_step = 1/fluxlutman.sampling_rate()
    subdivisions_of_simstep = sim_control_CZ.simstep_div()  # 4 is a good one, corresponding to a time step of 0.1 ns
    sim_step_new = sim_step/subdivisions_of_simstep  # waveform is generated according to sampling rate of AWG,
    # but we can use a different step for simulating the time evolution
    tlist = np.arange(0, cz_length, sim_step)

    # residual_coupling=czf.conditional_frequency(0,fluxlutman,fluxlutman_static, which_gate=which_gate)  # To check residual coupling at the operating point.
    # print(residual_coupling)  # Change amp to get the residual coupling at different points

    eps_i = fluxlutman.calc_amp_to_eps(0, state_A='11', state_B='02', which_gate=which_gate)
    theta_i = wfl.eps_to_theta(eps_i, g=q_J2)  # Beware theta in radian!

    if not czd_double_sided:
        # single-sided pulse: one adiabatic (Martinis-style) trajectory
        thetawave = wfl.martinis_flux_pulse(
            length=cz_length,
            lambda_2=cz_lambda_2,
            lambda_3=cz_lambda_3,
            theta_i=theta_i,
            theta_f=np.deg2rad(cz_theta_f),
            sampling_rate=fluxlutman.sampling_rate())  # return in terms of theta
        epsilon = wfl.theta_to_eps(thetawave, q_J2)
        amp = fluxlutman.calc_eps_to_amp(epsilon, state_A='11', state_B='02', which_gate=which_gate)
        # transform detuning frequency to (positive) amplitude
    else:
        amp = get_f_pulse_double_sided(fluxlutman, theta_i, which_gate=which_gate)

    # For better accuracy in simulations, redefine amp in terms of sim_step_new.
    # We split here below in two cases to keep into account that certain times net-zero is one AWG time-step longer
    # than the conventional pulse with the same pulse length.
    if len(tlist) == len(amp):
        tlist_temp = np.concatenate((tlist, np.array([cz_length])))
        tlist_new = np.arange(0, cz_length,
                              sim_step_new)
    else:
        tlist_temp = np.concatenate((tlist, np.array([cz_length, cz_length+sim_step])))
        tlist_new = np.arange(0, cz_length+sim_step,
                              sim_step_new)
    amp_temp = np.concatenate((amp, np.array([0])))  # amp should come back to the initial value, i.e. at the sweet spot
    amp_interp = interp1d(tlist_temp, amp_temp)
    amp = amp_interp(tlist_new)

    # optionally park at the sweet spot in the middle of a net-zero pulse
    if czd_double_sided and sim_control_CZ.waiting_at_sweetspot() > 0:
        tlist_new, amp = czf.add_waiting_at_sweetspot(tlist_new, amp, sim_control_CZ.waiting_at_sweetspot())

    # Apply voltage scaling
    amp = amp * sim_control_CZ.voltage_scaling_factor()

    # Apply distortions
    if sim_control_CZ.distortions():
        amp_final = czf.distort_amplitude(fitted_stepresponse_ty=fitted_stepresponse_ty, amp=amp, tlist_new=tlist_new, sim_step_new=sim_step_new)
    else:
        amp_final = amp

    # Uncomment to get plots of the distorted pulse.
    # czf.plot(x_plot_vec=[np.array(tlist_new)*1e9],y_plot_vec=[amp_final],
    #                      title='Pulse with distortions, absolute',
    #                      xlabel='Time (ns)',ylabel='Amplitude (volts)')
    # czf.plot(x_plot_vec=[np.array(tlist_new)*1e9],y_plot_vec=[amp_final-amp],
    #                      title='Pulse with distortions, difference',
    #                      xlabel='Time (ns)',ylabel='Amplitude (volts)')

    # The fluxbias_q0 affects the pulse shape after the distortions have been taken into account
    if sim_control_CZ.sigma_q0() != 0:
        amp_final = czf.shift_due_to_fluxbias_q0(fluxlutman=fluxlutman, amp_final=amp_final, fluxbias_q0=fluxbias_q0, sim_control_CZ=sim_control_CZ, which_gate=which_gate)

    # per-sample durations of the simulation; extended below as segments are appended
    intervals_list = np.zeros(np.size(tlist_new)) + sim_step_new

    # We add the single qubit rotations at the end of the pulse
    if sim_control_CZ.Z_rotations_length() > sim_step_new:
        actual_Z_rotations_length = np.arange(0, sim_control_CZ.Z_rotations_length(), sim_step_new)[-1] + sim_step_new
        intervals_list = np.append(intervals_list, [sim_step_new, actual_Z_rotations_length - sim_step_new])
        amp_Z_rotation = [0, 0]
        if sim_control_CZ.sigma_q0() != 0:
            amp_Z_rotation = czf.shift_due_to_fluxbias_q0(fluxlutman=fluxlutman, amp_final=amp_Z_rotation, fluxbias_q0=fluxbias_q0, sim_control_CZ=sim_control_CZ, which_gate=which_gate)

    # We add the idle time at the end of the pulse (even if it's not at the end. It doesn't matter)
    if sim_control_CZ.total_idle_time() > sim_step_new:
        actual_total_idle_time = np.arange(0, sim_control_CZ.total_idle_time(), sim_step_new)[-1] + sim_step_new
        intervals_list = np.append(intervals_list, [sim_step_new, actual_total_idle_time - sim_step_new])
        amp_idle_time = [0, 0]
        # idle time is single-sided so we save the czd_double_sided value, set it to False
        # and later restore it to the original value
        double_sided = czd_double_sided
        log.debug('Changing fluxlutman czd_double_sided_{} value to {}'.format(which_gate, False))
        fluxlutman.set('czd_double_sided_{}'.format(which_gate), False)
        if sim_control_CZ.sigma_q0() != 0:
            amp_idle_time = czf.shift_due_to_fluxbias_q0(fluxlutman=fluxlutman, amp_final=amp_idle_time, fluxbias_q0=fluxbias_q0, sim_control_CZ=sim_control_CZ, which_gate=which_gate)
        log.debug('Changing fluxlutman czd_double_sided_{} value back to {}'.format(which_gate, double_sided))
        fluxlutman.set('czd_double_sided_{}'.format(which_gate), double_sided)

    # We concatenate amp and f_pulse with the values they take during the Zrotations and idle_x
    # It comes after the previous line because of details of the function czf.shift_due_to_fluxbias_q0
    if sim_control_CZ.Z_rotations_length() > sim_step_new:
        amp_final = np.concatenate((amp_final, amp_Z_rotation))
    if sim_control_CZ.total_idle_time() > sim_step_new:
        amp_final = np.concatenate((amp_final, amp_idle_time))

    # czf.plot(x_plot_vec=[np.arange(0,np.size(intervals_list))],y_plot_vec=[amp_final],
    #                      title='Pulse with (possibly) single qubit rotations and idle time',
    #                      xlabel='Time (ns)',ylabel='Amplitude (volts)')
    # czf.plot(x_plot_vec=[np.array(tlist_new)*1e9],y_plot_vec=[amp_final-amp_final_new],
    #                      title='Pulse with distortions and shift due to fluxbias_q0, difference',
    #                      xlabel='Time (ns)',ylabel='Amplitude (volts)')

    # if gates_num > 1:
    #     orig_size = np.size(intervals_list)
    #     idle_size = int(gates_interval / sim_step_new)
    #     intervals_list = np.full(orig_size * gates_num + idle_size * (gates_num - 1), sim_step_new)
    #     amp_append = np.concatenate((np.zeros(idle_size), amp_final[:orig_size]))
    #     for gate in range(gates_num - 1):
    #         amp_final = np.append(amp_final, amp_append)

    if gates_num > 1:
        if gates_interval > 0:
            # This is intended to make the simulation faster by skipping
            # all the amp = 0 steps, verified to encrease sim speed
            # 4.7s/data point -> 4.0s/data point
            # Errors in simulation outcomes are < 1e-10
            actual_gates_interval = np.arange(0, gates_interval, sim_step_new)[-1] + sim_step_new
            # We add an extra small step to ensure the amp signal goes to
            # zero first
            interval_append = np.concatenate(([sim_step_new, actual_gates_interval - sim_step_new], intervals_list))
            amp_append = np.concatenate(([0, 0], amp_final))
        else:
            interval_append = intervals_list
            amp_append = amp_final
        # Append arbitrary number of same gate
        for gate in range(gates_num - 1):
            amp_final = np.append(amp_final, amp_append)
            intervals_list = np.append(intervals_list, interval_append)

    # print('l_3={}\nl_2={}\ntheta_f={}'.format(fluxlutman.cz_lambda_3(), fluxlutman.cz_lambda_2(). fluxlutman.cz_theta_f_))
    # print('np.array={}'.format(intervals_list))
    # print('np.array={}'.format(amp_final))
    # np.savez('l3={}'.format(fluxlutman.get('cz_lambda_3_{}'.format(which_gate))), x=intervals_list, y=amp_final)
    # plt.plot(np.cumsum(intervals_list), amp_final)
    # plt.show()

    t_final = np.sum(intervals_list)  # actual overall gate length

    # Obtain jump operators for Lindblad equation
    c_ops = czf.return_jump_operators(sim_control_CZ=sim_control_CZ, amp_final=amp_final, fluxlutman=fluxlutman, which_gate=which_gate)

    # for waveformcomparizon purposes
    # sim_control_CZ.sim_waveform(amp_final)

    # Compute propagator
    U_final = czf.time_evolution_new(
        c_ops=c_ops,
        sim_control_CZ=sim_control_CZ,
        fluxlutman_static=fluxlutman_static,
        fluxlutman=fluxlutman,
        fluxbias_q1=fluxbias_q1,
        amp=amp_final,
        sim_step=sim_step_new,
        intervals_list=intervals_list,
        which_gate=which_gate)
    # print(czf.verify_CPTP(U_superop_average))  # simple check of CPTP property

    return [U_final, t_final]
def get_f_pulse_double_sided(fluxlutman, theta_i, which_gate: str = 'NE'):
    """Build the double-sided (net-zero) CZ flux-pulse amplitude waveform.

    The pulse is composed of two Martinis-style adiabatic half-pulses: a
    first half on the positive amplitude branch and a second half on the
    negative branch, concatenated in time.

    Args:
        fluxlutman: flux LUT manager instrument holding the per-gate pulse
            parameters (looked up with the ``which_gate`` suffix).
        theta_i: initial interaction angle of the adiabatic pulse.
        which_gate: directional gate label used as parameter-name suffix.

    Returns:
        1-D array of flux amplitudes for the full double-sided pulse.
        N.B. no amplitude scaling or offset is applied here.
    """
    # Per-gate pulse parameters from the flux LUT manager.
    lambda_2 = fluxlutman.get('cz_lambda_2_{}'.format(which_gate))
    lambda_3 = fluxlutman.get('cz_lambda_3_{}'.format(which_gate))
    total_length = fluxlutman.get('cz_length_{}'.format(which_gate))
    theta_f_deg = fluxlutman.get('cz_theta_f_{}'.format(which_gate))
    length_ratio = fluxlutman.get('czd_length_ratio_{}'.format(which_gate))
    coupling_j2 = fluxlutman.get('q_J2_{}'.format(which_gate))

    def _half_pulse_amp(length, positive):
        # Adiabatic pulse in theta space, then theta -> detuning ->
        # flux amplitude on the requested branch.
        theta_wave = wfl.martinis_flux_pulse(
            length=length,
            lambda_2=lambda_2,
            lambda_3=lambda_3,
            theta_i=theta_i,
            theta_f=np.deg2rad(theta_f_deg),
            sampling_rate=fluxlutman.sampling_rate())  # return in terms of theta
        eps = wfl.theta_to_eps(theta_wave, coupling_j2)
        if positive:
            return fluxlutman.calc_eps_to_amp(
                eps, state_A='11', state_B='02', which_gate=which_gate)
        return fluxlutman.calc_eps_to_amp(
            eps, state_A='11', state_B='02', positive_branch=False,
            which_gate=which_gate)

    # First CZ half-pulse: positive-amplitude branch.
    amp_first = _half_pulse_amp(total_length * length_ratio, positive=True)
    # Second CZ half-pulse: negative-amplitude branch.
    amp_second = _half_pulse_amp(total_length * (1 - length_ratio),
                                 positive=False)
    return np.concatenate([amp_first, amp_second])
class CZ_trajectory_superoperator(det.Soft_Detector):
    # NOTE(review): relies on module-level helpers defined elsewhere in this
    # file/package: czf (simulation helpers), qtp (qutip), det (detectors),
    # and compute_propagator.
    def __init__(
        self,
        fluxlutman,
        sim_control_CZ,
        fluxlutman_static,
        fitted_stepresponse_ty=None,
        qois='all'
    ):
        """
        Detector for simulating a CZ trajectory.
        Args:
            fluxlutman (instr): an instrument that contains the parameters
                required to generate the waveform for the trajectory, and the hamiltonian as well.
            sim_control_CZ: instrument that contains the noise parameters, plus some more
            fitted_stepresponse_ty: list of two elements, corresponding to the time t
                and the step response in volts along the y axis
            qois: list
                list of quantities of interest, this can be used to return
                only a select set of values. The list should contain
                entries of "value_names". if qois=='all', all quantities are returned.
        Structure: compute input parameters necessary to compute time evolution (propagator), then compute quantities of interest
        Returns: quantities of interest
        """
        super().__init__()
        # Full set of reported quantities; possibly masked down below via `qois`.
        # Order must match `quantities_of_interest` in acquire_data_point.
        self.value_names = ['Cost func', 'Cond phase', 'L1', 'L2', 'avgatefid_pc', 'avgatefid_compsubspace_pc',
                            'phase_q0', 'phase_q1', 'avgatefid_compsubspace', 'avgatefid_compsubspace_pc_onlystaticqubit', 'population_02_state',
                            'cond_phase02', 'coherent_leakage11', 'offset_difference', 'missing_fraction', '12_21_population_transfer', '12_03_population_transfer',
                            'phase_diff_12_02', 'phase_diff_21_20', 'cond_phase12', 'cond_phase21', 'cond_phase03', 'cond_phase20']
        self.value_units = ['a.u.', 'deg', '%', '%', '%', '%', 'deg', 'deg', '%', '%', '%', 'deg', '%', '%', '%', '%', '%', 'deg', 'deg', 'deg', 'deg', 'deg', 'deg']
        self.qois = qois
        if self.qois != 'all':
            # Restrict the reported values to the requested subset,
            # keeping names and units paired by index.
            self.qoi_mask = [self.value_names.index(q) for q in qois]
            self.value_names = list(np.array(self.value_names)[self.qoi_mask])
            self.value_units = list(np.array(self.value_units)[self.qoi_mask])
        self.fluxlutman = fluxlutman
        self.fluxlutman_static = fluxlutman_static
        self.sim_control_CZ = sim_control_CZ
        if fitted_stepresponse_ty is None:
            # Trivial placeholder step response (no distortion data supplied).
            self.fitted_stepresponse_ty = [np.array(1), np.array(1)]
        else:
            # list of 2 elements: stepresponse (=y) as a function of time (=t)
            self.fitted_stepresponse_ty = fitted_stepresponse_ty

    def acquire_data_point(self, **kw):
        """Simulate the CZ gate averaged over Gaussian flux-noise samples and
        return the quantities of interest (order given by value_names)."""
        # Discretize average (integral) over a Gaussian distribution
        mean = 0
        sigma_q0 = self.sim_control_CZ.sigma_q0()
        sigma_q1 = self.sim_control_CZ.sigma_q1()  # one for each qubit, in units of Phi_0
        qoi_plot = []  # used to verify convergence properties. If len(n_sampling_gaussian_vec)==1, it is useless
        # 11 guarantees excellent convergence.
        # We choose it odd so that the central point of the Gaussian is included.
        # Always choose it odd
        n_sampling_gaussian_vec = self.sim_control_CZ.n_sampling_gaussian_vec()
        for n_sampling_gaussian in n_sampling_gaussian_vec:
            # If sigma=0 there's no need for sampling
            if sigma_q0 != 0:
                samplingpoints_gaussian_q0 = np.linspace(-5*sigma_q0, 5*sigma_q0, n_sampling_gaussian)  # after 5 sigmas we cut the integral
                delta_x_q0 = samplingpoints_gaussian_q0[1]-samplingpoints_gaussian_q0[0]
                values_gaussian_q0 = czf.gaussian(samplingpoints_gaussian_q0, mean, sigma_q0)
            else:
                samplingpoints_gaussian_q0 = np.array([0])
                delta_x_q0 = 1
                values_gaussian_q0 = np.array([1])
            if sigma_q1 != 0:
                samplingpoints_gaussian_q1 = np.linspace(-5*sigma_q1, 5*sigma_q1, n_sampling_gaussian)  # after 5 sigmas we cut the integral
                delta_x_q1 = samplingpoints_gaussian_q1[1]-samplingpoints_gaussian_q1[0]
                values_gaussian_q1 = czf.gaussian(samplingpoints_gaussian_q1, mean, sigma_q1)
            else:
                samplingpoints_gaussian_q1 = np.array([0])
                delta_x_q1 = 1
                values_gaussian_q1 = np.array([1])
            # This is actually the input that was parallelized in an old version.
            # Currently it just creates a list that is provided sequentially to compute_propagator
            input_to_parallelize = []
            # Quadrature weights of the 2-D Gaussian average, one per sample.
            weights = []
            number = -1  # used to number instruments that are created in the parallelization, to avoid conflicts in the cluster
            for j_q0 in range(len(samplingpoints_gaussian_q0)):
                fluxbias_q0 = samplingpoints_gaussian_q0[j_q0]  # q0 fluxing qubit
                for j_q1 in range(len(samplingpoints_gaussian_q1)):
                    fluxbias_q1 = samplingpoints_gaussian_q1[j_q1]  # q1 spectator qubit
                    input_point = {'fluxbias_q0': fluxbias_q0,
                                   'fluxbias_q1': fluxbias_q1,
                                   'fluxlutman': self.fluxlutman,
                                   'fluxlutman_static': self.fluxlutman_static,
                                   'sim_control_CZ': self.sim_control_CZ,
                                   'fitted_stepresponse_ty': self.fitted_stepresponse_ty}
                    weight = values_gaussian_q0[j_q0]*delta_x_q0 * values_gaussian_q1[j_q1]*delta_x_q1
                    weights.append(weight)
                    input_to_parallelize.append(input_point)
            U_final_vec = []
            t_final_vec = []
            for input_arglist in input_to_parallelize:
                result_list = compute_propagator(input_arglist)
                if self.sim_control_CZ.double_cz_pi_pulses() != '':
                    # Experimenting with single qubit ideal pi pulses
                    if self.sim_control_CZ.double_cz_pi_pulses() == 'with_pi_pulses':
                        pi_single_qubit = qtp.Qobj([[0, 1, 0],
                                                    [1, 0, 0],
                                                    [0, 0, 1]])
                        # pi_pulse = qtp.tensor(pi_single_qubit, qtp.qeye(n_levels_q0))
                        pi_op = qtp.tensor(pi_single_qubit, pi_single_qubit)
                        # pi_super_op = qtp.to_super(pi_op)
                        U_final = result_list[0]
                        # Echo-like sequence: CZ, pi on both qubits, CZ, pi.
                        U_final = pi_op * U_final * pi_op * U_final
                    elif self.sim_control_CZ.double_cz_pi_pulses() == 'no_pi_pulses':
                        U_final = result_list[0]
                        U_final = U_final * U_final
                    t_final = 2 * result_list[1]
                else:
                    U_final = result_list[0]
                    t_final = result_list[1]
                U_final_vec.append(U_final)
                t_final_vec.append(t_final)
            t_final = t_final_vec[0]  # equal for all entries, we need it to compute phases in the rotating frame
            # needed to compute phases in the rotating frame, not used anymore
            # w_q0, w_q1, alpha_q0, alpha_q1 = czf.dressed_frequencies(self.fluxlutman, self.fluxlutman_static, self.sim_control_CZ, which_gate=self.sim_control_CZ.which_gate())
            # Reproducing Leo's plots of cond_phase and leakage vs. flux offset (I order vs II order)
            # czf.sensitivity_to_fluxoffsets(U_final_vec,input_to_parallelize,t_final,self.fluxlutman,self.fluxlutman_static, which_gate=self.sim_control_CZ.which_gate())
            for i in range(len(U_final_vec)):
                if U_final_vec[i].type == 'oper':
                    U_final_vec[i] = qtp.to_super(U_final_vec[i])  # weighted averaging needs to be done for superoperators
                U_final_vec[i] = U_final_vec[i] * weights[i]
            U_superop_average = sum(U_final_vec)  # computing resulting average propagator
            # print(czf.verify_CPTP(U_superop_average))
            qoi = czf.simulate_quantities_of_interest_superoperator_new(U=U_superop_average, t_final=t_final, fluxlutman=self.fluxlutman, fluxlutman_static=self.fluxlutman_static, which_gate=self.sim_control_CZ.which_gate())
            # if we look only for the minimum avgatefid_pc in the heat maps,
            # then we optimize the search via higher-order cost function
            if self.sim_control_CZ.cost_func() is not None:
                cost_func_val = self.sim_control_CZ.cost_func()(qoi)
            elif self.sim_control_CZ.look_for_minimum():
                cost_func_val = (np.log10(1 - qoi['avgatefid_compsubspace_pc']))**4  # sign removed for even powers
            else:
                cost_func_val = (-np.log10(1 - qoi['avgatefid_compsubspace_pc']))
            # Must stay in the same order as self.value_names above.
            quantities_of_interest = [cost_func_val, qoi['phi_cond'], qoi['L1']*100, qoi['L2']*100, qoi['avgatefid_pc']*100,
                                      qoi['avgatefid_compsubspace_pc']*100, qoi['phase_q0'], qoi['phase_q1'],
                                      qoi['avgatefid_compsubspace']*100, qoi['avgatefid_compsubspace_pc_onlystaticqubit']*100, qoi['population_02_state']*100,
                                      qoi['cond_phase02'], qoi['coherent_leakage11']*100, qoi['offset_difference']*100, qoi['missing_fraction']*100,
                                      qoi['population_transfer_12_21']*100, qoi['population_transfer_12_03']*100,
                                      qoi['phase_diff_12_02'], qoi['phase_diff_21_20'], qoi['cond_phase12'], qoi['cond_phase21'], qoi['cond_phase03'], qoi['cond_phase20']
                                      ]
            qoi_vec = np.array(quantities_of_interest)
            qoi_plot.append(qoi_vec)
            # To study the effect of the coherence of leakage on repeated CZs (simpler than simulating a full RB experiment):
            # czf.repeated_CZs_decay_curves(U_superop_average,t_final,self.fluxlutman,self.fluxlutman_static, which_gate=self.sim_control_CZ.which_gate())
            # czf.plot_spectrum(self.fluxlutman,self.fluxlutman_static, which_gate=self.sim_control_CZ.which_gate())
        qoi_plot = np.array(qoi_plot)
        # Uncomment to study the convergence properties of averaging over a Gaussian
        # for i in range(len(qoi_plot[0])):
        #     czf.plot(x_plot_vec=[n_sampling_gaussian_vec],
        #              y_plot_vec=[qoi_plot[:,i]],
        #              title='Study of convergence of average',
        #              xlabel='n_sampling_gaussian points',ylabel=self.value_names[i])
        # Only the first (finest) sampling result is returned.
        return_values = [qoi_plot[0, 0], qoi_plot[0, 1], qoi_plot[0, 2], qoi_plot[0, 3],
                         qoi_plot[0, 4], qoi_plot[0, 5], qoi_plot[0, 6],
                         qoi_plot[0, 7], qoi_plot[0, 8], qoi_plot[0, 9], qoi_plot[0, 10],
                         qoi_plot[0, 11], qoi_plot[0, 12], qoi_plot[0, 13], qoi_plot[0, 14], qoi_plot[0, 15], qoi_plot[0, 16], qoi_plot[0, 17], qoi_plot[0, 18],
                         qoi_plot[0, 19], qoi_plot[0, 20], qoi_plot[0, 21], qoi_plot[0, 22]]
        if self.qois != 'all':
            return np.array(return_values)[self.qoi_mask]
        else:
            return return_values
| mit |
dhimmel/networkx | networkx/tests/test_convert_pandas.py | 43 | 2177 | from nose import SkipTest
from nose.tools import assert_true
import networkx as nx
class TestConvertPandas(object):
    """Tests for nx.from_pandas_dataframe edge-attribute handling."""
    numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test

    @classmethod
    def setupClass(cls):
        # Skip the whole test class when pandas is not installed.
        try:
            import pandas as pd
        except ImportError:
            raise SkipTest('Pandas not available.')

    def __init__(self, ):
        # Bind pandas at module scope so the test methods can use it too.
        global pd
        import pandas as pd
        # Fixed seed so the generated edge weights/costs are reproducible.
        self.r = pd.np.random.RandomState(seed=5)
        ints = self.r.random_integers(1, 10, size=(3,2))
        a = ['A', 'B', 'C']
        b = ['D', 'A', 'E']
        df = pd.DataFrame(ints, columns=['weight', 'cost'])
        df[0] = a # Column label 0 (int)
        df['b'] = b # Column label 'b' (str)
        self.df = df

    def assert_equal(self, G1, G2):
        # Graphs must be isomorphic with exactly matching edge data dicts.
        assert_true( nx.is_isomorphic(G1, G2, edge_match=lambda x, y: x == y ))

    def test_from_dataframe_all_attr(self, ):
        # edge_attr=True: every non-source/target column becomes edge data.
        Gtrue = nx.Graph([('E', 'C', {'cost': 9, 'weight': 10}),
                          ('B', 'A', {'cost': 1, 'weight': 7}),
                          ('A', 'D', {'cost': 7, 'weight': 4})])
        G=nx.from_pandas_dataframe(self.df, 0, 'b', True)
        self.assert_equal(G, Gtrue)

    def test_from_dataframe_multi_attr(self, ):
        # Explicit list of attribute columns.
        Gtrue = nx.Graph([('E', 'C', {'cost': 9, 'weight': 10}),
                          ('B', 'A', {'cost': 1, 'weight': 7}),
                          ('A', 'D', {'cost': 7, 'weight': 4})])
        G=nx.from_pandas_dataframe(self.df, 0, 'b', ['weight', 'cost'])
        self.assert_equal(G, Gtrue)

    def test_from_dataframe_one_attr(self, ):
        # Single attribute column given as a string.
        Gtrue = nx.Graph([('E', 'C', {'weight': 10}),
                          ('B', 'A', {'weight': 7}),
                          ('A', 'D', {'weight': 4})])
        G=nx.from_pandas_dataframe(self.df, 0, 'b', 'weight')
        self.assert_equal(G, Gtrue)

    def test_from_dataframe_no_attr(self, ):
        # Default: edges carry no attributes.
        Gtrue = nx.Graph([('E', 'C', {}),
                          ('B', 'A', {}),
                          ('A', 'D', {})])
        G=nx.from_pandas_dataframe(self.df, 0, 'b',)
        self.assert_equal(G, Gtrue)
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/examples/api/histogram_path_demo.py | 6 | 1444 | """
This example shows how to use a path patch to draw a bunch of
rectangles. The technique of using lots of Rectangle instances, or
the faster method of using PolyCollections, were implemented before we
had proper paths with moveto/lineto, closepoly etc in mpl. Now that
we have them, we can draw collections of regularly shaped objects with
homogeneous properties more efficiently with a PathCollection. This
example makes a histogram -- it's more work to set up the vertex arrays
at the outset, but it should be much faster for large numbers of
objects
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path

fig, ax = plt.subplots()

# histogram our data with numpy
data = np.random.randn(1000)
n, bins = np.histogram(data, 50)

# get the corners of the rectangles for the histogram
left = np.array(bins[:-1])
right = np.array(bins[1:])
bottom = np.zeros(len(left))
top = bottom + n

# we need a (numrects x numsides x 2) numpy array for the path helper
# function to build a compound path
XY = np.array([[left, left, right, right], [bottom, top, top, bottom]]).T

# get the Path object: one compound path for all bars at once
barpath = path.Path.make_compound_path_from_polys(XY)

# make a patch out of it; a single artist draws the whole histogram
patch = patches.PathPatch(barpath, facecolor='blue', edgecolor='gray', alpha=0.8)
ax.add_patch(patch)

# update the view limits (add_patch does not autoscale)
ax.set_xlim(left[0], right[-1])
ax.set_ylim(bottom.min(), top.max())

plt.show()
| mit |
kiyoto/statsmodels | statsmodels/discrete/tests/test_constrained.py | 26 | 19635 | # -*- coding: utf-8 -*-
"""
Created on Fri May 30 16:22:29 2014
Author: Josef Perktold
License: BSD-3
"""
from statsmodels.compat.python import StringIO
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_
from nose import SkipTest
import pandas as pd
import patsy
from statsmodels.discrete.discrete_model import Poisson
from statsmodels.discrete.discrete_model import Logit
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
from statsmodels.base._constraints import fit_constrained
from statsmodels.tools.tools import add_constant
from statsmodels import datasets
# Shared fixture: the Spector dataset with an appended constant column,
# used by the GLM/Logit constrained tests below.
spector_data = datasets.spector.load()
spector_data.exog = add_constant(spector_data.exog, prepend=False)

from .results import results_poisson_constrained as results
from .results import results_glm_logit_constrained as reslogit

# DEBUG toggles extra warnings in CheckPoissonConstrainedMixin.test_other.
DEBUG = False
ss='''\
agecat smokes deaths pyears
1 1 32 52407
2 1 104 43248
3 1 206 28612
4 1 186 12663
5 1 102 5317
1 0 2 18790
2 0 12 10673
3 0 28 5710
4 0 28 2585
5 0 31 1462'''
data = pd.read_csv(StringIO(ss), delimiter='\t')
data['logpyears'] = np.log(data['pyears'])
class CheckPoissonConstrainedMixin(object):
    """Shared assertions comparing constrained-fit results against Stata.

    Subclasses must set: res1 (tuple of params, cov_params from
    fit_constrained), res2 (Stata reference results), idx (parameter
    reordering into Stata's order), and optionally res1m (results instance
    from the model's fit_constrained method).
    """

    def test_basic(self):
        # Compare the raw (params, cov_params) tuple against Stata.
        res1 = self.res1
        res2 = self.res2
        assert_allclose(res1[0], res2.params[self.idx], rtol=1e-6)
        # see below Stata has nan, we have zero
        bse1 = np.sqrt(np.diag(res1[1]))
        mask = (bse1 == 0) & np.isnan(res2.bse[self.idx])
        assert_allclose(bse1[~mask], res2.bse[self.idx][~mask], rtol=1e-6)

    def test_basic_method(self):
        # Compare the results instance from fit_constrained, if provided.
        if hasattr(self, 'res1m'):
            res1 = (self.res1m if not hasattr(self.res1m, '_results')
                    else self.res1m._results)
            res2 = self.res2
            assert_allclose(res1.params, res2.params[self.idx], rtol=1e-6)
            # when a parameter is fixed, the Stata has bse=nan, we have bse=0
            mask = (res1.bse == 0) & np.isnan(res2.bse[self.idx])
            assert_allclose(res1.bse[~mask], res2.bse[self.idx][~mask], rtol=1e-6)
            tvalues = res2.params_table[self.idx, 2]
            # when a parameter is fixed, the Stata has tvalue=nan, we have tvalue=inf
            mask = np.isinf(res1.tvalues) & np.isnan(tvalues)
            assert_allclose(res1.tvalues[~mask], tvalues[~mask], rtol=1e-6)
            pvalues = res2.params_table[self.idx, 3]
            # note most pvalues are very small
            # examples so far agree at 8 or more decimal, but rtol is stricter
            # see above
            mask = (res1.pvalues == 0) & np.isnan(pvalues)
            assert_allclose(res1.pvalues[~mask], pvalues[~mask], rtol=5e-5)
            ci_low = res2.params_table[self.idx, 4]
            ci_upp = res2.params_table[self.idx, 5]
            ci = np.column_stack((ci_low, ci_upp))
            # note most pvalues are very small
            # examples so far agree at 8 or more decimal, but rtol is stricter
            # see above: nan versus value
            assert_allclose(res1.conf_int()[~np.isnan(ci)], ci[~np.isnan(ci)], rtol=5e-5)
            #other
            assert_allclose(res1.llf, res2.ll, rtol=1e-6)
            assert_equal(res1.df_model, res2.df_m)
            # Stata doesn't have df_resid
            df_r = res2.N - res2.df_m - 1
            assert_equal(res1.df_resid, df_r)
        else:
            raise SkipTest("not available yet")

    def test_other(self):
        # some results may not be valid or available for all models
        if hasattr(self, 'res1m'):
            res1 = self.res1m
            res2 = self.res2
            if hasattr(res2, 'll_0'):
                assert_allclose(res1.llnull, res2.ll_0, rtol=1e-6)
            else:
                if DEBUG:
                    import warnings
                    message = ('test: ll_0 not available, llnull=%6.4F'
                               % res1.llnull)
                    warnings.warn(message)
        else:
            raise SkipTest("not available yet")
class TestPoissonConstrained1a(CheckPoissonConstrainedMixin):
    """Poisson without exposure/offset, equality constraint between levels."""

    @classmethod
    def setup_class(cls):
        cls.res2 = results.results_noexposure_constraint
        cls.idx = [7, 3, 4, 5, 6, 0, 1]  # 2 is dropped baseline for categorical

        # example without offset
        formula = 'deaths ~ logpyears + smokes + C(agecat)'
        mod = Poisson.from_formula(formula, data=data)
        #res1a = mod1a.fit()
        # get start_params, example fails to converge on one py TravisCI
        k_vars = len(mod.exog_names)
        start_params = np.zeros(k_vars)
        start_params[0] = np.log(mod.endog.mean())
        # if we need it, this is desired params
        p = np.array([-3.93478643, 1.37276214, 2.33077032, 2.71338891,
                      2.71338891, 0.57966535, 0.97254074])

        constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
        lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
        cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
                                   start_params=start_params,
                                   fit_kwds={'method': 'bfgs',
                                             'disp': 0})
        # TODO: Newton fails
        # test method of Poisson, not monkey patched
        cls.res1m = mod.fit_constrained(constr, start_params=start_params,
                                        method='bfgs', disp=0)

    def test_smoke(self):
        # trailing text in summary, assumes it's the first extra string
        #NOTE: see comment about convergence in llnull for self.res1m
        summ = self.res1m.summary()
        assert_('linear equality constraints' in summ.extra_txt)
        summ = self.res1m.summary2()
        assert_('linear equality constraints' in summ.extra_txt[0])
class TestPoissonConstrained1b(CheckPoissonConstrainedMixin):
    """Same constraint as 1a but with exposure instead of logpyears."""

    @classmethod
    def setup_class(cls):
        cls.res2 = results.results_exposure_constraint
        #cls.idx = [3, 4, 5, 6, 0, 1]  # 2 is dropped baseline for categorical
        cls.idx = [6, 2, 3, 4, 5, 0]  # 2 is dropped baseline for categorical

        # example without offset
        formula = 'deaths ~ smokes + C(agecat)'
        mod = Poisson.from_formula(formula, data=data,
                                   exposure=data['pyears'].values)
        #offset=np.log(data['pyears'].values))
        #res1a = mod1a.fit()
        constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
        lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
        cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
                                   fit_kwds={'method': 'newton',
                                             'disp': 0})
        cls.constraints = lc
        # TODO: bfgs fails
        # test method of Poisson, not monkey patched
        cls.res1m = mod.fit_constrained(constr, method='newton',
                                        disp=0)
class TestPoissonConstrained1c(CheckPoissonConstrainedMixin):
    """Same as 1b but using an explicit offset instead of exposure."""

    @classmethod
    def setup_class(cls):
        cls.res2 = results.results_exposure_constraint
        #cls.idx = [3, 4, 5, 6, 0, 1]  # 2 is dropped baseline for categorical
        cls.idx = [6, 2, 3, 4, 5, 0]  # 2 is dropped baseline for categorical

        # example without offset
        formula = 'deaths ~ smokes + C(agecat)'
        mod = Poisson.from_formula(formula, data=data,
                                   offset=np.log(data['pyears'].values))
        #res1a = mod1a.fit()
        constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
        lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
        cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
                                   fit_kwds={'method': 'newton',
                                             'disp': 0})
        cls.constraints = lc
        # TODO: bfgs fails
        # test method of Poisson, not monkey patched
        cls.res1m = mod.fit_constrained(constr, method='newton', disp=0)
class TestPoissonNoConstrained(CheckPoissonConstrainedMixin):
    """Unconstrained fit checked through the same mixin machinery."""

    @classmethod
    def setup_class(cls):
        cls.res2 = results.results_exposure_noconstraint
        cls.idx = [6, 2, 3, 4, 5, 0]  # 1 is dropped baseline for categorical

        # example without offset
        formula = 'deaths ~ smokes + C(agecat)'
        mod = Poisson.from_formula(formula, data=data,
                                   #exposure=data['pyears'].values)
                                   offset=np.log(data['pyears'].values))
        res1 = mod.fit(disp=0)._results
        # res1 is duplicate check, so we can follow the same pattern
        cls.res1 = (res1.params, res1.cov_params())
        cls.res1m = res1
class TestPoissonConstrained2a(CheckPoissonConstrainedMixin):
    """Affine (nonzero right-hand side) constraint, no exposure/offset."""

    @classmethod
    def setup_class(cls):
        cls.res2 = results.results_noexposure_constraint2
        cls.idx = [7, 3, 4, 5, 6, 0, 1]  # 2 is dropped baseline for categorical

        # example without offset
        formula = 'deaths ~ logpyears + smokes + C(agecat)'
        mod = Poisson.from_formula(formula, data=data)

        # get start_params, example fails to converge on one py TravisCI
        k_vars = len(mod.exog_names)
        start_params = np.zeros(k_vars)
        start_params[0] = np.log(mod.endog.mean())
        # if we need it, this is desired params
        p = np.array([-9.43762015, 1.52762442, 2.74155711, 3.58730007,
                      4.08730007, 1.15987869, 0.12111539])

        constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'
        lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
        cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
                                   start_params=start_params,
                                   fit_kwds={'method': 'bfgs', 'disp': 0})
        # TODO: Newton fails
        # test method of Poisson, not monkey patched
        cls.res1m = mod.fit_constrained(constr, start_params=start_params,
                                        method='bfgs', disp=0)
class TestPoissonConstrained2b(CheckPoissonConstrainedMixin):
    """Affine constraint with exposure."""

    @classmethod
    def setup_class(cls):
        cls.res2 = results.results_exposure_constraint2
        #cls.idx = [3, 4, 5, 6, 0, 1]  # 2 is dropped baseline for categorical
        cls.idx = [6, 2, 3, 4, 5, 0]  # 2 is dropped baseline for categorical

        # example without offset
        formula = 'deaths ~ smokes + C(agecat)'
        mod = Poisson.from_formula(formula, data=data,
                                   exposure=data['pyears'].values)
        #offset=np.log(data['pyears'].values))
        #res1a = mod1a.fit()
        constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'
        lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
        cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
                                   fit_kwds={'method': 'newton',
                                             'disp': 0})
        cls.constraints = lc
        # TODO: bfgs fails to converge. overflow somewhere?

        # test method of Poisson, not monkey patched
        cls.res1m = mod.fit_constrained(constr, method='bfgs', disp=0,
                                        start_params=cls.res1[0])
class TestPoissonConstrained2c(CheckPoissonConstrainedMixin):
    """Affine constraint with explicit offset instead of exposure."""

    @classmethod
    def setup_class(cls):
        cls.res2 = results.results_exposure_constraint2
        #cls.idx = [3, 4, 5, 6, 0, 1]  # 2 is dropped baseline for categorical
        cls.idx = [6, 2, 3, 4, 5, 0]  # 2 is dropped baseline for categorical

        # example without offset
        formula = 'deaths ~ smokes + C(agecat)'
        mod = Poisson.from_formula(formula, data=data,
                                   offset=np.log(data['pyears'].values))

        constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'
        lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
        cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
                                   fit_kwds={'method':'newton',
                                             'disp': 0})
        cls.constraints = lc
        # TODO: bfgs fails

        # test method of Poisson, not monkey patched
        cls.res1m = mod.fit_constrained(constr,
                                        method='bfgs', disp=0,
                                        start_params=cls.res1[0])
class TestGLMPoissonConstrained1a(CheckPoissonConstrainedMixin):
    """Same case as TestPoissonConstrained1a but fitted through GLM."""

    @classmethod
    def setup_class(cls):
        from statsmodels.base._constraints import fit_constrained

        cls.res2 = results.results_noexposure_constraint
        cls.idx = [7, 3, 4, 5, 6, 0, 1]  # 2 is dropped baseline for categorical

        # example without offset
        formula = 'deaths ~ logpyears + smokes + C(agecat)'
        mod = GLM.from_formula(formula, data=data,
                               family=families.Poisson())

        constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
        lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
        cls.res1 = fit_constrained(mod, lc.coefs, lc.constants)
        cls.constraints = lc
        cls.res1m = mod.fit_constrained(constr)
class TestGLMPoissonConstrained1b(CheckPoissonConstrainedMixin):
    """GLM-Poisson with offset and an equality constraint; also cross-checks
    the GLM fit against the discrete Poisson model on the same data."""

    @classmethod
    def setup_class(cls):
        from statsmodels.genmod.generalized_linear_model import GLM
        from statsmodels.genmod import families
        from statsmodels.base._constraints import fit_constrained

        cls.res2 = results.results_exposure_constraint
        cls.idx = [6, 2, 3, 4, 5, 0]  # 2 is dropped baseline for categorical

        # example with offset
        formula = 'deaths ~ smokes + C(agecat)'
        mod = GLM.from_formula(formula, data=data,
                               family=families.Poisson(),
                               offset=np.log(data['pyears'].values))

        constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
        lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
        cls.res1 = fit_constrained(mod, lc.coefs, lc.constants)
        cls.constraints = lc
        cls.res1m = mod.fit_constrained(constr)._results

    def test_compare_glm_poisson(self):
        """GLM-Poisson and discrete Poisson constrained fits must agree."""
        res1 = self.res1m
        res2 = self.res2

        formula = 'deaths ~ smokes + C(agecat)'
        mod = Poisson.from_formula(formula, data=data,
                                   exposure=data['pyears'].values)

        constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
        res2 = mod.fit_constrained(constr, start_params=self.res1m.params,
                                   method='newton', warn_convergence=False,
                                   disp=0)

        # we get high precision because we use the params as start_params

        # basic, just as check that we have the same model
        assert_allclose(res1.params, res2.params, rtol=1e-12)
        assert_allclose(res1.bse, res2.bse, rtol=1e-12)

        # check predict, fitted, ...
        predicted = res1.predict()
        assert_allclose(predicted, res2.predict(), rtol=1e-10)
        assert_allclose(res1.mu, predicted, rtol=1e-10)
        assert_allclose(res1.fittedvalues, predicted, rtol=1e-10)
        # BUG FIX: the original compared res2 against itself here, which is a
        # tautology; compare the linear predictors of the two models instead.
        assert_allclose(res1.predict(linear=True), res2.predict(linear=True),
                        rtol=1e-10)
class CheckGLMConstrainedMixin(CheckPoissonConstrainedMixin):
    # add tests for some GLM specific attributes

    def test_glm(self):
        """Check GLM-specific information criteria and deviance vs Stata."""
        res2 = self.res2  # reference results
        res1 = self.res1m

        #assert_allclose(res1.aic, res2.aic, rtol=1e-10)  # far away
        # Stata aic in ereturn and in estat ic are very different
        # we have the same as estat ic
        # see issue #1733
        assert_allclose(res1.aic, res2.infocrit[4], rtol=1e-10)

        assert_allclose(res1.bic, res2.bic, rtol=1e-10)
        # bic is deviance based
        #assert_allclose(res1.bic, res2.infocrit[5], rtol=1e-10)
        assert_allclose(res1.deviance, res2.deviance, rtol=1e-10)
        # TODO: which chi2 are these
        #assert_allclose(res1.pearson_chi2, res2.chi2, rtol=1e-10)
class TestGLMLogitConstrained1(CheckGLMConstrainedMixin):
    """Binomial GLM with a parameter fixed to a constant ('x1 = 2.8')."""

    @classmethod
    def setup_class(cls):
        cls.idx = slice(None)
        # params sequence same as Stata, but Stata reports param = nan
        # and we have param = value = 0

        #res1ul = Logit(data.endog, data.exog).fit(method="newton", disp=0)
        cls.res2 = reslogit.results_constraint1

        mod1 = GLM(spector_data.endog, spector_data.exog,
                   family=families.Binomial())

        constr = 'x1 = 2.8'
        cls.res1m = mod1.fit_constrained(constr)

        R, q = cls.res1m.constraints.coefs, cls.res1m.constraints.constants
        cls.res1 = fit_constrained(mod1, R, q)
class TestGLMLogitConstrained2(CheckGLMConstrainedMixin):
    """Binomial GLM with an equality constraint between two parameters
    ('x1 - x3 = 0'); also exercises predict and fit_constrained_wrap."""

    @classmethod
    def setup_class(cls):
        cls.idx = slice(None)  # params sequence same as Stata
        #res1ul = Logit(data.endog, data.exog).fit(method="newton", disp=0)
        cls.res2 = reslogit.results_constraint2

        mod1 = GLM(spector_data.endog, spector_data.exog,
                   family=families.Binomial())

        constr = 'x1 - x3 = 0'
        cls.res1m = mod1.fit_constrained(constr)

        R, q = cls.res1m.constraints.coefs, cls.res1m.constraints.constants
        cls.res1 = fit_constrained(mod1, R, q)
        cls.constraints_rq = (R, q)

    def test_predict(self):
        # results only available for this case
        res2 = self.res2  # reference results
        res1 = self.res1m

        predicted = res1.predict()
        assert_allclose(predicted, res2.predict_mu, atol=1e-7)
        assert_allclose(res1.mu, predicted, rtol=1e-10)
        assert_allclose(res1.fittedvalues, predicted, rtol=1e-10)

    def test_smoke(self):
        # trailing text in summary, assumes it's the first extra string
        summ = self.res1m.summary()
        assert_('linear equality constraints' in summ.extra_txt)

        summ = self.res1m.summary2()
        assert_('linear equality constraints' in summ.extra_txt[0])

    def test_fit_constrained_wrap(self):
        # minimal test
        res2 = self.res2  # reference results

        from statsmodels.base._constraints import fit_constrained_wrap
        res_wrap = fit_constrained_wrap(self.res1m.model, self.constraints_rq)
        # FIX: the original repeated this identical assertion twice;
        # asserting once is sufficient.
        assert_allclose(res_wrap.params, res2.params, rtol=1e-6)
def junk():
    """Dead scratch/debug code, never called by the test suite.

    Kept for reference only; exercises alternative formulas and the
    low-level (R, q) form of fit_constrained interactively.
    """
    # Singular Matrix in mod1a.fit()
    formula1 = 'deaths ~ smokes + C(agecat)'
    formula2 = 'deaths ~ C(agecat) + C(smokes) : C(agecat)'  # same as Stata default

    mod = Poisson.from_formula(formula2, data=data, exposure=data['pyears'].values)

    res0 = mod.fit()

    constraints = 'C(smokes)[T.1]:C(agecat)[3] = C(smokes)[T.1]:C(agecat)[4]'

    import patsy
    lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constraints)
    R, q = lc.coefs, lc.constants

    resc = mod.fit_constrained(R,q, fit_kwds={'method':'bfgs'})

    # example without offset
    formula1a = 'deaths ~ logpyears + smokes + C(agecat)'
    mod1a = Poisson.from_formula(formula1a, data=data)
    print(mod1a.exog.shape)

    res1a = mod1a.fit()
    lc_1a = patsy.DesignInfo(mod1a.exog_names).linear_constraint('C(agecat)[T.4] = C(agecat)[T.5]')
    resc1a = mod1a.fit_constrained(lc_1a.coefs, lc_1a.constants, fit_kwds={'method':'newton'})
    print(resc1a[0])
    print(resc1a[1])
| bsd-3-clause |
walterreade/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 87 | 3903 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause

print(__doc__)

import shutil
import tempfile

import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage

from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.externals.joblib import Memory
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold

###############################################################################
# Generate data: smoothed random images with two square ROIs driving y.
# NOTE(review): np.bool is removed in modern NumPy; this example targets the
# older NumPy that shipped with this scikit-learn version.
n_samples = 200
size = 40  # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.

X = np.random.randn(n_samples, size ** 2)
for x in X:  # smooth data
    x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)

y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise  # add noise

###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(2)  # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)

# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
                            memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y)  # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)

# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression)  # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y)  # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_.reshape(1, -1))
coef_selection_ = coef_.reshape(size, size)

###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()

# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
jonyroda97/redbot-amigosprovaveis | lib/matplotlib/backends/backend_template.py | 2 | 9623 | """
This is a fully functional do nothing backend to provide a template to
backend writers. It is fully functional in that you can select it as
a backend with
import matplotlib
matplotlib.use('Template')
and your matplotlib scripts will (should!) run without error, though
no output is produced. This provides a nice starting point for
backend writers because you can selectively implement methods
(draw_rectangle, draw_lines, etc...) and slowly see your figure come
to life w/o having to have a full blown implementation before getting
any results.
Copy this to backend_xxx.py and replace all instances of 'template'
with 'xxx'. Then implement the class methods and functions below, and
add 'xxx' to the switchyard in matplotlib/backends/__init__.py and
'xxx' to the backends list in the validate_backend method in
matplotlib/__init__.py and you're off. You can use your backend with::
import matplotlib
matplotlib.use('xxx')
from pylab import *
plot([1,2,3])
show()
matplotlib also supports external backends, so you can use any module
in your PYTHONPATH with the syntax::
import matplotlib
matplotlib.use('module://my_backend')
where my_backend.py is your module name. This syntax is also
recognized in the rc file and in the -d argument in pylab, e.g.,::
python simple_plot.py -dmodule://my_backend
If your backend implements support for saving figures (i.e. has a print_xyz()
method) you can register it as the default handler for a given file type
from matplotlib.backend_bases import register_backend
register_backend('xyz', 'my_backend', 'XYZ File Format')
...
plt.savefig("figure.xyz")
The files that are most relevant to backend_writers are
matplotlib/backends/backend_your_backend.py
matplotlib/backend_bases.py
matplotlib/backends/__init__.py
matplotlib/__init__.py
matplotlib/_pylab_helpers.py
Naming Conventions
* classes Upper or MixedUpperCase
* variables lower or lowerUpper
* functions lower or underscore_separated
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.figure import Figure
from matplotlib.transforms import Bbox
class RendererTemplate(RendererBase):
    """
    The renderer handles drawing/rendering operations.
    This is a minimal do-nothing class that can be used to get started when
    writing a new backend. Refer to backend_bases.RendererBase for
    documentation of the classes methods.
    """
    def __init__(self, dpi):
        # Resolution of the target surface, in dots per inch.
        self.dpi = dpi
    def draw_path(self, gc, path, transform, rgbFace=None):
        # Core drawing primitive; a real backend must implement this.
        pass
    # draw_markers is optional, and we get more correct relative
    # timings by leaving it out. backend implementers concerned with
    # performance will probably want to implement it
#     def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
#         pass
    # draw_path_collection is optional, and we get more correct
    # relative timings by leaving it out. backend implementers concerned with
    # performance will probably want to implement it
#     def draw_path_collection(self, gc, master_transform, paths,
#                              all_transforms, offsets, offsetTrans, facecolors,
#                              edgecolors, linewidths, linestyles,
#                              antialiaseds):
#         pass
    # draw_quad_mesh is optional, and we get more correct
    # relative timings by leaving it out.  backend implementers concerned with
    # performance will probably want to implement it
#     def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
#                        coordinates, offsets, offsetTrans, facecolors,
#                        antialiased, edgecolors):
#         pass
    def draw_image(self, gc, x, y, im):
        pass
    def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
        pass
    def flipy(self):
        # True means y increases from top to bottom (origin at top-left).
        return True
    def get_canvas_width_height(self):
        # Placeholder canvas size; a real backend reports the actual surface.
        return 100, 100
    def get_text_width_height_descent(self, s, prop, ismath):
        # Placeholder text metrics (width, height, descent).
        return 1, 1, 1
    def new_gc(self):
        # Hand out this backend's graphics-context type.
        return GraphicsContextTemplate()
    def points_to_pixels(self, points):
        # if backend doesn't have dpi, e.g., postscript or svg
        return points
        # elif backend assumes a value for pixels_per_inch
        #return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
        # else
        #return points/72.0 * self.dpi.get()
class GraphicsContextTemplate(GraphicsContextBase):
    """
    The graphics context provides the color, line styles, etc... See the gtk
    and postscript backends for examples of mapping the graphics context
    attributes (cap styles, join styles, line widths, colors) to a particular
    backend. In GTK this is done by wrapping a gtk.gdk.GC object and
    forwarding the appropriate calls to it using a dictionary mapping styles
    to gdk constants. In Postscript, all the work is done by the renderer,
    mapping line styles to postscript calls.
    If it's more appropriate to do the mapping at the renderer level (as in
    the postscript backend), you don't need to override any of the GC methods.
    If it's more appropriate to wrap an instance (as in the GTK backend) and
    do the mapping here, you'll need to override several of the setter
    methods.
    The base GraphicsContext stores colors as a RGB tuple on the unit
    interval, e.g., (0.5, 0.0, 1.0). You may need to map this to colors
    appropriate for your backend.
    """
    # No overrides needed for the do-nothing template backend.
    pass
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
    """
    For image backends - is not required
    For GUI backends - this should be overridden if drawing should be done in
    interactive python mode
    """
    # Intentionally a no-op for this template (non-GUI) backend.
    # May be implemented via the `_draw_if_interactive_template` helper.
def show():
    """
    For image backends - is not required
    For GUI backends - show() is usually the last line of a pylab script and
    tells the backend that it is time to draw.  In interactive mode, this may
    be a do nothing func.  See the GTK backend for an example of how to handle
    interactive versus batch mode
    """
    # Iterate every open figure manager tracked by Gcf; a GUI backend
    # would raise/redraw each window here.
    for manager in Gcf.get_all_fig_managers():
        # do something to display the GUI
        pass
def new_figure_manager(num, *args, **kwargs):
    """
    Create a new figure manager instance.

    GUI backends that must create a main-level application object usually do
    it here (or in ``new_figure_manager_given_figure``); see backend_wx,
    backend_wxagg and backend_tkagg for examples.  Not all GUIs require an
    explicit main-level app (e.g. backend_gtk, backend_gtkagg) for pylab.
    May be implemented via the `_new_figure_manager_template` helper.
    """
    figure_class = kwargs.pop('FigureClass', Figure)
    figure = figure_class(*args, **kwargs)
    return new_figure_manager_given_figure(num, figure)
def new_figure_manager_given_figure(num, figure):
    """
    Create a new figure manager instance for the given figure.

    May be implemented via the `_new_figure_manager_template` helper.
    """
    # Wrap the figure in this backend's canvas, then hand the canvas to a
    # manager identified by `num`.
    return FigureManagerTemplate(FigureCanvasTemplate(figure), num)
class FigureCanvasTemplate(FigureCanvasBase):
    """
    The canvas the figure renders into.  Calls the draw and print fig
    methods, creates the renderers, etc...
    Note GUI templates will want to connect events for button presses,
    mouse movements and key presses to functions that call the base
    class methods button_press_event, button_release_event,
    motion_notify_event, key_press_event, and key_release_event.  See,
    e.g., backend_gtk.py, backend_wx.py and backend_tkagg.py
    Attributes
    ----------
    figure : `matplotlib.figure.Figure`
        A high-level Figure instance
    """
    def draw(self):
        """
        Draw the figure using the renderer
        """
        # Create a fresh renderer at the figure's dpi and let the figure
        # walk its artist tree through it.
        renderer = RendererTemplate(self.figure.dpi)
        self.figure.draw(renderer)
    # You should provide a print_xxx function for every file format
    # you can write.
    # If the file type is not in the base set of filetypes,
    # you should add it to the class-scope filetypes dictionary as follows:
    filetypes = FigureCanvasBase.filetypes.copy()
    filetypes['foo'] = 'My magic Foo format'
    def print_foo(self, filename, *args, **kwargs):
        """
        Write out format foo.  The dpi, facecolor and edgecolor are restored
        to their original values after this call, so you don't need to
        save and restore them.
        """
        pass
    def get_default_filetype(self):
        # Default extension used by savefig when none is given.
        return 'foo'
class FigureManagerTemplate(FigureManagerBase):
    """
    Wrap everything up into a window for the pylab interface
    For non interactive backends, the base class does all the work
    """
    # Nothing to override for a non-interactive template backend.
    pass
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
# Canonical names that matplotlib.backends.__init__ looks up when this
# backend is selected.
FigureCanvas = FigureCanvasTemplate
FigureManager = FigureManagerTemplate
| gpl-3.0 |
alexandrejaguar/strata-sv-2015-tutorial | resources/vizarray.py | 2 | 3634 | # encoding: utf-8
"""Vizualize NumPy arrays using ipythonblocks.
To enable the automatic vizualization of arrays::
import vizarray
vizarray.enable()
To disable this::
vizarray.disable()
To set the colormap (to any valid matplotlib colormap name)::
vizarray.set_cmap('jet')
To set the block_size in px (default is 30px)::
vizarray.set_block_size(10)
To turn off autoscaling of arrays:
vizarray.set_scale(False)
"""
import ipythonblocks as ipb
from ipythonblocks import BlockGrid
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
_cmap = 'jet'
_scale = True
_vmin = None
_vmax = None
_block_size = 30
def set_cmap(cmap_name):
    """Store *cmap_name* as the module-wide default colormap name."""
    global _cmap
    _cmap = cmap_name
def set_scale(s):
    """Store *s* as the module-wide autoscaling flag."""
    global _scale
    _scale = s
def set_vmin(vm):
    """Store *vm* as the module-wide lower normalization limit."""
    global _vmin
    _vmin = vm
def set_vmax(vm):
    """Store *vm* as the module-wide upper normalization limit."""
    global _vmax
    _vmax = vm
def set_block_size(bs):
    """Store *bs* (pixels) as the module-wide default block size."""
    global _block_size
    _block_size = bs
def list_colormaps():
    """Return the sorted names of all non-reversed matplotlib colormaps."""
    names = [name for name in plt.cm.datad if not name.endswith("_r")]
    names.sort()
    return names
def _value_to_color(value, cmap):
    """Map *value* (expected in [0, 1]) through colormap *cmap* to an
    ``[r, g, b]`` list of integers in 0-255 (alpha is dropped)."""
    rgba = plt.get_cmap(cmap)(value)
    return [int(round(channel * 255)) for channel in rgba[:3]]
def vizarray(x, cmap=None, scale=None, vmin=None, vmax=None, block_size=None):
    """Visualize a 1D or 2D NumPy array as an ipythonblocks BlockGrid.

    Parameters
    ----------
    x : ndarray
        One- or two-dimensional array to display.
    cmap : str, optional
        Matplotlib colormap name; defaults to the module-wide setting.
    scale : bool, optional
        Whether to normalize values to [0, 1] before colormapping;
        defaults to the module-wide setting.
    vmin, vmax : float, optional
        Normalization limits; autoscaled from the data when None.
    block_size : int, optional
        Block size in pixels; defaults to the module-wide setting.

    Returns
    -------
    ipythonblocks.BlockGrid

    Raises
    ------
    TypeError
        If ``x`` is not 1- or 2-dimensional.
    """
    if x.ndim not in (1, 2):
        raise TypeError('This function only works with 1 or 2 dimensional arrays')
    # Fall back to the module-level defaults (read-only access, so no
    # `global` declaration is needed).
    cmap = cmap if cmap is not None else _cmap
    scale = scale if scale is not None else _scale
    vmin = vmin if vmin is not None else _vmin
    vmax = vmax if vmax is not None else _vmax
    block_size = block_size if block_size is not None else _block_size
    # If x is a view, normalize against its base array so that slices of the
    # same array share a common color scale.  (The original
    # `x.base if x.base is not None else None` was equivalent to `x.base`.)
    base = x.base
    data = x.copy()
    if scale:
        n = colors.Normalize(vmin=vmin, vmax=vmax)
        if base is not None:
            n.autoscale(base)
        data = n(data)
    if data.ndim == 1:
        rows = 1
        cols = data.shape[0]
        bg = BlockGrid(cols, rows, block_size=block_size)
        for col in range(cols):
            bg[0, col] = _value_to_color(data[col], cmap)
    else:
        # ndim == 2, guaranteed by the validation above; using `else`
        # removes the path where `bg` could be left unbound.
        rows = data.shape[0]
        cols = data.shape[1]
        bg = BlockGrid(cols, rows, block_size=block_size)
        for row in range(rows):
            for col in range(cols):
                bg[row, col] = _value_to_color(data[row, col], cmap)
    return bg
def _array_to_html(a):
    # IPython 'text/html' formatter hook: render ndarray *a* as a block grid.
    return vizarray(a)._repr_html_()
def enable():
    """Enable automatic visualization of NumPy arrays in the IPython Notebook."""
    try:
        from IPython.core.getipython import get_ipython
    except ImportError:
        raise ImportError('This feature requires IPython 1.0+')
    ip = get_ipython()
    # Register our HTML renderer for ndarray on the active shell's formatter.
    f = ip.display_formatter.formatters['text/html']
    f.for_type(np.ndarray, _array_to_html)
def disable():
    """Disable automatic visualization of NumPy arrays in the IPython Notebook."""
    try:
        from IPython.core.getipython import get_ipython
    except ImportError:
        raise ImportError('This feature requires IPython 1.0+')
    ip = get_ipython()
    # Drop the ndarray entry registered by enable(); no-op if absent.
    f = ip.display_formatter.formatters['text/html']
    f.type_printers.pop(np.ndarray, None)
| bsd-3-clause |
toobaz/pandas | asv_bench/benchmarks/io/sql.py | 1 | 4950 | import sqlite3
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, date_range, read_sql_query, read_sql_table
from sqlalchemy import create_engine
class SQL:
    # ASV benchmark: round-trip a mixed-dtype DataFrame through SQL using
    # either a SQLAlchemy engine or a raw sqlite3 connection.
    params = ["sqlalchemy", "sqlite"]
    param_names = ["connection"]
    def setup(self, connection):
        N = 10000
        con = {
            "sqlalchemy": create_engine("sqlite:///:memory:"),
            "sqlite": sqlite3.connect(":memory:"),
        }
        self.table_name = "test_type"
        self.query_all = "SELECT * FROM {}".format(self.table_name)
        self.con = con[connection]
        # One column per dtype of interest, plus some NaNs and a string
        # rendering of the datetime column.
        self.df = DataFrame(
            {
                "float": np.random.randn(N),
                "float_with_nan": np.random.randn(N),
                "string": ["foo"] * N,
                "bool": [True] * N,
                "int": np.random.randint(0, N, size=N),
                "datetime": date_range("2000-01-01", periods=N, freq="s"),
            },
            index=tm.makeStringIndex(N),
        )
        self.df.loc[1000:3000, "float_with_nan"] = np.nan
        self.df["datetime_string"] = self.df["datetime"].astype(str)
        self.df.to_sql(self.table_name, self.con, if_exists="replace")
    def time_to_sql_dataframe(self, connection):
        # Time writing the whole frame.
        self.df.to_sql("test1", self.con, if_exists="replace")
    def time_read_sql_query(self, connection):
        # Time reading the whole table back via a SELECT *.
        read_sql_query(self.query_all, self.con)
class WriteSQLDtypes:
    # ASV benchmark: per-dtype single-column write/read through SQL,
    # parameterized over connection kind and column dtype.
    params = (
        ["sqlalchemy", "sqlite"],
        ["float", "float_with_nan", "string", "bool", "int", "datetime"],
    )
    param_names = ["connection", "dtype"]
    def setup(self, connection, dtype):
        N = 10000
        con = {
            "sqlalchemy": create_engine("sqlite:///:memory:"),
            "sqlite": sqlite3.connect(":memory:"),
        }
        self.table_name = "test_type"
        self.query_col = "SELECT {} FROM {}".format(dtype, self.table_name)
        self.con = con[connection]
        self.df = DataFrame(
            {
                "float": np.random.randn(N),
                "float_with_nan": np.random.randn(N),
                "string": ["foo"] * N,
                "bool": [True] * N,
                "int": np.random.randint(0, N, size=N),
                "datetime": date_range("2000-01-01", periods=N, freq="s"),
            },
            index=tm.makeStringIndex(N),
        )
        self.df.loc[1000:3000, "float_with_nan"] = np.nan
        self.df["datetime_string"] = self.df["datetime"].astype(str)
        self.df.to_sql(self.table_name, self.con, if_exists="replace")
    def time_to_sql_dataframe_column(self, connection, dtype):
        # Time writing just the benchmarked column.
        self.df[[dtype]].to_sql("test1", self.con, if_exists="replace")
    def time_read_sql_query_select_column(self, connection, dtype):
        # Time selecting just the benchmarked column.
        read_sql_query(self.query_col, self.con)
class ReadSQLTable:
    # ASV benchmark: read_sql_table (SQLAlchemy only) over a mixed-dtype table.
    def setup(self):
        N = 10000
        self.table_name = "test"
        self.con = create_engine("sqlite:///:memory:")
        self.df = DataFrame(
            {
                "float": np.random.randn(N),
                "float_with_nan": np.random.randn(N),
                "string": ["foo"] * N,
                "bool": [True] * N,
                "int": np.random.randint(0, N, size=N),
                "datetime": date_range("2000-01-01", periods=N, freq="s"),
            },
            index=tm.makeStringIndex(N),
        )
        self.df.loc[1000:3000, "float_with_nan"] = np.nan
        self.df["datetime_string"] = self.df["datetime"].astype(str)
        self.df.to_sql(self.table_name, self.con, if_exists="replace")
    def time_read_sql_table_all(self):
        read_sql_table(self.table_name, self.con)
    def time_read_sql_table_parse_dates(self):
        # Time reading a stringified datetime column with date parsing.
        read_sql_table(
            self.table_name,
            self.con,
            columns=["datetime_string"],
            parse_dates=["datetime_string"],
        )
class ReadSQLTableDtypes:
    # ASV benchmark: read_sql_table of a single column, per dtype.
    params = ["float", "float_with_nan", "string", "bool", "int", "datetime"]
    param_names = ["dtype"]
    def setup(self, dtype):
        N = 10000
        self.table_name = "test"
        self.con = create_engine("sqlite:///:memory:")
        self.df = DataFrame(
            {
                "float": np.random.randn(N),
                "float_with_nan": np.random.randn(N),
                "string": ["foo"] * N,
                "bool": [True] * N,
                "int": np.random.randint(0, N, size=N),
                "datetime": date_range("2000-01-01", periods=N, freq="s"),
            },
            index=tm.makeStringIndex(N),
        )
        self.df.loc[1000:3000, "float_with_nan"] = np.nan
        self.df["datetime_string"] = self.df["datetime"].astype(str)
        self.df.to_sql(self.table_name, self.con, if_exists="replace")
    def time_read_sql_table_column(self, dtype):
        read_sql_table(self.table_name, self.con, columns=[dtype])
from ..pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
blink1073/scikit-image | doc/examples/edges/plot_convex_hull.py | 9 | 1487 | """
===========
Convex Hull
===========
The convex hull of a binary image is the set of pixels included in the
smallest convex polygon that surround all white pixels in the input.
In this example, we show how the input pixels (white) get filled in by the
convex hull (white and grey).
A good overview of the algorithm is given on `Steve Eddin's blog
<http://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/>`__.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.morphology import convex_hull_image
# Toy binary image: the white pixels (1s) form a "V" shape whose convex hull
# fills in the enclosed triangle.
image = np.array(
    [[0, 0, 0, 0, 0, 0, 0, 0, 0],
     [0, 0, 0, 0, 1, 0, 0, 0, 0],
     [0, 0, 0, 1, 0, 1, 0, 0, 0],
     [0, 0, 1, 0, 0, 0, 1, 0, 0],
     [0, 1, 0, 0, 0, 0, 0, 1, 0],
     [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=float)
original_image = np.copy(image)
chull = convex_hull_image(image)
# Add 1 inside the hull so original pixels show as 2 and filled-in hull
# pixels show as 1 (grey) in the plot below.
image[chull] += 1
# image is now:
# [[ 0.  0.  0.  0.  0.  0.  0.  0.  0.]
#  [ 0.  0.  0.  0.  2.  0.  0.  0.  0.]
#  [ 0.  0.  0.  2.  1.  2.  0.  0.  0.]
#  [ 0.  0.  2.  1.  1.  1.  2.  0.  0.]
#  [ 0.  2.  1.  1.  1.  1.  1.  2.  0.]
#  [ 0.  0.  0.  0.  0.  0.  0.  0.  0.]]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 6))
ax1.set_title('Original picture')
ax1.imshow(original_image, cmap=plt.cm.gray, interpolation='nearest')
ax1.set_xticks([]), ax1.set_yticks([])
ax2.set_title('Transformed picture')
ax2.imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax2.set_xticks([]), ax2.set_yticks([])
plt.show()
| bsd-3-clause |
kerrpy/kerrpy | kerrpy/utils/draw.py | 2 | 2556 | from ..universe import universe
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Circle
import mpl_toolkits.mplot3d.art3d as art3d
def spher2cart(points):
    """Convert an (N, 3) array of spherical coordinates (r, theta, phi)
    to three length-N arrays of Cartesian x, y, z coordinates."""
    radius = points[:, 0]
    polar = points[:, 1]
    azimuth = points[:, 2]
    sin_polar = np.sin(polar)
    x = radius * sin_polar * np.cos(azimuth)
    y = radius * sin_polar * np.sin(azimuth)
    z = radius * np.cos(polar)
    return x, y, z
def drawErgoSphere(ax):
    """Plot the ergosphere's outer boundary as a wireframe on 3D axes *ax*."""
    a2 = universe.spinSquared
    # Draw black hole
    u = np.linspace(0, 2 * np.pi, 50)
    v = np.linspace(0, np.pi, 50)
    # r(theta) = (2 + sqrt(4 - 4 a^2 cos^2(theta))) / 2 — presumably the
    # outer ergosphere radius in geometrized units; TODO confirm conventions.
    r = (2 + np.sqrt(4 - 4*a2*np.square(np.cos(v)))) / 2
    x = r * np.outer(np.cos(u), np.sin(v))
    y = r * np.outer(np.sin(u), np.sin(v))
    z = r * np.outer(np.ones(np.size(u)), np.cos(v))
    ax.plot_wireframe(x, y, z)
def drawCameras(ax):
    """Draw each camera's sensor rectangle on 3D axes *ax*."""
    for camera in universe.cameras:
        # Sensor plane placed at the camera's radius plus its focal length
        # along the x axis; half-extents from the sensor size.
        d = camera.r + camera.focalLength
        H = camera.sensorSize[0] / 2
        W = camera.sensorSize[1] / 2
        # Closed rectangle (first corner repeated to close the outline).
        points = np.array([
            [d, W, H],
            [d, -W, H],
            [d, -W, -H],
            [d, W, -H],
            [d, W, H]
        ])
        ax.plot(points[:, 0], points[:, 1], points[:, 2])
def drawAxes(ax, d=150):
    """Draw grey x, y, z coordinate axes of half-length *d* on 3D axes *ax*."""
    ax.plot((-d, d), (0, 0), (0, 0), 'grey')
    ax.plot((0, 0), (-d, d), (0, 0), 'grey')
    ax.plot((0, 0), (0, 0), (-d, d), 'grey')
def drawBlackHole(ax):
    """Render the black hole's event horizon as a black sphere on *ax*."""
    # Draw horizon
    u = np.linspace(0, 2 * np.pi, 100)
    v = np.linspace(0, np.pi, 100)
    r = universe.horizonRadius
    x = r * np.outer(np.cos(u), np.sin(v))
    y = r * np.outer(np.sin(u), np.sin(v))
    z = r * np.outer(np.ones(np.size(u)), np.cos(v))
    ax.plot_surface(x, y, z, rstride=4, cstride=4, color='black',
                    edgecolors='white', linewidth=0.15)
    # Draw accretion disk
    # circle1 = Circle((0, 0), universe.accretionDisk.innerRadius,
    #                  facecolor='none')
    # circle2 = Circle((0, 0), universe.accretionDisk.outerRadius,
    #                  facecolor='none')
    # ax.add_patch(circle1)
    # ax.add_patch(circle2)
    # art3d.pathpatch_2d_to_3d(circle1, z=0, zdir='z')
    # art3d.pathpatch_2d_to_3d(circle2, z=0, zdir='z')
def drawGeodesic(ax, coordinates, colour):
    """Plot a geodesic given as an (N, 3) spherical-coordinate array."""
    # Compute cartesian coordinates of the ray
    x, y, z = spher2cart(coordinates)
    # Plot the ray!
    ax.plot(x, y, z, label='Ray', color=colour, linewidth=1.5)
def drawScene(ax):
    """Draw the static scene (black hole and cameras) on 3D axes *ax*."""
    # drawAxes(ax)
    drawBlackHole(ax)
    drawCameras(ax)
    # drawErgoSphere(ax)
| gpl-3.0 |
tosolveit/scikit-learn | sklearn/decomposition/pca.py | 192 | 23117 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Michael Eickenberg <michael.eickenberg@inria.fr>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
    """Compute the likelihood of a rank ``rank`` dataset
    The dataset is assumed to be embedded in gaussian noise of shape(n,
    dimf) having spectrum ``spectrum``.
    Parameters
    ----------
    spectrum: array of shape (n)
        Data spectrum.
    rank: int
        Tested rank value.
    n_samples: int
        Number of samples.
    n_features: int
        Number of features.
    Returns
    -------
    ll: float,
        The log-likelihood
    Notes
    -----
    This implements the method of `Thomas P. Minka:
    Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
    """
    if rank > len(spectrum):
        raise ValueError("The tested rank cannot exceed the rank of the"
                         " dataset")
    # The pu/pl/pv/pp/pa terms below assemble the log of Minka's
    # approximate evidence p(D | rank); see the NIPS 2000 paper.
    pu = -rank * log(2.)
    for i in range(rank):
        pu += (gammaln((n_features - i) / 2.)
               - log(np.pi) * (n_features - i) / 2.)
    pl = np.sum(np.log(spectrum[:rank]))
    pl = -pl * n_samples / 2.
    if rank == n_features:
        pv = 0
        v = 1
    else:
        # v: maximum-likelihood noise variance from the discarded spectrum.
        v = np.sum(spectrum[rank:]) / (n_features - rank)
        pv = -np.log(v) * n_samples * (n_features - rank) / 2.
    m = n_features * rank - rank * (rank + 1.) / 2.
    pp = log(2. * np.pi) * (m + rank + 1.) / 2.
    pa = 0.
    spectrum_ = spectrum.copy()
    spectrum_[rank:n_features] = v
    for i in range(rank):
        for j in range(i + 1, len(spectrum)):
            pa += log((spectrum[i] - spectrum[j]) *
                      (1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
    ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
    return ll
def _infer_dimension_(spectrum, n_samples, n_features):
    """Infers the dimension of a dataset of shape (n_samples, n_features)
    The dataset is described by its spectrum `spectrum`.
    """
    # Evaluate Minka's log-likelihood at every candidate rank and keep the
    # rank that maximizes it.
    log_likelihoods = np.array([
        _assess_dimension_(spectrum, rank, n_samples, n_features)
        for rank in range(len(spectrum))
    ])
    return log_likelihoods.argmax()
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making there data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
computed the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
    """Return the average log-likelihood of all samples.

    See. "Pattern Recognition and Machine Learning"
    by C. Bishop, 12.2.1 p. 574
    or http://www.miketipping.com/papers/met-mppca.pdf

    Parameters
    ----------
    X: array, shape(n_samples, n_features)
        The data.

    Returns
    -------
    ll: float
        Average log-likelihood of the samples under the current model
    """
    per_sample_ll = self.score_samples(X)
    return np.mean(per_sample_ll)
class RandomizedPCA(BaseEstimator, TransformerMixin):
    """Principal component analysis (PCA) using randomized SVD

    Linear dimensionality reduction using approximated Singular Value
    Decomposition of the data and keeping only the most significant
    singular vectors to project the data to a lower dimensional space.

    Read more in the :ref:`User Guide <RandomizedPCA>`.

    Parameters
    ----------
    n_components : int, optional
        Maximum number of components to keep. When not given or None, this
        is set to n_features (the second dimension of the training data).

    copy : bool
        If False, data passed to fit are overwritten and running
        fit(X).transform(X) will not yield the expected results,
        use fit_transform(X) instead.

    iterated_power : int, optional
        Number of iterations for the power method. 3 by default.

    whiten : bool, optional
        When True (False by default) the `components_` vectors are divided
        by the singular values to ensure uncorrelated outputs with unit
        component-wise variances.

        Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometime
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.

    random_state : int or RandomState instance or None (default)
        Pseudo Random Number generator seed control. If None, use the
        numpy.random singleton.

    Attributes
    ----------
    components_ : array, [n_components, n_features]
        Components with maximum variance.

    explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components. \
        k is not set then all components are stored and the sum of explained \
        variances is equal to 1.0

    mean_ : array, [n_features]
        Per-feature empirical mean, estimated from the training set.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.decomposition import RandomizedPCA
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> pca = RandomizedPCA(n_components=2)
    >>> pca.fit(X)                 # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    RandomizedPCA(copy=True, iterated_power=3, n_components=2,
           random_state=None, whiten=False)
    >>> print(pca.explained_variance_ratio_)  # doctest: +ELLIPSIS
    [ 0.99244...  0.00755...]

    See also
    --------
    PCA
    TruncatedSVD

    References
    ----------
    .. [Halko2009] `Finding structure with randomness: Stochastic algorithms
      for constructing approximate matrix decompositions Halko, et al., 2009
      (arXiv:909)`

    .. [MRT] `A randomized algorithm for the decomposition of matrices
      Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
    """

    def __init__(self, n_components=None, copy=True, iterated_power=3,
                 whiten=False, random_state=None):
        self.n_components = n_components
        self.copy = copy
        self.iterated_power = iterated_power
        self.whiten = whiten
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model with X by extracting the first principal components.

        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training data, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        self._fit(check_array(X))
        return self

    def _fit(self, X):
        """Fit the model to the data X.

        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        Returns
        -------
        X : ndarray, shape (n_samples, n_features)
            The input data, copied, centered and whitened when requested.
        """
        random_state = check_random_state(self.random_state)
        # as_float_array honours self.copy; atleast_2d makes 1-D input a row.
        X = np.atleast_2d(as_float_array(X, copy=self.copy))

        n_samples = X.shape[0]

        # Center data
        self.mean_ = np.mean(X, axis=0)
        X -= self.mean_
        if self.n_components is None:
            n_components = X.shape[1]
        else:
            n_components = self.n_components

        # Approximate truncated SVD via the randomized power method.
        U, S, V = randomized_svd(X, n_components,
                                 n_iter=self.iterated_power,
                                 random_state=random_state)

        # NOTE: divides by n_samples (population variance), not n_samples - 1.
        self.explained_variance_ = exp_var = (S ** 2) / n_samples
        full_var = np.var(X, axis=0).sum()
        self.explained_variance_ratio_ = exp_var / full_var

        if self.whiten:
            # Scale components so transformed outputs have unit variance.
            self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
        else:
            self.components_ = V

        return X

    def transform(self, X, y=None):
        """Apply dimensionality reduction on X.

        X is projected on the first principal components previous extracted
        from a training set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, 'mean_')

        X = check_array(X)
        if self.mean_ is not None:
            X = X - self.mean_

        X = fast_dot(X, self.components_.T)
        return X

    def fit_transform(self, X, y=None):
        """Fit the model with X and apply the dimensionality reduction on X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        X = check_array(X)
        # _fit returns the centered (and possibly copied) data, so the
        # projection below reuses it without re-centering.
        X = self._fit(X)
        return fast_dot(X, self.components_.T)

    def inverse_transform(self, X, y=None):
        """Transform data back to its original space.

        Returns an array X_original whose transform would be X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples in the number of samples
            and n_components is the number of components.

        Returns
        -------
        X_original array-like, shape (n_samples, n_features)

        Notes
        -----
        If whitening is enabled, inverse_transform does not compute the
        exact inverse operation of transform.
        """
        check_is_fitted(self, 'mean_')

        X_original = fast_dot(X, self.components_)
        if self.mean_ is not None:
            X_original = X_original + self.mean_
        return X_original
| bsd-3-clause |
fabioticconi/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)

# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause

import numpy as np
from matplotlib import pyplot as plt

from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score

# Generate a 300x300 checkerboard with 4x3 biclusters and additive noise.
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
    shape=(300, 300), n_clusters=n_clusters, noise=10,
    shuffle=False, random_state=0)

plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")

# Shuffle rows/columns so the algorithm must recover the hidden structure.
# NOTE(review): sg._shuffle is a private sklearn helper -- may break across
# versions; confirm against the installed sklearn release.
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")

model = SpectralBiclustering(n_clusters=n_clusters, method='log',
                             random_state=0)
model.fit(data)
# consensus_score == 1.0 means the found biclusters match the ground truth.
score = consensus_score(model.biclusters_,
                        (rows[:, row_idx], columns[:, col_idx]))

print("consensus score: {:.1f}".format(score))

# Rearrange rows then columns by their assigned labels to expose biclusters.
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]

plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")

# Outer product of sorted label vectors visualizes the checkerboard pattern.
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
                     np.sort(model.column_labels_) + 1),
            cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")

plt.show()
| bsd-3-clause |
nathania/pysal | pysal/contrib/spint/tests/test_gravity_stats.py | 8 | 12472 | """
Tests for statistics for gravity-style spatial interaction models
"""
__author__ = 'toshan'
import unittest
import numpy as np
import pandas as pd
import gravity as grav
import mle_stats as stats
class SingleParameter(unittest.TestCase):
    """Unit tests statistics when there is a single parameters estimated"""

    def setUp(self):
        # One origin (all o == 1) flowing to 25 destinations: observed flows,
        # distances, and destination populations for a production-constrained
        # gravity model.
        self.f = np.array([0, 6469, 7629, 20036, 4690,
                           6194, 11688, 2243, 8857, 7248,
                           3559, 9221, 10099, 22866, 3388,
                           9986, 46618, 11639, 1380, 5261,
                           5985, 6731, 2704, 12250, 16132])
        self.o = np.repeat(1, 25)
        self.d = np.array(range(1, 26))
        self.dij = np.array([0, 576, 946, 597, 373,
                             559, 707, 1208, 602, 692,
                             681, 1934, 332, 595, 906,
                             425, 755, 672, 1587, 526,
                             484, 2141, 2182, 410, 540])
        self.pop = np.array([1596000, 2071000, 3376000, 6978000, 1345000,
                             2064000, 2378000, 1239000, 4435000, 1999000,
                             1274000, 7042000, 834000, 1268000, 1965000,
                             1046000, 12131000, 4824000, 969000, 2401000,
                             2410000, 2847000, 1425000, 1089000, 2909000])
        self.dt = pd.DataFrame({'origins': self.o,
                                'destinations': self.d,
                                'pop': self.pop,
                                'Dij': self.dij,
                                'flows': self.f})

    def test_single_parameter(self):
        # Fit a production-constrained model with a power distance decay,
        # then verify system, parameter, fit and entropy statistics against
        # precomputed reference values.
        model = grav.ProductionConstrained(self.dt, 'origins', 'destinations', 'flows',
                                           ['pop'], 'Dij', 'pow')
        # Expected system-level statistics.
        ss = {'obs_mean_trip_len': 736.52834197296534,
              'pred_mean_trip_len': 734.40974204773784,
              'OD_pairs': 24,
              'predicted_flows': 242873.00000000003,
              'avg_dist_trav': 737.0,
              'num_destinations': 24,
              'observed_flows': 242873,
              'avg_dist': 851.0,
              'num_origins': 1}
        # Expected parameter-level statistics (log-likelihoods, SEs).
        ps = {'beta': {'LL_zero_val': -3.057415839736517,
                       'relative_likelihood_stat': 24833.721614296166,
                       'standard_error': 0.0052734418614330883},
              'all_params': {'zero_vals_LL': -3.1780538303479453,
                             'mle_vals_LL': -3.0062909275101761},
              'pop': {'LL_zero_val': -3.1773474269437778,
                      'relative_likelihood_stat': 83090.010373874276,
                      'standard_error': 0.0027673052892085684}}
        # Expected goodness-of-fit statistics.
        fs = {'r_squared': 0.60516003720997413,
              'srmse': 0.57873206718148507}
        # Expected entropy-based statistics.
        es = {'pred_obs_deviance': 0.1327,
              'entropy_ratio': 0.5642,
              'maximum_entropy': 3.1781,
              'max_pred_deviance': 0.1718,
              'variance_obs_entropy': 2.55421e-06,
              'predicted_entropy': 3.0063,
              't_stat_entropy': 66.7614,
              'max_obs_deviance': 0.3045,
              'observed_entropy': 2.8736,
              'variance_pred_entropy': 1.39664e-06}
        # stats.* helpers populate the corresponding attributes on `model`
        # as a side effect; the assertions read those attributes.
        sys_stats = stats.sys_stats(model)
        self.assertAlmostEqual(model.system_stats['obs_mean_trip_len'], ss['obs_mean_trip_len'], 4)
        self.assertAlmostEqual(model.system_stats['pred_mean_trip_len'], ss['pred_mean_trip_len'], 4)
        self.assertAlmostEqual(model.system_stats['OD_pairs'], ss['OD_pairs'])
        self.assertAlmostEqual(model.system_stats['predicted_flows'], ss['predicted_flows'])
        self.assertAlmostEqual(model.system_stats['avg_dist_trav'], ss['avg_dist_trav'])
        self.assertAlmostEqual(model.system_stats['num_destinations'], ss['num_destinations'])
        self.assertAlmostEqual(model.system_stats['observed_flows'], ss['observed_flows'])
        self.assertAlmostEqual(model.system_stats['avg_dist'], ss['avg_dist'], 4)
        self.assertAlmostEqual(model.system_stats['num_origins'], ss['num_origins'])
        param_stats = stats.param_stats(model)
        self.assertAlmostEqual(model.parameter_stats['beta']['LL_zero_val'], ps['beta']['LL_zero_val'], 4)
        self.assertAlmostEqual(model.parameter_stats['beta']['relative_likelihood_stat'],
                               ps['beta']['relative_likelihood_stat'], 4)
        self.assertAlmostEqual(model.parameter_stats['beta']['standard_error'], ps['beta']['standard_error'], 4)
        self.assertAlmostEqual(model.parameter_stats['pop']['LL_zero_val'], ps['pop']['LL_zero_val'], 4)
        self.assertAlmostEqual(model.parameter_stats['pop']['relative_likelihood_stat'],
                               ps['pop']['relative_likelihood_stat'], 4)
        self.assertAlmostEqual(model.parameter_stats['pop']['standard_error'], ps['pop']['standard_error'], 4)
        self.assertAlmostEqual(model.parameter_stats['all_params']['zero_vals_LL'], ps['all_params']['zero_vals_LL'], 4)
        self.assertAlmostEqual(model.parameter_stats['all_params']['mle_vals_LL'], ps['all_params']['mle_vals_LL'], 4)
        fit_stats = stats.fit_stats(model)
        self.assertAlmostEqual(model.fit_stats['r_squared'], fs['r_squared'], 4)
        self.assertAlmostEqual(model.fit_stats['srmse'], fs['srmse'], 4)
        ent_stats = stats.ent_stats(model)
        self.assertAlmostEqual(model.entropy_stats['pred_obs_deviance'], es['pred_obs_deviance'], 4)
        self.assertAlmostEqual(model.entropy_stats['entropy_ratio'], es['entropy_ratio'], 4)
        self.assertAlmostEqual(model.entropy_stats['maximum_entropy'], es['maximum_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['max_pred_deviance'], es['max_pred_deviance'], 4)
        self.assertAlmostEqual(model.entropy_stats['variance_obs_entropy'], es['variance_obs_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['predicted_entropy'], es['predicted_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['t_stat_entropy'], es['t_stat_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['max_obs_deviance'], es['max_obs_deviance'], 4)
        self.assertAlmostEqual(model.entropy_stats['observed_entropy'], es['observed_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['variance_pred_entropy'], es['variance_pred_entropy'], 4)
class MultipleParameter(unittest.TestCase):
    """Unit tests statistics when there are multiple parameters estimated"""

    def setUp(self):
        # 9x9 origin-destination system (81 pairs) for a doubly-constrained
        # gravity model: observed flows and the symmetric distance matrix.
        self.f = np.array([0, 180048, 79223, 26887, 198144, 17995, 35563, 30528, 110792,
                           283049, 0, 300345, 67280, 718673, 55094, 93434, 87987, 268458,
                           87267, 237229, 0, 281791, 551483, 230788, 178517, 172711, 394481,
                           29877, 60681, 286580, 0, 143860, 49892, 185618, 181868, 274629,
                           130830, 382565, 346407, 92308, 0, 252189, 192223, 89389, 279739,
                           21434, 53772, 287340, 49828, 316650, 0, 141679, 27409, 87938,
                           30287, 64645, 161645, 144980, 199466, 121366, 0, 134229, 289880,
                           21450, 43749, 97808, 113683, 89806, 25574, 158006, 0, 437255,
                           72114, 133122, 229764, 165405, 266305, 66324, 252039, 342948, 0])
        self.o = np.repeat(np.array(range(1, 10)), 9)
        self.d = np.tile(np.array(range(1, 10)), 9)
        self.dij = np.array([0, 219, 1009, 1514, 974, 1268, 1795, 2420, 3174,
                             219, 0, 831, 1336, 755, 1049, 1576, 2242, 2996,
                             1009, 831, 0, 505, 1019, 662, 933, 1451, 2205,
                             1514, 1336, 505, 0, 1370, 888, 654, 946, 1700,
                             974, 755, 1019, 1370, 0, 482, 1144, 2278, 2862,
                             1268, 1049, 662, 888, 482, 0, 662, 1795, 2380,
                             1795, 1576, 933, 654, 1144, 662, 0, 1287, 1779,
                             2420, 2242, 1451, 946, 2278, 1795, 1287, 0, 754,
                             3147, 2996, 2205, 1700, 2862, 2380, 1779, 754, 0])
        self.dt = pd.DataFrame({'Origin': self.o,
                                'Destination': self.d,
                                'flows': self.f,
                                'Dij': self.dij})

    def test_multiple_parameter(self):
        # Fit a doubly-constrained model with exponential distance decay and
        # verify all statistic groups against precomputed reference values.
        model = grav.DoublyConstrained(self.dt, 'Origin', 'Destination', 'flows', 'Dij', 'exp')
        # Expected system-level statistics.
        ss = {'obs_mean_trip_len': 1250.9555521611339,
              'pred_mean_trip_len': 1250.9555521684863,
              'OD_pairs': 72, 'predicted_flows': 12314322.0,
              'avg_dist_trav': 1251.0, 'num_destinations': 9,
              'observed_flows': 12314322, 'avg_dist': 1414.0,
              'num_origins': 9}
        # Expected parameter-level statistics.
        ps = {'beta': {'LL_zero_val': -4.1172103581711941,
                       'relative_likelihood_stat': 2053596.3814015209,
                       'standard_error': 4.9177433418433932e-07},
              'all_params': {'zero_vals_LL': -4.1172102183395936,
                             'mle_vals_LL': -4.0338279201692675}}
        # Expected goodness-of-fit statistics.
        fs = {'r_squared': 0.89682406680906979,
              'srmse': 0.24804939821988789}
        # Expected entropy-based statistics.
        es = {'pred_obs_deviance': 0.0314,
              'entropy_ratio': 0.8855,
              'maximum_entropy': 4.2767,
              'max_pred_deviance': 0.2429,
              'variance_obs_entropy': 3.667e-08,
              'predicted_entropy': 4.0338,
              't_stat_entropy': 117.1593,
              'max_obs_deviance': 0.2743,
              'observed_entropy': 4.0024,
              'variance_pred_entropy': 3.516e-08}
        # stats.* helpers attach result dicts to `model` as a side effect.
        sys_stats = stats.sys_stats(model)
        self.assertAlmostEqual(model.system_stats['obs_mean_trip_len'], ss['obs_mean_trip_len'], 4)
        self.assertAlmostEqual(model.system_stats['pred_mean_trip_len'], ss['pred_mean_trip_len'], 4)
        self.assertAlmostEqual(model.system_stats['OD_pairs'], ss['OD_pairs'])
        self.assertAlmostEqual(model.system_stats['predicted_flows'], ss['predicted_flows'])
        self.assertAlmostEqual(model.system_stats['avg_dist_trav'], ss['avg_dist_trav'])
        self.assertAlmostEqual(model.system_stats['num_destinations'], ss['num_destinations'])
        self.assertAlmostEqual(model.system_stats['observed_flows'], ss['observed_flows'])
        self.assertAlmostEqual(model.system_stats['avg_dist'], ss['avg_dist'], 4)
        self.assertAlmostEqual(model.system_stats['num_origins'], ss['num_origins'])
        param_stats = stats.param_stats(model)
        self.assertAlmostEqual(model.parameter_stats['beta']['LL_zero_val'], ps['beta']['LL_zero_val'], 4)
        self.assertAlmostEqual(model.parameter_stats['beta']['relative_likelihood_stat'],
                               ps['beta']['relative_likelihood_stat'], 4)
        self.assertAlmostEqual(model.parameter_stats['beta']['standard_error'], ps['beta']['standard_error'], 4)
        self.assertAlmostEqual(model.parameter_stats['all_params']['zero_vals_LL'], ps['all_params']['zero_vals_LL'], 4)
        self.assertAlmostEqual(model.parameter_stats['all_params']['mle_vals_LL'], ps['all_params']['mle_vals_LL'], 4)
        fit_stats = stats.fit_stats(model)
        self.assertAlmostEqual(model.fit_stats['r_squared'], fs['r_squared'], 4)
        self.assertAlmostEqual(model.fit_stats['srmse'], fs['srmse'], 4)
        ent_stats = stats.ent_stats(model)
        self.assertAlmostEqual(model.entropy_stats['pred_obs_deviance'], es['pred_obs_deviance'], 4)
        self.assertAlmostEqual(model.entropy_stats['entropy_ratio'], es['entropy_ratio'], 4)
        self.assertAlmostEqual(model.entropy_stats['maximum_entropy'], es['maximum_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['max_pred_deviance'], es['max_pred_deviance'], 4)
        self.assertAlmostEqual(model.entropy_stats['variance_obs_entropy'], es['variance_obs_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['predicted_entropy'], es['predicted_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['t_stat_entropy'], es['t_stat_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['max_obs_deviance'], es['max_obs_deviance'], 4)
        self.assertAlmostEqual(model.entropy_stats['observed_entropy'], es['observed_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['variance_pred_entropy'], es['variance_pred_entropy'], 4)
if __name__ == '__main__':
unittest.main() | bsd-3-clause |
micmn/shogun | applications/tapkee/swissroll_embedding.py | 12 | 2600 | import numpy
# Swissroll embedding demo: run several shogun dimensionality-reduction
# converters on the same 3-D swissroll and plot the 2-D embeddings.
# NOTE(review): this is Python 2 code (`print x, 'done'` statement below).
numpy.random.seed(40)

# tt: per-point color values; X: 3 x N swissroll coordinates.
tt = numpy.genfromtxt('../../data/toy/swissroll_color.dat',unpack=True).T
X = numpy.genfromtxt('../../data/toy/swissroll.dat',unpack=True).T
N = X.shape[1]

# Each entry is (configured converter instance, plot label).
converters = []

from shogun import LocallyLinearEmbedding
lle = LocallyLinearEmbedding()
lle.set_k(9)
converters.append((lle, "LLE with k=%d" % lle.get_k()))

from shogun import MultidimensionalScaling
mds = MultidimensionalScaling()
converters.append((mds, "Classic MDS"))

lmds = MultidimensionalScaling()
lmds.set_landmark(True)
lmds.set_landmark_number(20)
converters.append((lmds,"Landmark MDS with %d landmarks" % lmds.get_landmark_number()))

from shogun import Isomap
cisomap = Isomap()
cisomap.set_k(9)
converters.append((cisomap,"Isomap with k=%d" % cisomap.get_k()))

from shogun import DiffusionMaps
from shogun import GaussianKernel
dm = DiffusionMaps()
dm.set_t(2)
dm.set_width(1000.0)
converters.append((dm,"Diffusion Maps with t=%d, sigma=%.1f" % (dm.get_t(),dm.get_width())))

from shogun import HessianLocallyLinearEmbedding
hlle = HessianLocallyLinearEmbedding()
hlle.set_k(6)
converters.append((hlle,"Hessian LLE with k=%d" % (hlle.get_k())))

from shogun import LocalTangentSpaceAlignment
ltsa = LocalTangentSpaceAlignment()
ltsa.set_k(6)
converters.append((ltsa,"LTSA with k=%d" % (ltsa.get_k())))

from shogun import LaplacianEigenmaps
le = LaplacianEigenmaps()
le.set_k(20)
le.set_tau(100.0)
converters.append((le,"Laplacian Eigenmaps with k=%d, tau=%d" % (le.get_k(),le.get_tau())))

import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

fig = plt.figure()

# Older matplotlib lacks the projection='3d' subplot API; fall back to a
# standalone Axes3D figure in that case.
# NOTE(review): bare `except:` also swallows unrelated errors.
new_mpl = False

try:
	swiss_roll_fig = fig.add_subplot(3,3,1, projection='3d')
	new_mpl = True
except:
	figure = plt.figure()
	swiss_roll_fig = Axes3D(figure)

swiss_roll_fig.scatter(X[0], X[1], X[2], s=10, c=tt, cmap=plt.cm.Spectral)
swiss_roll_fig._axis3don = False
plt.suptitle('Swissroll embedding',fontsize=9)
plt.subplots_adjust(hspace=0.4)

from shogun import RealFeatures

for (i, (converter, label)) in enumerate(converters):
	# Reload X each iteration: some converters may mutate their input.
	X = numpy.genfromtxt('../../data/toy/swissroll.dat',unpack=True).T
	features = RealFeatures(X)
	converter.set_target_dim(2)
	converter.parallel.set_num_threads(1)
	new_feats = converter.embed(features).get_feature_matrix()
	if not new_mpl:
		embedding_subplot = fig.add_subplot(4,2,i+1)
	else:
		embedding_subplot = fig.add_subplot(3,3,i+2)
	embedding_subplot.scatter(new_feats[0],new_feats[1], c=tt, cmap=plt.cm.Spectral)
	plt.axis('tight')
	plt.xticks([]), plt.yticks([])
	plt.title(label,fontsize=9)
	print converter.get_name(), 'done'

plt.show()
| gpl-3.0 |
georgetown-analytics/skidmarks | bin/playin.py | 1 | 1374 | import csv
import pandas as pd
# Exploratory scratch script over a single trip CSV (Python 2: bare
# `print` statements below). Several snippets are broken -- flagged inline.
df = pd.read_csv('./output/trip/1_1.csv')

'''
#This is the pandas library code to index and return the values of a column; the first number is rows, second is columns.
The ':' is used to represent "through these values". For example, 1:10 symbolizes numbers 1 through values 10.
'''
df.loc[1:]['Direction (deg)']

# Code to get columnar aggregate values
# NOTE(review): `df2` is never defined in this script -- presumably `df`
# was intended; this line raises NameError as written.
df2.loc[1:]['Velocity (mph)'].describe()

from itertools import islice

numbers = iter(range(10))
for i in numbers:
    print i
    # Consuming the shared iterator inside the loop skips ahead 3 items.
    next(islice(numbers, 3, 3), None)

from itertools import islice

numbers = df.loc[700:750]['Change in Direction per s']
windows = []
for i in numbers:
    print i
    window = next(islice(numbers, 3, 7), None)
    windows.append(window)

# takes a sliced up list of numbers from a window and selects the fourth index (i.e [3]).
# NOTE(review): `start` is undefined here -- NameError as written.
print list(islice(numbers,start,None,1))[3]

# Rolling windows to calculate turns; need to use negative turns for
numbers = df.loc[700:750]['Change in Direction per s']
print pd.rolling_sum(numbers, window = 3)

# takes a sstandard deviation of a slice of degrees.
# NOTE(review): `statistics` is never imported (and does not exist on
# Python 2) -- NameError as written.
statistics.stdev(df.loc[10:15]['Direction (deg)'])

for s in list(islice(numbers,0,None,3)):
    print s

# NOTE(review): keyword should be `window` (as in the call above), not
# `windows` -- pd.rolling_sum raises TypeError as written.
crazy = pd.rolling_sum(numbers, windows = 3)
for l in crazy:
    if l >= 45:
        print "turn"
    else:
        print "nothing"
alexsavio/scikit-learn | examples/cluster/plot_kmeans_digits.py | 42 | 4491 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)

from time import time
import numpy as np
import matplotlib.pyplot as plt

from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale

np.random.seed(42)

digits = load_digits()
data = scale(digits.data)

n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target

# Subsample size used for the (expensive) silhouette computation.
sample_size = 300

print("n_digits: %d, \t n_samples %d, \t n_features %d"
      % (n_digits, n_samples, n_features))


print(82 * '_')
print('init\t\ttime\tinertia\thomo\tcompl\tv-meas\tARI\tAMI\tsilhouette')


def bench_k_means(estimator, name, data):
    # Fit `estimator` on `data` and print a table row of timing, inertia and
    # clustering-quality metrics (reads module-level `labels`/`sample_size`).
    t0 = time()
    estimator.fit(data)
    print('%-9s\t%.2fs\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
          % (name, (time() - t0), estimator.inertia_,
             metrics.homogeneity_score(labels, estimator.labels_),
             metrics.completeness_score(labels, estimator.labels_),
             metrics.v_measure_score(labels, estimator.labels_),
             metrics.adjusted_rand_score(labels, estimator.labels_),
             metrics.adjusted_mutual_info_score(labels, estimator.labels_),
             metrics.silhouette_score(data, estimator.labels_,
                                      metric='euclidean',
                                      sample_size=sample_size)))

bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
              name="k-means++", data=data)

bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
              name="random", data=data)

# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
              name="PCA-based",
              data=data)
print(82 * '_')

###############################################################################
# Visualize the results on PCA-reduced data

reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)

# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].

# Plot the decision boundary. For that, we will assign a color to each
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()),
           cmap=plt.cm.Paired,
           aspect='auto', origin='lower')

plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
            marker='x', s=169, linewidths=3,
            color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
          'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
adyavanapalli/Muon-Mass | Gabe_Owen_.py | 1 | 3641 | import numpy as np
from matplotlib import pyplot as plt
import math
import random as ran
from scipy.integrate import quad
from scipy import interpolate
# Monte-Carlo fit of the muon mass: for each candidate mass, simulate decay
# electrons in a cylindrical spark-chamber geometry, histogram the spark
# counts, and compare to experimental counts via a sum of squared residuals.
# NOTE(review): block structure below is reconstructed from data
# dependencies (the function defs use mc2[p], so they sit inside the p loop)
# -- confirm against the original file.
pi = np.pi
sqrt = np.sqrt
cos = np.cos
sin = np.sin

# Assign constant
hbar = 6.582*10**(-24)
gw = 10**(-6)
MW = .0008039 #MeV
N = 10000
Nexp = 43.
tp = 0.9525 #cm
R = 7.62 #cm
tg = 0.635 #cm
thetamax = pi/6. #degrees
S0 = 5.09 #MeV/cm
X0 = 8.9 #cm
Ecrit = S0*X0 #MeV
mc2 = np.arange(90.,110.,.5) #MeV
d = []

#Begin loop over each muon guess mass
for p in range(len(mc2)):
    #Functions
    # Michel spectrum: probability density of electron energy Ee.
    def P(Ee):
        return (16/mc2[p]**5)*((mc2[p]*Ee)**2)*(3-(4*Ee/mc2[p]))
    # Inverse-CDF sampling of decay radius inside the cylinder.
    def P_radius(guess):
        return (R/2)*math.sqrt(guess)
    # Radiation-length path of an electron with energy Ee.
    def l(Ee):
        return X0*math.log(1+(Ee/Ecrit))
    # Escape path length from (r, phi, theta) scaled by plate/gap fraction.
    def lesc(r,phi,theta):
        return ((-r*(cos(phi))+sqrt((r**2)*((cos(phi))**2)+((R**2)-(r**2))))/(sin(theta)))*((tp)/(tp+tg))
    # Number of plates (sparks) crossed along path lsub.
    def ns(lsub,theta,z0):
        return 1 + math.floor((lsub*cos(theta)-z0)/(tp))
    def Ne(r,Ee):
        return (Nexp/N)*r*((mc2[p]*Ee)**2)*(3-((4*Ee)/(mc2[p])))
    #Data
    n_s = np.array([3,4,5,6,7])
    N_exp = np.array([3.949275,7.137681,13.514493,15.615942,4.021739])
    N_exp_err = np.array([2.119565,2.807971,3.804348,3.985507,2.119565])
    #Assign Variables
    z0 = np.random.uniform(0,tp,N)
    r = []
    theta = np.random.uniform(0,thetamax,N)
    phi = np.random.uniform(0,2*pi,N)
    M = 8*10**(14)
    a = 0.0
    b = mc2[p]/2.
    x = np.random.uniform(a,b,N)
    u = np.random.uniform(0.0,M,N)
    Ee = []
    # Rejection-sample N electron energies from the Michel spectrum.
    for i in range(N):
        choice = 0
        while choice != 1:
            yGuess = ran.uniform(0,P(.5*mc2[p]))
            xGuess = ran.uniform(0,.5*mc2[p])
            if yGuess <= P(xGuess):
                choice = 1
        Ee.append(xGuess)
        guess=ran.uniform(0,1)
        r.append(P_radius(guess))
    plt.figure()
    plt.hist(Ee, bins=50,histtype='step',color='r')
    plt.xlabel("Product Electron Energy (MeV)")
    plt.figtext(.15,.7,r"$m_{\mu}c^{2}$ = %3.2f $\rm{MeV}$"%(mc2[p]))
    h_nsi = []
    h_Nei = []
    h_PEei = []
    t = 0
    # Clip each path at the chamber-escape length and count sparks.
    for i in range(len(Ee)):
        li = l(Ee[i])
        lesci = lesc(r[i],phi[i],theta[i])
        if li > lesci:
            li = lesci
        PEei = P(Ee[i])
        h_PEei.append(PEei)
        nsi = ns(li,theta[i],z0[i])
        Nei = Ne(r[i],Ee[i])
        h_Nei.append(Nei)
        h_nsi.append(nsi)
    # Normalize the histogram so it integrates to the Nexp observed decays.
    weights = np.empty_like(Ee)
    weights.fill(Nexp * 8 / (8-0) / len(Ee))
    plt.figure()
    h = plt.hist(h_nsi, bins=8, range=(0, 8), weights=weights,histtype='step',color='b',align='left',label='Simulation')
    plt.errorbar(n_s,N_exp,yerr=N_exp_err,fmt='ok',label='Experiment')
    plt.xlim(0.,9.)
    plt.xlabel('Number of Sparks')
    plt.ylabel('Number of Muon Decays')
    plt.figtext(.15,.7,r"$m_{\mu}c^{2}$ = %3.2f $\rm{MeV}$"%(mc2[p]))
    plt.figtext(.15,.65,r"$\theta_{max}$ = $30^{\circ}$")
    plt.legend(loc=2)
    print ("Muon mass =",mc2[p],"MeV")
    j = h[0]
    k = h[1]
    t = 0
    # Chi-squared over the 3..7-spark bins (histogram bins 3..7).
    t = ((N_exp[0] - j[3])/N_exp_err[0])**2 +((N_exp[1] - j[4])/N_exp_err[1])**2 +((N_exp[2] - j[5])/N_exp_err[2])**2 +((N_exp[3] - j[6])/N_exp_err[3])**2 +((N_exp[4] - j[7])/N_exp_err[4])**2
    print ("Sum of Square Residuals = ",t)
    d.append(t)

np.savez("P440_muon_data",mc2=mc2,d=d)
# Best-fit mass minimizes the residual sum.
m = mc2[d.index(min(d))]
print ("The mass of a muon is",m,"MeV")
plt.figure()
plt.plot(mc2,d,'-ok')
plt.xlabel(r'$\rm{m_{\mu}c^{2}}$ (MeV)')
plt.ylabel('Sum of Squared Residuals')
plt.show()
466152112/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
    """Description of one delegating meta-estimator under test.

    Bundles the estimator's display name, a factory that wraps a
    sub-estimator, the delegated methods to skip, and the fit arguments.
    """

    def __init__(self, name, construct, skip_methods=(),
                 fit_args=make_classification()):
        # NOTE: the fit_args default is evaluated once at class-definition
        # time and shared by every instance that relies on it.
        self.construct = construct
        self.skip_methods = skip_methods
        self.fit_args = fit_args
        self.name = name
# Meta-estimators that delegate attribute access to a wrapped sub-estimator,
# together with the delegated methods each one is known not to expose.
DELEGATING_METAESTIMATORS = [
    DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
    DelegatorData('GridSearchCV',
                  lambda est: GridSearchCV(
                      est, param_grid={'param': [5]}, cv=2),
                  skip_methods=['score']),
    DelegatorData('RandomizedSearchCV',
                  lambda est: RandomizedSearchCV(
                      est, param_distributions={'param': [5]}, cv=2, n_iter=1),
                  skip_methods=['score']),
    DelegatorData('RFE', RFE,
                  skip_methods=['transform', 'inverse_transform', 'score']),
    DelegatorData('RFECV', RFECV,
                  skip_methods=['transform', 'inverse_transform', 'score']),
    DelegatorData('BaggingClassifier', BaggingClassifier,
                  skip_methods=['transform', 'inverse_transform', 'score',
                                'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
    # Ensures specified metaestimators have methods iff subestimator does
    def hides(method):
        # Decorator: expose `method` as a property that raises
        # AttributeError when the instance's hidden_method names it,
        # simulating a sub-estimator that lacks that method.
        @property
        def wrapper(obj):
            if obj.hidden_method == method.__name__:
                raise AttributeError('%r is hidden' % obj.hidden_method)
            return functools.partial(method, obj)
        return wrapper

    class SubEstimator(BaseEstimator):
        # Minimal estimator whose delegated methods can be selectively
        # hidden via the hidden_method constructor argument.
        def __init__(self, param=1, hidden_method=None):
            self.param = param
            self.hidden_method = hidden_method

        def fit(self, X, y=None, *args, **kwargs):
            self.coef_ = np.arange(X.shape[1])
            return True

        def _check_fit(self):
            if not hasattr(self, 'coef_'):
                raise RuntimeError('Estimator is not fit')

        @hides
        def inverse_transform(self, X, *args, **kwargs):
            self._check_fit()
            return X

        @hides
        def transform(self, X, *args, **kwargs):
            self._check_fit()
            return X

        @hides
        def predict(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])

        @hides
        def predict_proba(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])

        @hides
        def predict_log_proba(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])

        @hides
        def decision_function(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])

        @hides
        def score(self, X, *args, **kwargs):
            self._check_fit()
            return 1.0

    # All delegated method names (everything public except fit*).
    methods = [k for k in iterkeys(SubEstimator.__dict__)
               if not k.startswith('_') and not k.startswith('fit')]
    methods.sort()

    for delegator_data in DELEGATING_METAESTIMATORS:
        delegate = SubEstimator()
        delegator = delegator_data.construct(delegate)
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            assert_true(hasattr(delegate, method))
            assert_true(hasattr(delegator, method),
                        msg="%s does not have method %r when its delegate does"
                            % (delegator_data.name, method))
            # delegation before fit raises an exception
            assert_raises(Exception, getattr(delegator, method),
                          delegator_data.fit_args[0])

        delegator.fit(*delegator_data.fit_args)
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            # smoke test delegation
            getattr(delegator, method)(delegator_data.fit_args[0])

        # When the delegate hides a method, the delegator must not claim
        # to have it either.
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            delegate = SubEstimator(hidden_method=method)
            delegator = delegator_data.construct(delegate)
            assert_false(hasattr(delegate, method))
            assert_false(hasattr(delegator, method),
                         msg="%s has method %r when its delegate does not"
                             % (delegator_data.name, method))
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/site-packages/dask/dataframe/tests/test_dataframe.py | 2 | 108508 | import sys
import textwrap
from distutils.version import LooseVersion
from itertools import product
from operator import add
import pandas as pd
import pandas.util.testing as tm
import numpy as np
import pytest
import dask
import dask.array as da
import dask.dataframe as dd
from dask.base import compute_as_if_collection
from dask.utils import put_lines, M
from dask.dataframe.core import repartition_divisions, aca, _concat, Scalar
from dask.dataframe import methods
from dask.dataframe.utils import (assert_eq, make_meta, assert_max_deps,
PANDAS_VERSION)
if PANDAS_VERSION >= '0.20.0':
from pandas.io.formats import format as pandas_format
else:
from pandas.formats import format as pandas_format
# Shared three-partition fixture used by most tests in this module.
# Divisions are (0, 5, 9, 9): the last two partitions meet at index 9.
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
                              index=[0, 1, 3]),
       ('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
                              index=[5, 6, 8]),
       ('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
                              index=[9, 9, 9])}
meta = make_meta({'a': 'i8', 'b': 'i8'}, index=pd.Index([], 'i8'))
d = dd.DataFrame(dsk, 'x', meta, [0, 5, 9, 9])
full = d.compute()  # concrete pandas equivalent used as the expected value
def test_Dataframe():
    """Basic arithmetic, selection and reductions match the pandas result."""
    expected = pd.Series([2, 3, 4, 5, 6, 7, 8, 9, 10],
                         index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
                         name='a')
    assert_eq(d['a'] + 1, expected)
    tm.assert_index_equal(d.columns, pd.Index(['a', 'b']))
    assert_eq(d[d['b'] > 2], full[full['b'] > 2])
    assert_eq(d[['a', 'b']], full[['a', 'b']])
    assert_eq(d.a, full.a)
    assert d.b.mean().compute() == full.b.mean()
    assert np.allclose(d.b.var().compute(), full.b.var())
    assert np.allclose(d.b.std().compute(), full.b.std())
    # Each .index access builds a new collection; its name must be stable.
    assert d.index._name == d.index._name  # this is deterministic
    assert repr(d)
def test_head_tail():
    """head/tail match pandas, come from the first/last partition, and
    produce deterministic graphs that depend on n."""
    assert_eq(d.head(2), full.head(2))
    assert_eq(d.head(3), full.head(3))
    assert_eq(d.head(2), dsk[('x', 0)].head(2))
    assert_eq(d['a'].head(2), full['a'].head(2))
    assert_eq(d['a'].head(3), full['a'].head(3))
    assert_eq(d['a'].head(2), dsk[('x', 0)]['a'].head(2))
    assert (sorted(d.head(2, compute=False).dask) ==
            sorted(d.head(2, compute=False).dask))
    assert (sorted(d.head(2, compute=False).dask) !=
            sorted(d.head(3, compute=False).dask))
    assert_eq(d.tail(2), full.tail(2))
    assert_eq(d.tail(3), full.tail(3))
    assert_eq(d.tail(2), dsk[('x', 2)].tail(2))
    assert_eq(d['a'].tail(2), full['a'].tail(2))
    assert_eq(d['a'].tail(3), full['a'].tail(3))
    assert_eq(d['a'].tail(2), dsk[('x', 2)]['a'].tail(2))
    assert (sorted(d.tail(2, compute=False).dask) ==
            sorted(d.tail(2, compute=False).dask))
    assert (sorted(d.tail(2, compute=False).dask) !=
            sorted(d.tail(3, compute=False).dask))
def test_head_npartitions():
    """head may draw from several partitions; npartitions=-1 means all,
    and asking for more partitions than exist raises ValueError."""
    assert_eq(d.head(5, npartitions=2), full.head(5))
    assert_eq(d.head(5, npartitions=2, compute=False), full.head(5))
    assert_eq(d.head(5, npartitions=-1), full.head(5))
    assert_eq(d.head(7, npartitions=-1), full.head(7))
    assert_eq(d.head(2, npartitions=-1), full.head(2))
    with pytest.raises(ValueError):
        d.head(2, npartitions=5)
@pytest.mark.skipif(sys.version_info[:2] == (3, 3),
                    reason="Python3.3 uses pytest2.7.2, w/o warns method")
def test_head_npartitions_warn():
    """head emits no spurious warnings for the common call patterns."""
    with pytest.warns(None):
        d.head(100)
    with pytest.warns(None):
        d.head(7)
    with pytest.warns(None):
        d.head(7, npartitions=2)
def test_index_head():
    """Index.head(n) returns the first n labels of the pandas index."""
    for n in (2, 3):
        assert_eq(d.index.head(n), full.index[:n])
def test_Series():
    """Column access yields dd.Series; arithmetic stays lazy and correct."""
    column = d.a
    assert isinstance(column, dd.Series)
    assert isinstance(column + 1, dd.Series)
    assert_eq(d + 1, full + 1)
def test_Index():
    """The .index accessor round-trips object and datetime indexes and has
    no .index of its own."""
    for case in [pd.DataFrame(np.random.randn(10, 5), index=list('abcdefghij')),
                 pd.DataFrame(np.random.randn(10, 5),
                              index=pd.date_range('2011-01-01', freq='D',
                                                  periods=10))]:
        ddf = dd.from_pandas(case, 3)
        assert_eq(ddf.index, case.index)
        pytest.raises(AttributeError, lambda: ddf.index.index)
def test_Scalar():
    """Scalar exposes .dtype only when its meta is a dtype-bearing value,
    and its repr reflects dtype vs type accordingly."""
    val = np.int64(1)
    s = Scalar({('a', 0): val}, 'a', 'i8')
    assert hasattr(s, 'dtype')
    assert 'dtype' in dir(s)
    assert_eq(s, val)
    assert repr(s) == "dd.Scalar<a, dtype=int64>"
    # A Timestamp has no dtype, so the Scalar must not advertise one.
    val = pd.Timestamp('2001-01-01')
    s = Scalar({('a', 0): val}, 'a', val)
    assert not hasattr(s, 'dtype')
    assert 'dtype' not in dir(s)
    assert_eq(s, val)
    assert repr(s) == "dd.Scalar<a, type=Timestamp>"
def test_attributes():
    """Column names appear in dir() only when they are valid identifiers;
    unknown attributes raise AttributeError."""
    assert 'a' in dir(d)
    assert 'foo' not in dir(d)
    pytest.raises(AttributeError, lambda: d.foo)
    # Names with spaces or non-string names are not attribute-accessible.
    df = dd.from_pandas(pd.DataFrame({'a b c': [1, 2, 3]}), npartitions=2)
    assert 'a b c' not in dir(df)
    df = dd.from_pandas(pd.DataFrame({'a': [1, 2], 5: [1, 2]}), npartitions=2)
    assert 'a' in dir(df)
    assert 5 not in dir(df)
    df = dd.from_pandas(tm.makeTimeDataFrame(), npartitions=3)
    pytest.raises(AttributeError, lambda: df.foo)
def test_column_names():
    """Column selection preserves order; binary ops keep or drop the name."""
    tm.assert_index_equal(d.columns, pd.Index(['a', 'b']))
    tm.assert_index_equal(d[['b', 'a']].columns, pd.Index(['b', 'a']))
    assert d['a'].name == 'a'
    assert (d['a'] + 1).name == 'a'
    # Combining two differently-named series yields an unnamed result.
    assert (d['a'] + d['b']).name is None
def test_index_names():
    """The index name survives the pandas -> dask -> compute round trip."""
    assert d.index.name is None
    idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name='x')
    df = pd.DataFrame(np.random.randn(10, 5), idx)
    ddf = dd.from_pandas(df, 3)
    assert ddf.index.name == 'x'
    assert ddf.index.compute().name == 'x'
@pytest.mark.parametrize(
    'npartitions',
    [1, pytest.mark.xfail(2, reason='pandas join removes freq')]
)
def test_timezone_freq(npartitions):
    """Timestamp freq survives the round trip through dask (xfail for >1
    partition, where the underlying pandas join drops it)."""
    s_naive = pd.Series(pd.date_range('20130101', periods=10))
    s_aware = pd.Series(pd.date_range('20130101', periods=10, tz='US/Eastern'))
    pdf = pd.DataFrame({'tz': s_aware, 'notz': s_naive})
    ddf = dd.from_pandas(pdf, npartitions=npartitions)
    assert pdf.tz[0].freq == ddf.compute().tz[0].freq == ddf.tz.compute()[0].freq
def test_rename_columns():
    """Assigning .columns renames both collection and meta; a length
    mismatch raises, and MultiIndex columns can be flattened."""
    # GH 819
    df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
                       'b': [7, 6, 5, 4, 3, 2, 1]})
    ddf = dd.from_pandas(df, 2)
    ddf.columns = ['x', 'y']
    df.columns = ['x', 'y']
    tm.assert_index_equal(ddf.columns, pd.Index(['x', 'y']))
    tm.assert_index_equal(ddf._meta.columns, pd.Index(['x', 'y']))
    assert_eq(ddf, df)
    msg = r"Length mismatch: Expected axis has 2 elements, new values have 4 elements"
    with pytest.raises(ValueError) as err:
        ddf.columns = [1, 2, 3, 4]
    assert msg in str(err.value)
    # Multi-index columns
    df = pd.DataFrame({('A', '0') : [1, 2, 2, 3], ('B', 1) : [1, 2, 3, 4]})
    ddf = dd.from_pandas(df, npartitions=2)
    df.columns = ['x', 'y']
    ddf.columns = ['x', 'y']
    tm.assert_index_equal(ddf.columns, pd.Index(['x', 'y']))
    tm.assert_index_equal(ddf._meta.columns, pd.Index(['x', 'y']))
    assert_eq(ddf, df)
def test_rename_series():
    """Assigning .name on a Series or Index renames it in place, matching
    the same in-place mutation on the pandas object."""
    # GH 819
    s = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
    ds = dd.from_pandas(s, 2)
    s.name = 'renamed'
    ds.name = 'renamed'
    assert s.name == 'renamed'
    assert_eq(ds, s)
    ind = s.index
    dind = ds.index
    ind.name = 'renamed'
    dind.name = 'renamed'
    assert ind.name == 'renamed'
    assert_eq(dind, ind)
def test_rename_series_method():
    """Series.rename: by scalar (with/without inplace), by callable or
    series on the index, and the sorted_index/known_divisions interplay."""
    # Series name
    s = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
    ds = dd.from_pandas(s, 2)
    assert_eq(ds.rename('y'), s.rename('y'))
    assert ds.name == 'x'  # no mutation
    assert_eq(ds.rename(), s.rename())
    ds.rename('z', inplace=True)
    s.rename('z', inplace=True)
    assert ds.name == 'z'
    assert_eq(ds, s)
    # Series index
    s = pd.Series(['a', 'b', 'c', 'd', 'e', 'f', 'g'], name='x')
    ds = dd.from_pandas(s, 2)
    for is_sorted in [True, False]:
        res = ds.rename(lambda x: x ** 2, sorted_index=is_sorted)
        assert_eq(res, s.rename(lambda x: x ** 2))
        assert res.known_divisions == is_sorted
        res = ds.rename(s, sorted_index=is_sorted)
        assert_eq(res, s.rename(s))
        assert res.known_divisions == is_sorted
    # A non-monotone mapping with sorted_index=True must be rejected.
    with pytest.raises(ValueError):
        ds.rename(lambda x: -x, sorted_index=True)
    assert_eq(ds.rename(lambda x: -x), s.rename(lambda x: -x))
    res = ds.rename(ds)
    assert_eq(res, s.rename(s))
    assert not res.known_divisions
    # Without known divisions, sorted_index cannot establish them.
    ds2 = ds.clear_divisions()
    res = ds2.rename(lambda x: x**2, sorted_index=True)
    assert_eq(res, s.rename(lambda x: x**2))
    assert not res.known_divisions
    res = ds.rename(lambda x: x**2, inplace=True, sorted_index=True)
    assert res is ds
    s.rename(lambda x: x**2, inplace=True)
    assert_eq(ds, s)
def test_describe():
    """describe matches pandas (data chosen so approximate quantiles are
    exact); split_every changes the graph name but not the result."""
    # prepare test case which approx quantiles will be the same as actuals
    s = pd.Series(list(range(20)) * 4)
    df = pd.DataFrame({'a': list(range(20)) * 4, 'b': list(range(4)) * 20})
    ds = dd.from_pandas(s, 4)
    ddf = dd.from_pandas(df, 4)
    assert_eq(s.describe(), ds.describe())
    assert_eq(df.describe(), ddf.describe())
    assert_eq(s.describe(), ds.describe(split_every=2))
    assert_eq(df.describe(), ddf.describe(split_every=2))
    assert ds.describe(split_every=2)._name != ds.describe()._name
    assert ddf.describe(split_every=2)._name != ddf.describe()._name
    # remove string columns
    df = pd.DataFrame({'a': list(range(20)) * 4, 'b': list(range(4)) * 20,
                       'c': list('abcd') * 20})
    ddf = dd.from_pandas(df, 4)
    assert_eq(df.describe(), ddf.describe())
    assert_eq(df.describe(), ddf.describe(split_every=2))
def test_describe_empty():
    """describe on purely non-numeric data raises informative ValueErrors."""
    # https://github.com/dask/dask/issues/2326
    ddf = dd.from_pandas(pd.DataFrame({"A": ['a', 'b']}), 2)
    with pytest.raises(ValueError) as rec:
        ddf.describe()
    assert 'DataFrame contains only non-numeric data.' in str(rec)
    with pytest.raises(ValueError) as rec:
        ddf.A.describe()
    assert 'Cannot compute ``describe`` on object dtype.' in str(rec)
def test_cumulative():
    """cumsum/cumprod/cummin/cummax match pandas along both axes, with the
    ``out=`` parameter, for Series, and with NaNs (skipna True/False)."""
    df = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'))
    df_out = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'))
    ddf = dd.from_pandas(df, 5)
    ddf_out = dd.from_pandas(df_out, 5)
    assert_eq(ddf.cumsum(), df.cumsum())
    assert_eq(ddf.cumprod(), df.cumprod())
    assert_eq(ddf.cummin(), df.cummin())
    assert_eq(ddf.cummax(), df.cummax())
    assert_eq(ddf.cumsum(axis=1), df.cumsum(axis=1))
    assert_eq(ddf.cumprod(axis=1), df.cumprod(axis=1))
    assert_eq(ddf.cummin(axis=1), df.cummin(axis=1))
    assert_eq(ddf.cummax(axis=1), df.cummax(axis=1))
    # testing out parameter if out parameter supported
    if LooseVersion(np.__version__) >= '1.13.0':
        np.cumsum(ddf, out=ddf_out)
        assert_eq(ddf_out, df.cumsum())
        np.cumprod(ddf, out=ddf_out)
        assert_eq(ddf_out, df.cumprod())
        ddf.cummin(out=ddf_out)
        assert_eq(ddf_out, df.cummin())
        ddf.cummax(out=ddf_out)
        assert_eq(ddf_out, df.cummax())
        np.cumsum(ddf, out=ddf_out, axis=1)
        assert_eq(ddf_out, df.cumsum(axis=1))
        np.cumprod(ddf, out=ddf_out, axis=1)
        assert_eq(ddf_out, df.cumprod(axis=1))
        ddf.cummin(out=ddf_out, axis=1)
        assert_eq(ddf_out, df.cummin(axis=1))
        ddf.cummax(out=ddf_out, axis=1)
        assert_eq(ddf_out, df.cummax(axis=1))
    assert_eq(ddf.a.cumsum(), df.a.cumsum())
    assert_eq(ddf.a.cumprod(), df.a.cumprod())
    assert_eq(ddf.a.cummin(), df.a.cummin())
    assert_eq(ddf.a.cummax(), df.a.cummax())
    # With NaNs
    df = pd.DataFrame({'a': [1, 2, np.nan, 4, 5, 6, 7, 8],
                       'b': [1, 2, np.nan, np.nan, np.nan, 5, np.nan, np.nan],
                       'c': [np.nan] * 8})
    ddf = dd.from_pandas(df, 3)
    assert_eq(df.cumsum(), ddf.cumsum())
    assert_eq(df.cummin(), ddf.cummin())
    assert_eq(df.cummax(), ddf.cummax())
    assert_eq(df.cumprod(), ddf.cumprod())
    assert_eq(df.cumsum(skipna=False), ddf.cumsum(skipna=False))
    assert_eq(df.cummin(skipna=False), ddf.cummin(skipna=False))
    assert_eq(df.cummax(skipna=False), ddf.cummax(skipna=False))
    assert_eq(df.cumprod(skipna=False), ddf.cumprod(skipna=False))
    assert_eq(df.cumsum(axis=1), ddf.cumsum(axis=1))
    assert_eq(df.cummin(axis=1), ddf.cummin(axis=1))
    assert_eq(df.cummax(axis=1), ddf.cummax(axis=1))
    assert_eq(df.cumprod(axis=1), ddf.cumprod(axis=1))
    assert_eq(df.cumsum(axis=1, skipna=False), ddf.cumsum(axis=1, skipna=False))
    assert_eq(df.cummin(axis=1, skipna=False), ddf.cummin(axis=1, skipna=False))
    assert_eq(df.cummax(axis=1, skipna=False), ddf.cummax(axis=1, skipna=False))
    assert_eq(df.cumprod(axis=1, skipna=False), ddf.cumprod(axis=1, skipna=False))
def test_dropna():
    """dropna matches pandas for Series and DataFrame, including the
    ``how`` and ``subset`` keywords."""
    df = pd.DataFrame({'x': [np.nan, 2, 3, 4, np.nan, 6],
                       'y': [1, 2, np.nan, 4, np.nan, np.nan],
                       'z': [1, 2, 3, 4, np.nan, np.nan]},
                      index=[10, 20, 30, 40, 50, 60])
    ddf = dd.from_pandas(df, 3)
    assert_eq(ddf.x.dropna(), df.x.dropna())
    assert_eq(ddf.y.dropna(), df.y.dropna())
    assert_eq(ddf.z.dropna(), df.z.dropna())
    assert_eq(ddf.dropna(), df.dropna())
    assert_eq(ddf.dropna(how='all'), df.dropna(how='all'))
    assert_eq(ddf.dropna(subset=['x']), df.dropna(subset=['x']))
    assert_eq(ddf.dropna(subset=['y', 'z']), df.dropna(subset=['y', 'z']))
    assert_eq(ddf.dropna(subset=['y', 'z'], how='all'),
              df.dropna(subset=['y', 'z'], how='all'))
@pytest.mark.parametrize('lower, upper', [(2, 5), (2.5, 3.5)])
def test_clip(lower, upper):
    """clip/clip_lower/clip_upper match pandas for frame and series, with
    int and float bounds."""
    df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                       'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]})
    ddf = dd.from_pandas(df, 3)
    s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9])
    ds = dd.from_pandas(s, 3)
    assert_eq(ddf.clip(lower=lower, upper=upper),
              df.clip(lower=lower, upper=upper))
    assert_eq(ddf.clip(lower=lower), df.clip(lower=lower))
    assert_eq(ddf.clip(upper=upper), df.clip(upper=upper))
    assert_eq(ds.clip(lower=lower, upper=upper),
              s.clip(lower=lower, upper=upper))
    assert_eq(ds.clip(lower=lower), s.clip(lower=lower))
    assert_eq(ds.clip(upper=upper), s.clip(upper=upper))
    assert_eq(ddf.clip_lower(lower), df.clip_lower(lower))
    assert_eq(ddf.clip_lower(upper), df.clip_lower(upper))
    assert_eq(ddf.clip_upper(lower), df.clip_upper(lower))
    assert_eq(ddf.clip_upper(upper), df.clip_upper(upper))
    assert_eq(ds.clip_lower(lower), s.clip_lower(lower))
    assert_eq(ds.clip_lower(upper), s.clip_lower(upper))
    assert_eq(ds.clip_upper(lower), s.clip_upper(lower))
    assert_eq(ds.clip_upper(upper), s.clip_upper(upper))
def test_squeeze():
    """squeeze matches pandas for frames and series; axis=0 is
    unimplemented and invalid axes raise ValueError with clear messages."""
    df = pd.DataFrame({'x': [1, 3, 6]})
    df2 = pd.DataFrame({'x':[0]})
    s = pd.Series({'test': 0, 'b': 100})
    ddf = dd.from_pandas(df, 3)
    ddf2 = dd.from_pandas(df2, 3)
    ds = dd.from_pandas(s, 2)
    assert_eq(df.squeeze(), ddf.squeeze())
    assert_eq(pd.Series([0], name='x'), ddf2.squeeze())
    assert_eq(ds.squeeze(), s.squeeze())
    with pytest.raises(NotImplementedError) as info:
        ddf.squeeze(axis=0)
    msg = "{0} does not support squeeze along axis 0".format(type(ddf))
    assert msg in str(info.value)
    with pytest.raises(ValueError) as info:
        ddf.squeeze(axis=2)
    msg = 'No axis {0} for object type {1}'.format(2, type(ddf))
    assert msg in str(info.value)
    with pytest.raises(ValueError) as info:
        ddf.squeeze(axis='test')
    msg = 'No axis test for object type {0}'.format(type(ddf))
    assert msg in str(info.value)
def test_where_mask():
    """where/mask match pandas across aligned, misaligned-index,
    misaligned-column, repartitioned, and pandas-cond combinations."""
    pdf1 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                         'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]})
    ddf1 = dd.from_pandas(pdf1, 2)
    pdf2 = pd.DataFrame({'a': [True, False, True] * 3,
                         'b': [False, False, True] * 3})
    ddf2 = dd.from_pandas(pdf2, 2)
    # different index
    pdf3 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                         'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]},
                        index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
    ddf3 = dd.from_pandas(pdf3, 2)
    pdf4 = pd.DataFrame({'a': [True, False, True] * 3,
                         'b': [False, False, True] * 3},
                        index=[5, 6, 7, 8, 9, 10, 11, 12, 13])
    ddf4 = dd.from_pandas(pdf4, 2)
    # different columns
    pdf5 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                         'b': [9, 4, 2, 6, 2, 3, 1, 6, 2],
                         'c': [5, 6, 7, 8, 9, 10, 11, 12, 13]},
                        index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
    ddf5 = dd.from_pandas(pdf5, 2)
    pdf6 = pd.DataFrame({'a': [True, False, True] * 3,
                         'b': [False, False, True] * 3,
                         'c': [False] * 9,
                         'd': [True] * 9},
                        index=[5, 6, 7, 8, 9, 10, 11, 12, 13])
    ddf6 = dd.from_pandas(pdf6, 2)
    cases = [(ddf1, ddf2, pdf1, pdf2),
             (ddf1.repartition([0, 3, 6, 8]), ddf2, pdf1, pdf2),
             (ddf1, ddf4, pdf3, pdf4),
             (ddf3.repartition([0, 4, 6, 8]), ddf4.repartition([5, 9, 10, 13]),
              pdf3, pdf4),
             (ddf5, ddf6, pdf5, pdf6),
             (ddf5.repartition([0, 4, 7, 8]), ddf6, pdf5, pdf6),
             # use pd.DataFrame as cond
             (ddf1, pdf2, pdf1, pdf2),
             (ddf1, pdf4, pdf3, pdf4),
             (ddf5, pdf6, pdf5, pdf6)]
    for ddf, ddcond, pdf, pdcond in cases:
        assert isinstance(ddf, dd.DataFrame)
        assert isinstance(ddcond, (dd.DataFrame, pd.DataFrame))
        assert isinstance(pdf, pd.DataFrame)
        assert isinstance(pdcond, pd.DataFrame)
        assert_eq(ddf.where(ddcond), pdf.where(pdcond))
        assert_eq(ddf.mask(ddcond), pdf.mask(pdcond))
        assert_eq(ddf.where(ddcond, -ddf), pdf.where(pdcond, -pdf))
        assert_eq(ddf.mask(ddcond, -ddf), pdf.mask(pdcond, -pdf))
        assert_eq(ddf.where(ddcond.a, -ddf), pdf.where(pdcond.a, -pdf))
        assert_eq(ddf.mask(ddcond.a, -ddf), pdf.mask(pdcond.a, -pdf))
        assert_eq(ddf.a.where(ddcond.a), pdf.a.where(pdcond.a))
        assert_eq(ddf.a.mask(ddcond.a), pdf.a.mask(pdcond.a))
        assert_eq(ddf.a.where(ddcond.a, -ddf.a), pdf.a.where(pdcond.a, -pdf.a))
        assert_eq(ddf.a.mask(ddcond.a, -ddf.a), pdf.a.mask(pdcond.a, -pdf.a))
def test_map_partitions_multi_argument():
    """map_partitions combines multiple collections and scalars positionally."""
    assert_eq(dd.map_partitions(lambda a, b: a + b, d.a, d.b),
              full.a + full.b)
    assert_eq(dd.map_partitions(lambda a, b, c: a + b + c, d.a, d.b, 1),
              full.a + full.b + 1)
def test_map_partitions():
    """map_partitions works with/without meta, for reductions per partition,
    constant results, and Scalar inputs (with dtype inference)."""
    assert_eq(d.map_partitions(lambda df: df, meta=d), full)
    assert_eq(d.map_partitions(lambda df: df), full)
    result = d.map_partitions(lambda df: df.sum(axis=1))
    assert_eq(result, full.sum(axis=1))
    # A constant per partition yields one value per partition.
    assert_eq(d.map_partitions(lambda df: 1), pd.Series([1, 1, 1], dtype=np.int64),
              check_divisions=False)
    x = Scalar({('x', 0): 1}, 'x', int)
    result = dd.map_partitions(lambda x: 2, x)
    assert result.dtype in (np.int32, np.int64) and result.compute() == 2
    result = dd.map_partitions(lambda x: 4.0, x)
    assert result.dtype == np.float64 and result.compute() == 4.0
def test_map_partitions_names():
    """Identical map_partitions calls produce identical (deterministic)
    graph keys, including with an explicit token and multiple arguments."""
    func = lambda x: x
    assert (sorted(dd.map_partitions(func, d, meta=d).dask) ==
            sorted(dd.map_partitions(func, d, meta=d).dask))
    assert (sorted(dd.map_partitions(lambda x: x, d, meta=d, token=1).dask) ==
            sorted(dd.map_partitions(lambda x: x, d, meta=d, token=1).dask))
    func = lambda x, y: x
    assert (sorted(dd.map_partitions(func, d, d, meta=d).dask) ==
            sorted(dd.map_partitions(func, d, d, meta=d).dask))
def test_map_partitions_column_info():
    """map_partitions propagates column/name/dtype metadata.

    Covers DataFrame meta, Series meta (name preserved), dtype inference
    for derived Series, and an explicit ``(name, dtype)`` meta tuple.
    """
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)
    b = dd.map_partitions(lambda x: x, a, meta=a)
    tm.assert_index_equal(b.columns, a.columns)
    assert_eq(df, b)
    # Series identity with explicit Series meta keeps the name.
    # (The original repeated this three-line check twice verbatim; the
    # copy-paste duplicate was removed.)
    b = dd.map_partitions(lambda x: x, a.x, meta=a.x)
    assert b.name == a.x.name
    assert_eq(df.x, b)
    # dtype is inferred when no meta is given.
    b = dd.map_partitions(lambda df: df.x + df.y, a)
    assert isinstance(b, dd.Series)
    assert b.dtype == 'i8'
    # Explicit (name, dtype) tuple meta sets both.
    b = dd.map_partitions(lambda df: df.x + 1, a, meta=('x', 'i8'))
    assert isinstance(b, dd.Series)
    assert b.name == 'x'
    assert b.dtype == 'i8'
def test_map_partitions_method_names():
    """The DataFrame.map_partitions method mirrors the function form:
    metadata, dtype inference and explicit meta tuples all work."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)
    b = a.map_partitions(lambda x: x)
    assert isinstance(b, dd.DataFrame)
    tm.assert_index_equal(b.columns, a.columns)
    b = a.map_partitions(lambda df: df.x + 1)
    assert isinstance(b, dd.Series)
    assert b.dtype == 'i8'
    b = a.map_partitions(lambda df: df.x + 1, meta=('x', 'i8'))
    assert isinstance(b, dd.Series)
    assert b.name == 'x'
    assert b.dtype == 'i8'
def test_map_partitions_keeps_kwargs_readable():
    """Keyword arguments remain readable in the task graph and participate
    in the graph key (different kwargs -> different names)."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)
    def f(s, x=1):
        return s + x
    b = a.x.map_partitions(f, x=5)
    # NOTE: we'd like to ensure that we keep the keyword arguments readable
    # in the dask graph
    assert "['x', 5]" in str(b.dask)
    assert_eq(df.x + 5, b)
    assert a.x.map_partitions(f, x=5)._name != a.x.map_partitions(f, x=6)._name
def test_metadata_inference_single_partition_aligned_args():
    """Meta inference on a single partition passes real (non-empty) aligned
    arguments; regression test for broadcastable-series breakage."""
    # https://github.com/dask/dask/issues/3034
    # Previously broadcastable series functionality broke this
    df = pd.DataFrame({'x': [1, 2, 3, 4, 5]})
    ddf = dd.from_pandas(df, npartitions=1)
    def check(df, df_x):
        assert len(df) == len(df_x)
        assert len(df) > 0
        return df
    res = dd.map_partitions(check, ddf, ddf.x)
    assert_eq(res, ddf)
def test_drop_duplicates():
    """drop_duplicates matches pandas for frame, column and index; a
    different split_every yields a different graph name, and keep=False
    is not implemented."""
    pairs = [(d, full), (d.a, full.a), (d.index, full.index)]
    for dask_obj, pandas_obj in pairs:
        res = dask_obj.drop_duplicates()
        res2 = dask_obj.drop_duplicates(split_every=2)
        sol = pandas_obj.drop_duplicates()
        assert_eq(res, sol)
        assert_eq(res2, sol)
        assert res._name != res2._name
    with pytest.raises(NotImplementedError):
        d.drop_duplicates(keep=False)
def test_drop_duplicates_subset():
    """drop_duplicates honours keep='first'/'last' and list/str subsets."""
    df = pd.DataFrame({'x': [1, 2, 3, 1, 2, 3],
                       'y': ['a', 'a', 'b', 'b', 'c', 'c']})
    ddf = dd.from_pandas(df, npartitions=2)
    for kwarg in [{'keep': 'first'}, {'keep': 'last'}]:
        assert_eq(df.x.drop_duplicates(**kwarg),
                  ddf.x.drop_duplicates(**kwarg))
        for ss in [['x'], 'y', ['x', 'y']]:
            assert_eq(df.drop_duplicates(subset=ss, **kwarg),
                      ddf.drop_duplicates(subset=ss, **kwarg))
def test_get_partition():
    """get_partition returns each division's slice for frames and series;
    partitions cover the data exactly; out-of-range indices raise."""
    pdf = pd.DataFrame(np.random.randn(10, 5), columns=list('abcde'))
    ddf = dd.from_pandas(pdf, 3)
    assert ddf.divisions == (0, 4, 8, 9)
    # DataFrame
    div1 = ddf.get_partition(0)
    assert isinstance(div1, dd.DataFrame)
    assert_eq(div1, pdf.loc[0:3])
    div2 = ddf.get_partition(1)
    assert_eq(div2, pdf.loc[4:7])
    div3 = ddf.get_partition(2)
    assert_eq(div3, pdf.loc[8:9])
    assert len(div1) + len(div2) + len(div3) == len(pdf)
    # Series
    div1 = ddf.a.get_partition(0)
    assert isinstance(div1, dd.Series)
    assert_eq(div1, pdf.a.loc[0:3])
    div2 = ddf.a.get_partition(1)
    assert_eq(div2, pdf.a.loc[4:7])
    div3 = ddf.a.get_partition(2)
    assert_eq(div3, pdf.a.loc[8:9])
    assert len(div1) + len(div2) + len(div3) == len(pdf.a)
    with pytest.raises(ValueError):
        ddf.get_partition(-1)
    with pytest.raises(ValueError):
        ddf.get_partition(3)
def test_ndim():
    """DataFrames are 2-dimensional; Series and Index are 1-dimensional."""
    assert d.ndim == 2
    assert d.a.ndim == 1
    assert d.index.ndim == 1
def test_dtype():
    """Column dtypes agree with those of the computed pandas frame."""
    matches = d.dtypes == full.dtypes
    assert matches.all()
def test_value_counts():
    """value_counts matches pandas; split_every changes the graph name."""
    df = pd.DataFrame({'x': [1, 2, 1, 3, 3, 1, 4]})
    ddf = dd.from_pandas(df, npartitions=3)
    result = ddf.x.value_counts()
    expected = df.x.value_counts()
    assert_eq(result, expected)
    result2 = ddf.x.value_counts(split_every=2)
    assert_eq(result2, expected)
    assert result._name != result2._name
def test_unique():
    """Series.unique matches pandas (including NaNs); split_every changes
    the graph name but not the result."""
    pdf = pd.DataFrame({'x': [1, 2, 1, 3, 3, 1, 4, 2, 3, 1],
                        'y': ['a', 'c', 'b', np.nan, 'c',
                              'b', 'a', 'd', np.nan, 'a']})
    ddf = dd.from_pandas(pdf, npartitions=3)
    assert_eq(ddf.x.unique(), pd.Series(pdf.x.unique(), name='x'))
    assert_eq(ddf.y.unique(), pd.Series(pdf.y.unique(), name='y'))
    assert_eq(ddf.x.unique(split_every=2),
              pd.Series(pdf.x.unique(), name='x'))
    assert_eq(ddf.y.unique(split_every=2),
              pd.Series(pdf.y.unique(), name='y'))
    assert ddf.x.unique(split_every=2)._name != ddf.x.unique()._name
def test_isin():
    """isin matches pandas for list and Series arguments, on Series and
    DataFrame."""
    # Series test
    assert_eq(d.a.isin([0, 1, 2]), full.a.isin([0, 1, 2]))
    assert_eq(d.a.isin(pd.Series([0, 1, 2])),
              full.a.isin(pd.Series([0, 1, 2])))
    # DataFrame test
    assert_eq(d.isin([0, 1, 2]), full.isin([0, 1, 2]))
def test_len():
    """len() of the dask frame and of a column equals the pandas length."""
    for dask_obj, pandas_obj in [(d, full), (d.a, full.a)]:
        assert len(dask_obj) == len(pandas_obj)
def test_size():
    """.size matches pandas for frame, column and index."""
    for dask_obj, pandas_obj in [(d, full), (d.a, full.a),
                                 (d.index, full.index)]:
        assert_eq(dask_obj.size, pandas_obj.size)
def test_nbytes():
    """.nbytes matches pandas for a column and for the index."""
    for dask_obj, pandas_obj in [(d.a, full.a), (d.index, full.index)]:
        assert_eq(dask_obj.nbytes, pandas_obj.nbytes)
def test_quantile():
    """Approximate quantiles: multi-q on series and index (divisions equal
    the requested quantiles) and single-q returning a Scalar in range."""
    # series / multiple
    result = d.b.quantile([.3, .7])
    exp = full.b.quantile([.3, .7])  # result may different
    assert len(result) == 2
    assert result.divisions == (.3, .7)
    assert_eq(result.index, exp.index)
    assert isinstance(result, dd.Series)
    result = result.compute()
    assert isinstance(result, pd.Series)
    # Approximate values are only range-checked, not compared exactly.
    assert result.iloc[0] == 0
    assert 5 < result.iloc[1] < 6
    # index
    s = pd.Series(np.arange(10), index=np.arange(10))
    ds = dd.from_pandas(s, 2)
    result = ds.index.quantile([.3, .7])
    exp = s.quantile([.3, .7])
    assert len(result) == 2
    assert result.divisions == (.3, .7)
    assert_eq(result.index, exp.index)
    assert isinstance(result, dd.Series)
    result = result.compute()
    assert isinstance(result, pd.Series)
    assert 1 < result.iloc[0] < 2
    assert 7 < result.iloc[1] < 8
    # series / single
    result = d.b.quantile(.5)
    exp = full.b.quantile(.5)  # result may different
    assert isinstance(result, dd.core.Scalar)
    result = result.compute()
    assert 4 < result < 6
def test_quantile_missing():
    """Quantiles skip NaN values, matching pandas, for frame and column."""
    df = pd.DataFrame({"A": [0, np.nan, 2]})
    ddf = dd.from_pandas(df, 2)
    assert_eq(ddf.quantile(), df.quantile())
    assert_eq(ddf.A.quantile(), df.A.quantile())
def test_empty_quantile():
    """quantile([]) yields an empty, name-preserving series with unknown
    divisions."""
    result = d.b.quantile([])
    exp = full.b.quantile([])
    assert result.divisions == (None, None)
    assert result.name == 'b'
    assert result.compute().name == 'b'
    assert_eq(result, exp)
def test_dataframe_quantile():
    """DataFrame.quantile keeps numeric-column order, sets divisions from
    the quantiles, supports axis=1, and rejects multi-q with axis=1."""
    # column X is for test column order and result division
    df = pd.DataFrame({'A': np.arange(20),
                       'X': np.arange(20, 40),
                       'B': np.arange(10, 30),
                       'C': ['a', 'b', 'c', 'd'] * 5},
                      columns=['A', 'X', 'B', 'C'])
    ddf = dd.from_pandas(df, 3)
    result = ddf.quantile()
    assert result.npartitions == 1
    assert result.divisions == ('A', 'X')
    result = result.compute()
    assert isinstance(result, pd.Series)
    assert result.name == 0.5
    tm.assert_index_equal(result.index, pd.Index(['A', 'X', 'B']))
    # Approximate quantiles are range-checked, not compared exactly.
    assert (result > pd.Series([16, 36, 26], index=['A', 'X', 'B'])).all()
    assert (result < pd.Series([17, 37, 27], index=['A', 'X', 'B'])).all()
    result = ddf.quantile([0.25, 0.75])
    assert result.npartitions == 1
    assert result.divisions == (0.25, 0.75)
    result = result.compute()
    assert isinstance(result, pd.DataFrame)
    tm.assert_index_equal(result.index, pd.Index([0.25, 0.75]))
    tm.assert_index_equal(result.columns, pd.Index(['A', 'X', 'B']))
    minexp = pd.DataFrame([[1, 21, 11], [17, 37, 27]],
                          index=[0.25, 0.75], columns=['A', 'X', 'B'])
    assert (result > minexp).all().all()
    maxexp = pd.DataFrame([[2, 22, 12], [18, 38, 28]],
                          index=[0.25, 0.75], columns=['A', 'X', 'B'])
    assert (result < maxexp).all().all()
    assert_eq(ddf.quantile(axis=1), df.quantile(axis=1))
    pytest.raises(ValueError, lambda: ddf.quantile([0.25, 0.75], axis=1))
def test_index():
    """The dask index equals the index of the computed pandas frame."""
    expected = full.index
    assert_eq(d.index, expected)
def test_assign():
    """assign supports scalars, dask series, callables and timestamps; it
    rejects raw lists and any mix of known and unknown divisions."""
    d_unknown = dd.from_pandas(full, npartitions=3, sort=False)
    assert not d_unknown.known_divisions
    res = d.assign(c=1,
                   d='string',
                   e=d.a.sum(),
                   f=d.a + d.b,
                   g=lambda x: x.a + x.b,
                   dt=pd.Timestamp(2018, 2, 13))
    res_unknown = d_unknown.assign(c=1,
                                   d='string',
                                   e=d_unknown.a.sum(),
                                   f=d_unknown.a + d_unknown.b,
                                   g=lambda x: x.a + x.b,
                                   dt=pd.Timestamp(2018, 2, 13))
    sol = full.assign(c=1,
                      d='string',
                      e=full.a.sum(),
                      f=full.a + full.b,
                      g=lambda x: x.a + x.b,
                      dt=pd.Timestamp(2018, 2, 13))
    assert_eq(res, sol)
    assert_eq(res_unknown, sol)
    res = d.assign(c=full.a + 1)
    assert_eq(res, full.assign(c=full.a + 1))
    # divisions unknown won't work with pandas
    with pytest.raises(ValueError):
        d_unknown.assign(c=full.a + 1)
    # unsupported type
    with pytest.raises(TypeError):
        d.assign(c=list(range(9)))
    # Fails when assigning known divisions to unknown divisions
    with pytest.raises(ValueError):
        d_unknown.assign(foo=d.a)
    # Fails when assigning unknown divisions to known divisions
    with pytest.raises(ValueError):
        d.assign(foo=d_unknown.a)
def test_map():
    """Series.map works with callables, dicts and pandas Series (with and
    without explicit meta); mapping by a dask Series is a TypeError."""
    assert_eq(d.a.map(lambda x: x + 1), full.a.map(lambda x: x + 1))
    lk = dict((v, v + 1) for v in full.a.values)
    assert_eq(d.a.map(lk), full.a.map(lk))
    assert_eq(d.b.map(lk), full.b.map(lk))
    lk = pd.Series(lk)
    assert_eq(d.a.map(lk), full.a.map(lk))
    assert_eq(d.b.map(lk), full.b.map(lk))
    assert_eq(d.b.map(lk, meta=d.b), full.b.map(lk))
    assert_eq(d.b.map(lk, meta=('b', 'i8')), full.b.map(lk))
    pytest.raises(TypeError, lambda: d.a.map(d.b))
def test_concat():
    """_concat of two empty frames keeps the columns and stays empty."""
    parts = [pd.DataFrame(columns=['a', 'b']) for _ in range(2)]
    x = _concat(parts)
    assert len(x) == 0
    assert list(x.columns) == ['a', 'b']
def test_args():
    """Collections can be reconstructed from their ``_args`` tuple."""
    e = d.assign(c=d.a + 1)
    f = type(e)(*e._args)
    assert_eq(e, f)
    assert_eq(d.a, type(d.a)(*d.a._args))
    assert_eq(d.a.sum(), type(d.a.sum())(*d.a.sum()._args))
def test_known_divisions():
    """known_divisions is True for concrete divisions, False for Nones."""
    assert d.known_divisions
    df = dd.DataFrame(dsk, 'x', meta, divisions=[None, None, None])
    assert not df.known_divisions
def test_unknown_divisions():
    """Element-wise operations and reductions still work when all
    divisions are unknown."""
    dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
           ('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
           ('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
    meta = make_meta({'a': 'i8', 'b': 'i8'})
    d = dd.DataFrame(dsk, 'x', meta, [None, None, None, None])
    full = d.compute(scheduler='sync')
    assert_eq(d.a.sum(), full.a.sum())
    assert_eq(d.a + d.b + 1, full.a + full.b + 1)
@pytest.mark.parametrize('join', ['inner', 'outer', 'left', 'right'])
def test_align(join):
    """align matches pandas for all join types, for frames and series,
    with and without fill_value."""
    df1a = pd.DataFrame({'A': np.random.randn(10),
                         'B': np.random.randn(10)},
                        index=[1, 12, 5, 6, 3, 9, 10, 4, 13, 11])
    df1b = pd.DataFrame({'A': np.random.randn(10),
                         'B': np.random.randn(10)},
                        index=[0, 3, 2, 10, 5, 6, 7, 8, 12, 13])
    ddf1a = dd.from_pandas(df1a, 3)
    ddf1b = dd.from_pandas(df1b, 3)
    # DataFrame
    res1, res2 = ddf1a.align(ddf1b, join=join)
    exp1, exp2 = df1a.align(df1b, join=join)
    assert assert_eq(res1, exp1)
    assert assert_eq(res2, exp2)
    # Series
    res1, res2 = ddf1a['A'].align(ddf1b['B'], join=join)
    exp1, exp2 = df1a['A'].align(df1b['B'], join=join)
    assert assert_eq(res1, exp1)
    assert assert_eq(res2, exp2)
    # DataFrame with fill_value
    res1, res2 = ddf1a.align(ddf1b, join=join, fill_value=1)
    exp1, exp2 = df1a.align(df1b, join=join, fill_value=1)
    assert assert_eq(res1, exp1)
    assert assert_eq(res2, exp2)
    # Series
    res1, res2 = ddf1a['A'].align(ddf1b['B'], join=join, fill_value=1)
    exp1, exp2 = df1a['A'].align(df1b['B'], join=join, fill_value=1)
    assert assert_eq(res1, exp1)
    assert assert_eq(res2, exp2)
@pytest.mark.parametrize('join', ['inner', 'outer', 'left', 'right'])
def test_align_axis(join):
    """align with axis=0/1/'index'/'columns' matches pandas; invalid axes
    and axis=1 on a Series raise ValueError."""
    df1a = pd.DataFrame({'A': np.random.randn(10),
                         'B': np.random.randn(10),
                         'C': np.random.randn(10)},
                        index=[1, 12, 5, 6, 3, 9, 10, 4, 13, 11])
    df1b = pd.DataFrame({'B': np.random.randn(10),
                         'C': np.random.randn(10),
                         'D': np.random.randn(10)},
                        index=[0, 3, 2, 10, 5, 6, 7, 8, 12, 13])
    ddf1a = dd.from_pandas(df1a, 3)
    ddf1b = dd.from_pandas(df1b, 3)
    res1, res2 = ddf1a.align(ddf1b, join=join, axis=0)
    exp1, exp2 = df1a.align(df1b, join=join, axis=0)
    assert assert_eq(res1, exp1)
    assert assert_eq(res2, exp2)
    res1, res2 = ddf1a.align(ddf1b, join=join, axis=1)
    exp1, exp2 = df1a.align(df1b, join=join, axis=1)
    assert assert_eq(res1, exp1)
    assert assert_eq(res2, exp2)
    res1, res2 = ddf1a.align(ddf1b, join=join, axis='index')
    exp1, exp2 = df1a.align(df1b, join=join, axis='index')
    assert assert_eq(res1, exp1)
    assert assert_eq(res2, exp2)
    res1, res2 = ddf1a.align(ddf1b, join=join, axis='columns')
    exp1, exp2 = df1a.align(df1b, join=join, axis='columns')
    assert assert_eq(res1, exp1)
    assert assert_eq(res2, exp2)
    # invalid
    with pytest.raises(ValueError):
        ddf1a.align(ddf1b, join=join, axis='XXX')
    with pytest.raises(ValueError):
        ddf1a['A'].align(ddf1b['B'], join=join, axis=1)
def test_combine():
    """DataFrame/Series ``combine`` matches pandas for several functions and
    fill values, and tokenizes deterministically."""
    pdf1 = pd.DataFrame({'A': np.random.choice([1, 2, np.nan], 100),
                         'B': np.random.choice(['a', 'b', np.nan], 100)})
    pdf2 = pd.DataFrame({'A': np.random.choice([1, 2, 3], 100),
                         'B': np.random.choice(['a', 'b', 'c'], 100)})
    ddf1 = dd.from_pandas(pdf1, 4)
    ddf2 = dd.from_pandas(pdf2, 5)

    take_left = lambda a, b: a

    # Whole frames and both columns, with a dask or plain pandas "other".
    for dda, ddb, pa, pb in [(ddf1, ddf2, pdf1, pdf2),
                             (ddf1.A, ddf2.A, pdf1.A, pdf2.A),
                             (ddf1.B, ddf2.B, pdf1.B, pdf2.B)]:
        for func, fill_value in [(add, None), (add, 100), (take_left, None)]:
            expected = pa.combine(pb, func, fill_value=fill_value)
            assert_eq(dda.combine(ddb, func, fill_value=fill_value), expected)
            assert_eq(dda.combine(pb, func, fill_value=fill_value), expected)

    assert_eq(ddf1.combine(ddf2, add, overwrite=False),
              pdf1.combine(pdf2, add, overwrite=False))
    # Same expression twice -> same graph name.
    assert dda.combine(ddb, add)._name == dda.combine(ddb, add)._name
def test_combine_first():
    """``combine_first`` matches pandas for frames and for each column,
    whether the "other" operand is a dask or a plain pandas object."""
    pdf1 = pd.DataFrame({'A': np.random.choice([1, 2, np.nan], 100),
                         'B': np.random.choice(['a', 'b', np.nan], 100)})
    pdf2 = pd.DataFrame({'A': np.random.choice([1, 2, 3], 100),
                         'B': np.random.choice(['a', 'b', 'c'], 100)})
    ddf1 = dd.from_pandas(pdf1, 4)
    ddf2 = dd.from_pandas(pdf2, 5)

    # Whole DataFrames.
    expected = pdf1.combine_first(pdf2)
    assert_eq(ddf1.combine_first(ddf2), expected)
    assert_eq(ddf1.combine_first(pdf2), expected)

    # Individual Series.
    for col in ['A', 'B']:
        expected = pdf1[col].combine_first(pdf2[col])
        assert_eq(ddf1[col].combine_first(ddf2[col]), expected)
        assert_eq(ddf1[col].combine_first(pdf2[col]), expected)
def test_dataframe_picklable():
    """Dask collections round-trip through pickle and cloudpickle."""
    from pickle import loads, dumps
    cloudpickle = pytest.importorskip('cloudpickle')
    cp_dumps = cloudpickle.dumps

    frame = dd.from_pandas(tm.makeTimeDataFrame(), npartitions=3)
    frame = frame + 2

    # DataFrame, Series and Index survive both picklers unchanged.
    for obj in [frame, frame.A, frame.index]:
        assert_eq(obj, loads(dumps(obj)))
        assert_eq(obj, loads(cp_dumps(obj)))

    # Scalar graphs contain lambdas, so only cloudpickle can serialize them.
    total = frame.A.sum()
    assert_eq(total, loads(cp_dumps(total)))
def test_random_partitions():
    """``random_split`` is seeded-deterministic, covers every row exactly
    once, and rejects fractions that do not sum to one."""
    left, right = d.random_split([0.5, 0.5], 42)
    assert isinstance(left, dd.DataFrame)
    assert isinstance(right, dd.DataFrame)
    assert left._name != right._name
    # Each row lands in exactly one of the splits.
    assert len(left.compute()) + len(right.compute()) == len(full)

    # Re-splitting with the same seed reproduces the same graphs.
    left_again, right_again = d.random_split([0.5, 0.5], 42)
    assert left_again._name == left._name
    assert right_again._name == right._name

    # Different fractions produce entirely new names.
    parts = d.random_split([0.4, 0.5, 0.1], 42)
    names = {p._name for p in parts}
    names.update([left._name, right._name])
    assert len(names) == 5

    # Fractions must sum to 1.
    with pytest.raises(ValueError):
        d.random_split([0.4, 0.5], 42)
def test_series_round():
    """Series.round matches pandas."""
    pser = pd.Series([1.123, 2.123, 3.123, 1.234, 2.234, 3.234], name='a')
    dser = dd.from_pandas(pser, npartitions=3)
    assert_eq(dser.round(), pser.round())
@pytest.mark.slow
def test_repartition():
    """Repartitioning by explicit divisions preserves the data, for both
    DataFrames and Series, over integer and object (string) indexes."""

    def _check_split_data(orig, d):
        """Check data is split properly"""
        # Concatenating every 'repartition-split' task's output must
        # reconstruct the original data exactly.
        keys = [k for k in d.dask if k[0].startswith('repartition-split')]
        keys = sorted(keys)
        sp = pd.concat([compute_as_if_collection(dd.DataFrame, d.dask, k)
                        for k in keys])
        assert_eq(orig, sp)
        assert_eq(orig, d)

    df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
                      index=[10, 20, 30, 40, 50, 60])
    a = dd.from_pandas(df, 2)

    b = a.repartition(divisions=[10, 20, 50, 60])
    assert b.divisions == (10, 20, 50, 60)
    assert_eq(a, b)
    # The first output partition covers index values below 20 only.
    assert_eq(compute_as_if_collection(dd.DataFrame, b.dask, (b._name, 0)),
              df.iloc[:1])

    # Invalid division lists must be rejected.
    for div in [[20, 60], [10, 50], [1],  # first / last element mismatch
                [0, 60], [10, 70],  # do not allow to expand divisions by default
                [10, 50, 20, 60],  # not sorted
                [10, 10, 20, 60]]:  # not unique (last element can be duplicated)
        pytest.raises(ValueError, lambda: a.repartition(divisions=div))

    # Integer index: every valid refinement of divisions round-trips,
    # for all initial partition counts.
    pdf = pd.DataFrame(np.random.randn(7, 5), columns=list('abxyz'))
    for p in range(1, 7):
        ddf = dd.from_pandas(pdf, p)
        assert_eq(ddf, pdf)
        for div in [[0, 6], [0, 6, 6], [0, 5, 6], [0, 4, 6, 6],
                    [0, 2, 6], [0, 2, 6, 6],
                    [0, 2, 3, 6, 6], [0, 1, 2, 3, 4, 5, 6, 6]]:
            rddf = ddf.repartition(divisions=div)
            _check_split_data(ddf, rddf)
            assert rddf.divisions == tuple(div)
            assert_eq(pdf, rddf)

            rds = ddf.x.repartition(divisions=div)
            _check_split_data(ddf.x, rds)
            assert rds.divisions == tuple(div)
            assert_eq(pdf.x, rds)

        # expand divisions (allowed only with force=True)
        for div in [[-5, 10], [-2, 3, 5, 6], [0, 4, 5, 9, 10]]:
            rddf = ddf.repartition(divisions=div, force=True)
            _check_split_data(ddf, rddf)
            assert rddf.divisions == tuple(div)
            assert_eq(pdf, rddf)

            rds = ddf.x.repartition(divisions=div, force=True)
            _check_split_data(ddf.x, rds)
            assert rds.divisions == tuple(div)
            assert_eq(pdf.x, rds)

    # Object (string) index: same checks with lexicographic divisions.
    pdf = pd.DataFrame({'x': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                        'y': [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]},
                       index=list('abcdefghij'))
    for p in range(1, 7):
        ddf = dd.from_pandas(pdf, p)
        assert_eq(ddf, pdf)
        for div in [list('aj'), list('ajj'), list('adj'),
                    list('abfj'), list('ahjj'), list('acdj'), list('adfij'),
                    list('abdefgij'), list('abcdefghij')]:
            rddf = ddf.repartition(divisions=div)
            _check_split_data(ddf, rddf)
            assert rddf.divisions == tuple(div)
            assert_eq(pdf, rddf)

            rds = ddf.x.repartition(divisions=div)
            _check_split_data(ddf.x, rds)
            assert rds.divisions == tuple(div)
            assert_eq(pdf.x, rds)

        # expand divisions (allowed only with force=True)
        for div in [list('Yadijm'), list('acmrxz'), list('Yajz')]:
            rddf = ddf.repartition(divisions=div, force=True)
            _check_split_data(ddf, rddf)
            assert rddf.divisions == tuple(div)
            assert_eq(pdf, rddf)

            rds = ddf.x.repartition(divisions=div, force=True)
            _check_split_data(ddf.x, rds)
            assert rds.divisions == tuple(div)
            assert_eq(pdf.x, rds)
def test_repartition_divisions():
    """repartition_divisions builds 'b' boundary_slice tasks over the input
    partitions and 'c' output tasks that alias or concat them."""
    result = repartition_divisions([0, 6], [0, 6, 6], 'a', 'b', 'c')
    assert result == {('b', 0): (methods.boundary_slice, ('a', 0), 0, 6, False),
                      ('b', 1): (methods.boundary_slice, ('a', 0), 6, 6, True),
                      ('c', 0): ('b', 0),
                      ('c', 1): ('b', 1)}

    # Output partition 0 spans two input slices, so it becomes a concat.
    result = repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c')
    assert result == {('b', 0): (methods.boundary_slice, ('a', 0), 1, 3, False),
                      ('b', 1): (methods.boundary_slice, ('a', 1), 3, 4, False),
                      ('b', 2): (methods.boundary_slice, ('a', 1), 4, 6, False),
                      ('b', 3): (methods.boundary_slice, ('a', 1), 6, 7, True),
                      ('c', 0): (methods.concat, [('b', 0), ('b', 1)]),
                      ('c', 1): ('b', 2),
                      ('c', 2): ('b', 3)}
def test_repartition_on_pandas_dataframe():
    """``dd.repartition`` accepts raw pandas objects directly."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
                       index=[10, 20, 30, 40, 50, 60])

    result = dd.repartition(pdf, divisions=[10, 20, 50, 60])
    assert isinstance(result, dd.DataFrame)
    assert result.divisions == (10, 20, 50, 60)
    assert_eq(result, pdf)

    result = dd.repartition(pdf.y, divisions=[10, 20, 50, 60])
    assert isinstance(result, dd.Series)
    assert result.divisions == (10, 20, 50, 60)
    assert_eq(result, pdf.y)
@pytest.mark.parametrize('use_index', [True, False])
@pytest.mark.parametrize('n', [1, 2, 4, 5])
@pytest.mark.parametrize('k', [1, 2, 4, 5])
@pytest.mark.parametrize('dtype', [int, float, 'M8[ns]'])
@pytest.mark.parametrize('transform', [lambda df: df, lambda df: df.x])
def test_repartition_npartitions(use_index, n, k, dtype, transform):
    """repartition(npartitions=k) preserves the data and never produces an
    empty partition, across index dtypes and sorted/unsorted inputs."""
    frame = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6] * 10,
                          'y': list('abdabd') * 10},
                         index=pd.Series([10, 20, 30, 40, 50, 60] * 10,
                                         dtype=dtype))
    frame = transform(frame)
    before = dd.from_pandas(frame, npartitions=n, sort=use_index)
    after = before.repartition(npartitions=k)
    assert_eq(before, after)
    assert after.npartitions == k
    # Every resulting partition must hold at least one row.
    chunks = dask.get(after.dask, after.__dask_keys__())
    assert all(map(len, chunks))
def test_repartition_npartitions_same_limits():
    """Repartitioning sub-second timestamps must not raise (smoke test)."""
    stamps = [pd.Timestamp('2017-05-09 00:00:00.006000'),
              pd.Timestamp('2017-05-09 02:45:00.017999'),
              pd.Timestamp('2017-05-09 05:59:58.938999')]
    frame = pd.DataFrame({'x': [1, 2, 3]}, index=stamps)
    ddf = dd.from_pandas(frame, npartitions=2)
    ddf.repartition(npartitions=10)
def test_repartition_object_index():
    """Count-based repartition works on an object index; growing the
    partition count leaves divisions unknown."""
    frame = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6] * 10},
                         index=list('abdabd') * 10)
    before = dd.from_pandas(frame, npartitions=5)

    fewer = before.repartition(npartitions=2)
    assert fewer.npartitions == 2
    assert_eq(fewer, frame)

    more = before.repartition(npartitions=10)
    assert more.npartitions == 10
    assert_eq(more, frame)
    # Splitting an object index leaves the new divisions unknown.
    assert not more.known_divisions
@pytest.mark.slow
@pytest.mark.parametrize('npartitions', [1, 20, 243])
@pytest.mark.parametrize('freq', ['1D', '7D', '28h', '1h'])
@pytest.mark.parametrize('end', ['2000-04-15', '2000-04-15 12:37:01', '2000-01-01 12:37:00'])
@pytest.mark.parametrize('start', ['2000-01-01', '2000-01-01 12:30:00', '2000-01-01 12:30:00'])
def test_repartition_freq(npartitions, freq, start, end):
    """Frequency-based repartition preserves a minute-resolution frame."""
    start, end = pd.Timestamp(start), pd.Timestamp(end)
    index = pd.DatetimeIndex(start=start, end=end, freq='60s')
    frame = pd.DataFrame({'x': np.arange(len(index))}, index=index)
    ddf = dd.from_pandas(frame, npartitions=npartitions, name='x')
    repartitioned = ddf.repartition(freq=freq)
    assert_eq(repartitioned, frame)
def test_repartition_freq_divisions():
    """repartition(freq) snaps interior divisions to the frequency grid while
    keeping the exact index extremes as the outer divisions."""
    df = pd.DataFrame({'x': np.random.random(10)},
                      index=pd.DatetimeIndex(np.random.random(10) * 100e9))
    ddf = dd.from_pandas(df, npartitions=3)

    ddf2 = ddf.repartition(freq='15s')
    # Interior divisions land exactly on the 15-second grid.
    for div in ddf2.divisions[1:-1]:
        assert div == div.round('15s')
    # The outermost divisions keep the exact index bounds.
    assert ddf2.divisions[0] == df.index.min()
    assert ddf2.divisions[-1] == df.index.max()
    # BUG FIX: previously `assert_eq(ddf2, ddf2)` compared the result to
    # itself, which is vacuously true; compare against the source data.
    assert_eq(ddf2, df)
def test_repartition_freq_errors():
    """Frequency-based repartition requires a datetime index."""
    ddf = dd.from_pandas(pd.DataFrame({'x': [1, 2, 3]}), npartitions=1)
    with pytest.raises(TypeError) as info:
        ddf.repartition(freq='1s')
    message = str(info.value)
    assert 'only' in message
    assert 'timeseries' in message
def test_repartition_freq_month():
    """Month-end frequency yields roughly one partition per calendar month."""
    index = pd.date_range("2015-01-01 00:00", " 2015-05-01 23:50", freq="10min")
    frame = pd.DataFrame(np.random.randint(0, 100, size=(len(index), 4)),
                         columns=list('ABCD'), index=index)
    ddf = dd.from_pandas(frame, npartitions=1).repartition(freq='1M')
    assert_eq(frame, ddf)
    # Four whole months plus a partial fifth: between 3 and 6 partitions.
    assert 2 < ddf.npartitions <= 6
def test_embarrassingly_parallel_operations():
    """Elementwise / partitionwise operations match their pandas twins."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
                       index=[10, 20, 30, 40, 50, 60])
    ddf = dd.from_pandas(pdf, 2)

    assert_eq(ddf.x.astype('float32'), pdf.x.astype('float32'))
    assert ddf.x.astype('float32').compute().dtype == 'float32'

    assert_eq(ddf.x.dropna(), pdf.x.dropna())
    assert_eq(ddf.x.between(2, 4), pdf.x.between(2, 4))
    assert_eq(ddf.x.clip(2, 4), pdf.x.clip(2, 4))

    # Null predicates on both Series and DataFrame.
    for method in ['notnull', 'isnull']:
        assert_eq(getattr(ddf.x, method)(), getattr(pdf.x, method)())
        assert_eq(getattr(ddf, method)(), getattr(pdf, method)())

    assert len(ddf.sample(frac=0.5).compute()) < len(pdf)
def test_fillna():
    """fillna with scalars, pad/bfill methods, limits, and axis=1 matches
    pandas; unsupported combinations raise."""
    df = tm.makeMissingDataframe(0.8, 42)
    ddf = dd.from_pandas(df, npartitions=5, sort=False)

    assert_eq(ddf.fillna(100), df.fillna(100))
    assert_eq(ddf.A.fillna(100), df.A.fillna(100))

    assert_eq(ddf.fillna(method='pad'), df.fillna(method='pad'))
    assert_eq(ddf.A.fillna(method='pad'), df.A.fillna(method='pad'))

    assert_eq(ddf.fillna(method='bfill'), df.fillna(method='bfill'))
    assert_eq(ddf.A.fillna(method='bfill'), df.A.fillna(method='bfill'))

    assert_eq(ddf.fillna(method='pad', limit=2),
              df.fillna(method='pad', limit=2))
    assert_eq(ddf.A.fillna(method='pad', limit=2),
              df.A.fillna(method='pad', limit=2))

    assert_eq(ddf.fillna(method='bfill', limit=2),
              df.fillna(method='bfill', limit=2))
    assert_eq(ddf.A.fillna(method='bfill', limit=2),
              df.A.fillna(method='bfill', limit=2))

    # axis=1 fills within each row, so it is fully partition-local.
    assert_eq(ddf.fillna(100, axis=1), df.fillna(100, axis=1))
    assert_eq(ddf.fillna(method='pad', axis=1), df.fillna(method='pad', axis=1))
    assert_eq(ddf.fillna(method='pad', limit=2, axis=1),
              df.fillna(method='pad', limit=2, axis=1))

    # Unsupported combinations are rejected.
    pytest.raises(ValueError, lambda: ddf.A.fillna(0, axis=1))
    pytest.raises(NotImplementedError, lambda: ddf.fillna(0, limit=10))
    pytest.raises(NotImplementedError, lambda: ddf.fillna(0, limit=10, axis=1))

    # With a sparser frame, an unlimited pad raises at compute time while a
    # limited pad still works.
    # NOTE(review): the exact reason the unlimited pad raises ValueError here
    # is not visible from this test — confirm against the fillna implementation.
    df = tm.makeMissingDataframe(0.2, 42)
    ddf = dd.from_pandas(df, npartitions=5, sort=False)
    pytest.raises(ValueError, lambda: ddf.fillna(method='pad').compute())
    assert_eq(df.fillna(method='pad', limit=3),
              ddf.fillna(method='pad', limit=3))
def test_fillna_multi_dataframe():
    """A Series can be filled from another dask Series, matching pandas."""
    pdf = tm.makeMissingDataframe(0.8, 42)
    ddf = dd.from_pandas(pdf, npartitions=5, sort=False)

    assert_eq(ddf.A.fillna(ddf.B), pdf.A.fillna(pdf.B))
    assert_eq(ddf.B.fillna(ddf.A), pdf.B.fillna(pdf.A))
def test_ffill_bfill():
    """The ffill/bfill shortcuts match pandas along both axes."""
    pdf = tm.makeMissingDataframe(0.8, 42)
    ddf = dd.from_pandas(pdf, npartitions=5, sort=False)

    for method in ['ffill', 'bfill']:
        assert_eq(getattr(ddf, method)(), getattr(pdf, method)())
        assert_eq(getattr(ddf, method)(axis=1), getattr(pdf, method)(axis=1))
def test_fillna_series_types():
    """Filling from a Series keyed by column names matches pandas.

    Regression test — https://github.com/dask/dask/issues/2809
    """
    pdf = pd.DataFrame({"A": [1, np.nan, 3], "B": [1, np.nan, 3]})
    ddf = dd.from_pandas(pdf, npartitions=2)
    filler = pd.Series([1, 10], index=['A', 'C'])
    assert_eq(ddf.fillna(filler), pdf.fillna(filler))
def test_sample():
    """sample() is reproducible under a fixed seed and fresh otherwise."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
                       index=[10, 20, 30, 40, 50, 60])
    ddf = dd.from_pandas(pdf, 2)

    # A sample must at least be self-consistent.
    smoke = ddf.sample(frac=0.5)
    assert_eq(smoke, smoke)

    # Identical seeds give identical samples.
    seeded1 = ddf.sample(frac=0.5, random_state=1234)
    seeded2 = ddf.sample(frac=0.5, random_state=1234)
    assert_eq(seeded1, seeded2)

    # Unseeded calls get fresh tokens each time.
    assert ddf.sample(frac=0.5)._name != ddf.sample(frac=0.5)._name
def test_sample_without_replacement():
    """replace=False never yields duplicate index labels."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
                       index=[10, 20, 30, 40, 50, 60])
    ddf = dd.from_pandas(pdf, 2)
    sampled_index = ddf.sample(frac=0.7, replace=False).index.compute()
    assert len(sampled_index) == len(set(sampled_index))
def test_sample_raises():
    """A positional fraction is accepted with a warning; ``n=`` and a
    missing ``frac`` both raise ValueError."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
                       index=[10, 20, 30, 40, 50, 60])
    ddf = dd.from_pandas(pdf, 2)

    # Make sure frac is replaced with n when 0 <= n <= 1
    # This is so existing code (i.e. ddf.sample(0.5)) won't break
    with pytest.warns(UserWarning):
        positional = ddf.sample(0.5, random_state=1234)
    keyword = ddf.sample(frac=0.5, random_state=1234)
    assert_eq(positional, keyword)

    # Exact-count sampling is unsupported.
    with pytest.raises(ValueError):
        ddf.sample(n=10)

    # Make sure frac is provided
    with pytest.raises(ValueError):
        ddf.sample(frac=None)
def test_datetime_accessor():
    """The .dt accessor mirrors pandas and tokenizes deterministically."""
    df = pd.DataFrame({'x': [1, 2, 3, 4]})
    df['x'] = df.x.astype('M8[us]')

    a = dd.from_pandas(df, 2)

    assert 'date' in dir(a.x.dt)

    # pandas loses Series.name via datetime accessor
    # see https://github.com/pydata/pandas/issues/10712
    assert_eq(a.x.dt.date, df.x.dt.date, check_names=False)

    # to_pydatetime returns a numpy array in pandas, but a Series in dask
    assert_eq(a.x.dt.to_pydatetime(),
              pd.Series(df.x.dt.to_pydatetime(), index=df.index, dtype=object))

    # Building the same expression twice must produce identical graphs
    # (each side of == constructs a fresh graph).
    assert set(a.x.dt.date.dask) == set(a.x.dt.date.dask)
    assert set(a.x.dt.to_pydatetime().dask) == set(a.x.dt.to_pydatetime().dask)
def test_str_accessor():
    """The .str accessor mirrors pandas on Series and Index, forwards
    args/kwargs, tokenizes deterministically, and hides or rejects
    unsupported methods."""
    df = pd.DataFrame({'x': ['abc', 'bcd', 'cdef', 'DEFG'], 'y': [1, 2, 3, 4]},
                      index=['E', 'f', 'g', 'h'])

    ddf = dd.from_pandas(df, 2)

    # Check that str not in dir/hasattr for non-object columns
    assert 'str' not in dir(ddf.y)
    assert not hasattr(ddf.y, 'str')

    # not implemented methods don't show up
    assert 'get_dummies' not in dir(ddf.x.str)
    assert not hasattr(ddf.x.str, 'get_dummies')

    assert 'upper' in dir(ddf.x.str)
    assert_eq(ddf.x.str.upper(), df.x.str.upper())
    # Identical expressions must build identical graphs.
    assert set(ddf.x.str.upper().dask) == set(ddf.x.str.upper().dask)

    # The accessor also works on the index.
    assert 'upper' in dir(ddf.index.str)
    assert_eq(ddf.index.str.upper(), df.index.str.upper())
    assert set(ddf.index.str.upper().dask) == set(ddf.index.str.upper().dask)

    # make sure to pass thru args & kwargs
    assert 'contains' in dir(ddf.x.str)
    assert_eq(ddf.x.str.contains('a'), df.x.str.contains('a'))
    assert set(ddf.x.str.contains('a').dask) == set(ddf.x.str.contains('a').dask)

    assert_eq(ddf.x.str.contains('d', case=False), df.x.str.contains('d', case=False))
    assert (set(ddf.x.str.contains('d', case=False).dask) ==
            set(ddf.x.str.contains('d', case=False).dask))

    for na in [True, False]:
        assert_eq(ddf.x.str.contains('a', na=na), df.x.str.contains('a', na=na))
        assert (set(ddf.x.str.contains('a', na=na).dask) ==
                set(ddf.x.str.contains('a', na=na).dask))
    for regex in [True, False]:
        assert_eq(ddf.x.str.contains('a', regex=regex), df.x.str.contains('a', regex=regex))
        assert (set(ddf.x.str.contains('a', regex=regex).dask) ==
                set(ddf.x.str.contains('a', regex=regex).dask))

    # Slicing and element access through .str.
    assert_eq(ddf.x.str[:2], df.x.str[:2])
    assert_eq(ddf.x.str[1], df.x.str[1])

    # str.extractall
    assert_eq(ddf.x.str.extractall('(.*)b(.*)'),
              df.x.str.extractall('(.*)b(.*)'))

    # str.cat with dask and pandas operands, single or list.
    sol = df.x.str.cat(df.x.str.upper(), sep=':')
    assert_eq(ddf.x.str.cat(ddf.x.str.upper(), sep=':'), sol)
    assert_eq(ddf.x.str.cat(df.x.str.upper(), sep=':'), sol)
    assert_eq(ddf.x.str.cat([ddf.x.str.upper(), df.x.str.lower()], sep=':'),
              df.x.str.cat([df.x.str.upper(), df.x.str.lower()], sep=':'))

    # cat with plain strings / lists of strings is rejected in dask.
    for o in ['foo', ['foo']]:
        with pytest.raises(TypeError):
            ddf.x.str.cat(o)

    # Full concatenation (no "others") is not implemented in dask.
    with pytest.raises(NotImplementedError):
        ddf.x.str.cat(sep=':')
def test_empty_max():
    """max() skips empty partitions when reducing."""
    meta = make_meta({'x': 'i8'})
    # Hand-built graph: partition 0 has one row, partition 1 is empty.
    a = dd.DataFrame({('x', 0): pd.DataFrame({'x': [1]}),
                      ('x', 1): pd.DataFrame({'x': []})}, 'x',
                     meta, [None, None, None])
    assert_eq(a.x.max(), 1)
def test_query():
    """DataFrame.query matches pandas, including @-variable substitution."""
    pytest.importorskip('numexpr')

    pdf = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    ddf = dd.from_pandas(pdf, npartitions=2)

    assert_eq(ddf.query('x**2 > y'), pdf.query('x**2 > y'))
    assert_eq(ddf.query('x**2 > @value', local_dict={'value': 4}),
              pdf.query('x**2 > @value', local_dict={'value': 4}))
def test_eval():
    """DataFrame.eval matches pandas; in-place assignment is unsupported."""
    pytest.importorskip('numexpr')

    p = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    d = dd.from_pandas(p, npartitions=2)

    assert_eq(p.eval('x + y'), d.eval('x + y'))
    assert_eq(p.eval('z = x + y', inplace=False),
              d.eval('z = x + y', inplace=False))
    with pytest.raises(NotImplementedError):
        d.eval('z = x + y', inplace=True)

    # catch FutureWarning from pandas about assignment in eval
    with pytest.warns(None):
        if PANDAS_VERSION < '0.21.0':
            # NOTE(review): on older pandas, inplace=None appears to behave
            # like not-in-place; dask still rejects the ambiguous spelling —
            # verify against pandas 0.20 eval semantics.
            if p.eval('z = x + y', inplace=None) is None:
                with pytest.raises(NotImplementedError):
                    d.eval('z = x + y', inplace=None)
@pytest.mark.parametrize('include, exclude', [
    ([int], None),
    (None, [int]),
    ([np.number, object], [float]),
    (['datetime'], None)
])
def test_select_dtypes(include, exclude):
    """select_dtypes and the dtype/ftype counts match pandas."""
    n = 10
    # NOTE(review): 'clfoat' looks like a typo for 'cfloat'; it is a runtime
    # column name so it is left unchanged here — both sides use it equally.
    df = pd.DataFrame({'cint': [1] * n,
                       'cstr': ['a'] * n,
                       'clfoat': [1.] * n,
                       'cdt': pd.date_range('2016-01-01', periods=n)})
    a = dd.from_pandas(df, npartitions=2)
    result = a.select_dtypes(include=include, exclude=exclude)
    expected = df.select_dtypes(include=include, exclude=exclude)
    assert_eq(result, expected)

    # count dtypes
    tm.assert_series_equal(a.get_dtype_counts(), df.get_dtype_counts())
    tm.assert_series_equal(result.get_dtype_counts(),
                           expected.get_dtype_counts())

    # get_ftype_counts emits a FutureWarning from pandas 0.23 on.
    if PANDAS_VERSION >= '0.23.0':
        ctx = pytest.warns(FutureWarning)
    else:
        ctx = pytest.warns(None)

    with ctx:
        tm.assert_series_equal(a.get_ftype_counts(), df.get_ftype_counts())
        tm.assert_series_equal(result.get_ftype_counts(),
                               expected.get_ftype_counts())
def test_deterministic_apply_concat_apply_names():
    """aca-based operations tokenize deterministically: identical inputs
    produce identical graphs, and any differing argument changes the keys."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)

    assert sorted(a.x.nlargest(2).dask) == sorted(a.x.nlargest(2).dask)
    assert sorted(a.x.nlargest(2).dask) != sorted(a.x.nlargest(3).dask)
    assert (sorted(a.x.drop_duplicates().dask) ==
            sorted(a.x.drop_duplicates().dask))
    assert (sorted(a.groupby('x').y.mean().dask) ==
            sorted(a.groupby('x').y.mean().dask))

    # Test aca without passing in token string
    f = lambda a: a.nlargest(5)
    f2 = lambda a: a.nlargest(3)
    assert (sorted(aca(a.x, f, f, a.x._meta).dask) !=
            sorted(aca(a.x, f2, f2, a.x._meta).dask))
    assert (sorted(aca(a.x, f, f, a.x._meta).dask) ==
            sorted(aca(a.x, f, f, a.x._meta).dask))

    # Test aca with keywords
    def chunk(x, c_key=0, both_key=0):
        return x.sum() + c_key + both_key

    def agg(x, a_key=0, both_key=0):
        return pd.Series(x).sum() + a_key + both_key

    c_key = 2
    a_key = 3
    both_key = 4

    res = aca(a.x, chunk=chunk, aggregate=agg, chunk_kwargs={'c_key': c_key},
              aggregate_kwargs={'a_key': a_key}, both_key=both_key)
    # Same keyword values -> identical graph; changing both_key -> new keys.
    assert (sorted(res.dask) ==
            sorted(aca(a.x, chunk=chunk, aggregate=agg,
                       chunk_kwargs={'c_key': c_key},
                       aggregate_kwargs={'a_key': a_key},
                       both_key=both_key).dask))
    assert (sorted(res.dask) !=
            sorted(aca(a.x, chunk=chunk, aggregate=agg,
                       chunk_kwargs={'c_key': c_key},
                       aggregate_kwargs={'a_key': a_key},
                       both_key=0).dask))

    # chunk runs once per partition (2 of them), agg runs once.
    assert_eq(res, df.x.sum() + 2 * (c_key + both_key) + a_key + both_key)
def test_aca_meta_infer():
    """apply_concat_apply infers output metadata from a sample execution."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4],
                        'y': [5, 6, 7, 8]})
    ddf = dd.from_pandas(pdf, npartitions=2)

    def chunk(x, y, constant=1.0):
        return (x + y + constant).head()

    def agg(x):
        return x.head()

    # DataFrame output: chunk_kwargs override the default constant.
    result = aca([ddf, 2.0], chunk=chunk, aggregate=agg,
                 chunk_kwargs=dict(constant=2.0))
    assert_eq(result, (pdf + 2.0 + 2.0).head())

    # Should infer as a scalar
    result = aca([ddf.x], chunk=lambda x: pd.Series([x.sum()]),
                 aggregate=lambda x: x.sum())
    assert isinstance(result, Scalar)
    assert result.compute() == pdf.x.sum()
def test_aca_split_every():
    """apply_concat_apply with split_every builds a combine tree.

    With 15 partitions and split_every=3 there are 15 chunk calls, 7 combine
    calls and one aggregate; each step's additive constants make the final
    value exactly predictable.
    """
    df = pd.DataFrame({'x': [1] * 60})
    ddf = dd.from_pandas(df, npartitions=15)

    def chunk(x, y, constant=0):
        return x.sum() + y + constant

    def combine(x, constant=0):
        return x.sum() + constant + 1

    def agg(x, constant=0):
        return x.sum() + constant + 2

    f = lambda n: aca([ddf, 2.0], chunk=chunk, aggregate=agg, combine=combine,
                      chunk_kwargs=dict(constant=1.0),
                      combine_kwargs=dict(constant=2.0),
                      aggregate_kwargs=dict(constant=3.0),
                      split_every=n)

    # Tree fan-in never exceeds split_every.
    assert_max_deps(f(3), 3)
    assert_max_deps(f(4), 4, False)
    assert_max_deps(f(5), 5)
    # split_every >= npartitions collapses to a single aggregate step.
    assert set(f(15).dask.keys()) == set(f(ddf.npartitions).dask.keys())

    r3 = f(3)
    r4 = f(4)
    assert r3._name != r4._name
    # Only intersect on reading operations
    assert len(set(r3.dask.keys()) & set(r4.dask.keys())) == len(ddf.dask.keys())

    # Keywords are different for each step
    assert f(3).compute() == 60 + 15 * (2 + 1) + 7 * (2 + 1) + (3 + 2)
    # Keywords are same for each step
    res = aca([ddf, 2.0], chunk=chunk, aggregate=agg, combine=combine,
              constant=3.0, split_every=3)
    assert res.compute() == 60 + 15 * (2 + 3) + 7 * (3 + 1) + (3 + 2)
    # No combine provided, combine is agg
    res = aca([ddf, 2.0], chunk=chunk, aggregate=agg, constant=3, split_every=3)
    assert res.compute() == 60 + 15 * (2 + 3) + 8 * (3 + 2)

    # split_every must be >= 2
    with pytest.raises(ValueError):
        f(1)
    # combine_kwargs with no combine provided
    with pytest.raises(ValueError):
        aca([ddf, 2.0], chunk=chunk, aggregate=agg, split_every=3,
            chunk_kwargs=dict(constant=1.0),
            combine_kwargs=dict(constant=2.0),
            aggregate_kwargs=dict(constant=3.0))
def test_reduction_method():
    """Custom reductions via ``.reduction`` behave like built-in reductions.

    Covers chunk outputs that are scalars, Series and DataFrames, plus
    deterministic naming and the effect of ``chunk_kwargs`` on the token.
    """
    df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
    ddf = dd.from_pandas(df, npartitions=4)

    chunk = lambda x, val=0: (x >= val).sum()
    agg = lambda x: x.sum()

    # Output of chunk is a scalar
    res = ddf.x.reduction(chunk, aggregate=agg)
    assert_eq(res, df.x.count())

    # Output of chunk is a series
    res = ddf.reduction(chunk, aggregate=agg)
    assert res._name == ddf.reduction(chunk, aggregate=agg)._name
    assert_eq(res, df.count())

    # Test with keywords
    res2 = ddf.reduction(chunk, aggregate=agg, chunk_kwargs={'val': 25})
    # BUG FIX: this comparison was a bare expression (no ``assert``) and
    # therefore never checked anything.
    assert res2._name == ddf.reduction(chunk, aggregate=agg,
                                       chunk_kwargs={'val': 25})._name
    assert res2._name != res._name
    assert_eq(res2, (df >= 25).sum())

    # Output of chunk is a dataframe
    def sum_and_count(x):
        return pd.DataFrame({'sum': x.sum(), 'count': x.count()})

    res = ddf.reduction(sum_and_count,
                        aggregate=lambda x: x.groupby(level=0).sum())
    assert_eq(res, pd.DataFrame({'sum': df.sum(), 'count': df.count()}))
def test_reduction_method_split_every():
    """``reduction`` with split_every builds a combine tree.

    15 partitions with split_every=3 yield 15 chunk calls, 7 combine calls
    and one aggregate; the per-step constants make the result predictable.
    """
    df = pd.Series([1] * 60)
    ddf = dd.from_pandas(df, npartitions=15)

    def chunk(x, constant=0):
        return x.sum() + constant

    def combine(x, constant=0):
        return x.sum() + constant + 1

    def agg(x, constant=0):
        return x.sum() + constant + 2

    f = lambda n: ddf.reduction(chunk, aggregate=agg, combine=combine,
                                chunk_kwargs=dict(constant=1.0),
                                combine_kwargs=dict(constant=2.0),
                                aggregate_kwargs=dict(constant=3.0),
                                split_every=n)

    # Tree fan-in never exceeds split_every.
    assert_max_deps(f(3), 3)
    assert_max_deps(f(4), 4, False)
    assert_max_deps(f(5), 5)
    # split_every >= npartitions collapses to a single aggregate step.
    assert set(f(15).dask.keys()) == set(f(ddf.npartitions).dask.keys())

    r3 = f(3)
    r4 = f(4)
    assert r3._name != r4._name
    # Only intersect on reading operations
    assert len(set(r3.dask.keys()) & set(r4.dask.keys())) == len(ddf.dask.keys())

    # Keywords are different for each step
    assert f(3).compute() == 60 + 15 + 7 * (2 + 1) + (3 + 2)
    # Keywords are same for each step
    res = ddf.reduction(chunk, aggregate=agg, combine=combine, constant=3.0,
                        split_every=3)
    assert res.compute() == 60 + 15 * 3 + 7 * (3 + 1) + (3 + 2)
    # No combine provided, combine is agg
    res = ddf.reduction(chunk, aggregate=agg, constant=3.0, split_every=3)
    assert res.compute() == 60 + 15 * 3 + 8 * (3 + 2)

    # split_every must be >= 2
    with pytest.raises(ValueError):
        f(1)
    # combine_kwargs with no combine provided
    with pytest.raises(ValueError):
        ddf.reduction(chunk, aggregate=agg, split_every=3,
                      chunk_kwargs=dict(constant=1.0),
                      combine_kwargs=dict(constant=2.0),
                      aggregate_kwargs=dict(constant=3.0))
def test_pipe():
    """pipe forwards positional and keyword arguments unchanged."""
    pdf = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
    ddf = dd.from_pandas(pdf, npartitions=4)

    def shift_sum(x, y, z=0):
        return x + y + z

    assert_eq(ddf.pipe(shift_sum, 1, z=2), shift_sum(ddf, 1, z=2))
    assert_eq(ddf.x.pipe(shift_sum, 1, z=2), shift_sum(ddf.x, 1, z=2))
def test_gh_517():
    """Index.nunique counts distinct labels across partitions (GH-517)."""
    values = np.random.randn(100, 2)
    pdf = pd.DataFrame(values, columns=['a', 'b'])

    ddf = dd.from_pandas(pdf, 2)
    assert ddf.index.nunique().compute() == 100

    # Duplicated rows split over more partitions still count each label once.
    doubled = dd.from_pandas(pd.concat([pdf, pdf]), 5)
    assert doubled.index.nunique().compute() == 100
def test_drop_axis_1():
    """Column drops match pandas, including the errors='ignore' path."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4],
                        'y': [5, 6, 7, 8],
                        'z': [9, 10, 11, 12]})
    ddf = dd.from_pandas(pdf, npartitions=2)

    assert_eq(ddf.drop('y', axis=1), pdf.drop('y', axis=1))
    assert_eq(ddf.drop(['y', 'z'], axis=1), pdf.drop(['y', 'z'], axis=1))

    # A missing label raises unless explicitly ignored.
    with pytest.raises(ValueError):
        ddf.drop(['a', 'x'], axis=1)
    assert_eq(ddf.drop(['a', 'x'], axis=1, errors='ignore'),
              pdf.drop(['a', 'x'], axis=1, errors='ignore'))
def test_gh580():
    """NumPy ufuncs apply to dask Series (GH-580)."""
    df = pd.DataFrame({'x': np.arange(10, dtype=float)})
    ddf = dd.from_pandas(df, 2)
    assert_eq(np.cos(df['x']), np.cos(ddf['x']))
    # Applied a second time on purpose — presumably guards against state
    # left behind by the first ufunc application; verify against GH-580.
    assert_eq(np.cos(df['x']), np.cos(ddf['x']))
def test_rename_dict():
    """Column rename via a mapping matches pandas."""
    mapping = {'a': 'A', 'b': 'B'}
    assert_eq(d.rename(columns=mapping), full.rename(columns=mapping))
def test_rename_function():
    """Column rename via a callable matches pandas."""
    def to_upper(name):
        return name.upper()

    assert_eq(d.rename(columns=to_upper), full.rename(columns=to_upper))
def test_rename_index():
    """Renaming index labels is not supported by dask."""
    with pytest.raises(ValueError):
        d.rename(index={0: 1})
def test_to_timestamp():
    """PeriodIndex-to-timestamp conversion matches pandas for frames and
    series, with default and explicit freq/how arguments."""
    index = pd.PeriodIndex(freq='A', start='1/1/2001', end='12/1/2004')
    pdf = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]}, index=index)
    ddf = dd.from_pandas(pdf, npartitions=3)

    assert_eq(ddf.to_timestamp(), pdf.to_timestamp())
    assert_eq(ddf.to_timestamp(freq='M', how='s').compute(),
              pdf.to_timestamp(freq='M', how='s'))

    assert_eq(ddf.x.to_timestamp(), pdf.x.to_timestamp())
    assert_eq(ddf.x.to_timestamp(freq='M', how='s').compute(),
              pdf.x.to_timestamp(freq='M', how='s'))
def test_to_frame():
    """Series.to_frame matches pandas with and without an explicit name."""
    pser = pd.Series([1, 2, 3], name='foo')
    dser = dd.from_pandas(pser, npartitions=2)

    assert_eq(pser.to_frame(), dser.to_frame())
    assert_eq(pser.to_frame('bar'), dser.to_frame('bar'))
def test_apply():
    """Series/DataFrame.apply match pandas with explicit or inferred meta;
    column-wise apply (axis=0) is unsupported."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    ddf = dd.from_pandas(df, npartitions=2)

    func = lambda row: row['x'] + row['y']

    assert_eq(ddf.x.apply(lambda x: x + 1, meta=("x", int)),
              df.x.apply(lambda x: x + 1))

    # specify meta
    assert_eq(ddf.apply(lambda xy: xy[0] + xy[1], axis=1, meta=(None, int)),
              df.apply(lambda xy: xy[0] + xy[1], axis=1))
    assert_eq(ddf.apply(lambda xy: xy[0] + xy[1], axis='columns', meta=(None, int)),
              df.apply(lambda xy: xy[0] + xy[1], axis='columns'))

    # inference (wrapped in pytest.warns to swallow the meta warning)
    with pytest.warns(None):
        assert_eq(ddf.apply(lambda xy: xy[0] + xy[1], axis=1),
                  df.apply(lambda xy: xy[0] + xy[1], axis=1))
    with pytest.warns(None):
        assert_eq(ddf.apply(lambda xy: xy, axis=1),
                  df.apply(lambda xy: xy, axis=1))

    # specify meta (Series apply returning a Series -> DataFrame)
    func = lambda x: pd.Series([x, x])
    assert_eq(ddf.x.apply(func, meta=[(0, int), (1, int)]), df.x.apply(func))
    # inference
    with pytest.warns(None):
        assert_eq(ddf.x.apply(func), df.x.apply(func))

    # axis=0 (column-wise) is not implemented in dask.
    with pytest.raises(NotImplementedError):
        ddf.apply(lambda xy: xy, axis=0)
    with pytest.raises(NotImplementedError):
        ddf.apply(lambda xy: xy, axis='index')
@pytest.mark.skipif(sys.version_info <= (3, 0),
                    reason="Global filter is applied by another library, and "
                    "not reset properly.")
def test_apply_warns():
    """apply warns exactly once about meta inference; passing meta silences it."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    ddf = dd.from_pandas(pdf, npartitions=2)

    def row_sum(row):
        return row['x'] + row['y']

    with pytest.warns(UserWarning) as records:
        ddf.apply(row_sum, axis=1)
    assert len(records) == 1

    with pytest.warns(None) as records:
        ddf.apply(row_sum, axis=1, meta=(None, int))
    assert len(records) == 0
def test_applymap():
    """Elementwise applymap matches pandas for scalar and tuple outputs."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    ddf = dd.from_pandas(pdf, npartitions=2)
    for elementwise in [lambda x: x + 1, lambda x: (x, x)]:
        assert_eq(ddf.applymap(elementwise), pdf.applymap(elementwise))
def test_abs():
    """abs works on numeric data and rejects string columns."""
    pdf = pd.DataFrame({'A': [1, -2, 3, -4, 5],
                        'B': [-6., -7, -8, -9, 10],
                        'C': ['a', 'b', 'c', 'd', 'e']})
    ddf = dd.from_pandas(pdf, npartitions=2)

    assert_eq(ddf.A.abs(), pdf.A.abs())
    assert_eq(ddf[['A', 'B']].abs(), pdf[['A', 'B']].abs())

    # String data cannot take abs().
    with pytest.raises(ValueError):
        ddf.C.abs()
    with pytest.raises(TypeError):
        ddf.abs()
def test_round():
    """DataFrame.round matches pandas with and without a digit count."""
    pdf = pd.DataFrame({'col1': [1.123, 2.123, 3.123],
                        'col2': [1.234, 2.234, 3.234]})
    ddf = dd.from_pandas(pdf, npartitions=2)
    assert_eq(ddf.round(), pdf.round())
    assert_eq(ddf.round(2), pdf.round(2))
def test_cov():
    """cov matches pandas for DataFrames and Series, with min_periods and
    split_every, and tokenizes deterministically."""
    # DataFrame
    df = pd.util.testing.makeMissingDataframe(0.3, 42)
    ddf = dd.from_pandas(df, npartitions=6)

    res = ddf.cov()
    res2 = ddf.cov(split_every=2)
    res3 = ddf.cov(10)
    res4 = ddf.cov(10, split_every=2)
    sol = df.cov()
    sol2 = df.cov(10)
    assert_eq(res, sol)
    assert_eq(res2, sol)
    assert_eq(res3, sol2)
    assert_eq(res4, sol2)
    # Same arguments -> same token; any differing argument -> new token.
    assert res._name == ddf.cov()._name
    assert res._name != res2._name
    assert res3._name != res4._name
    assert res._name != res3._name

    # Series
    a = df.A
    b = df.B
    da = dd.from_pandas(a, npartitions=6)
    db = dd.from_pandas(b, npartitions=7)

    res = da.cov(db)
    res2 = da.cov(db, split_every=2)
    res3 = da.cov(db, 10)
    res4 = da.cov(db, 10, split_every=2)
    sol = a.cov(b)
    sol2 = a.cov(b, 10)
    assert_eq(res, sol)
    assert_eq(res2, sol)
    assert_eq(res3, sol2)
    assert_eq(res4, sol2)
    assert res._name == da.cov(db)._name
    assert res._name != res2._name
    assert res3._name != res4._name
    assert res._name != res3._name
def test_corr():
    """corr matches pandas for DataFrames and Series, with min_periods and
    split_every; tokens are deterministic and spearman is unsupported."""
    # DataFrame
    df = pd.util.testing.makeMissingDataframe(0.3, 42)
    ddf = dd.from_pandas(df, npartitions=6)

    res = ddf.corr()
    res2 = ddf.corr(split_every=2)
    res3 = ddf.corr(min_periods=10)
    res4 = ddf.corr(min_periods=10, split_every=2)
    sol = df.corr()
    sol2 = df.corr(min_periods=10)
    assert_eq(res, sol)
    assert_eq(res2, sol)
    assert_eq(res3, sol2)
    assert_eq(res4, sol2)
    # Same arguments -> same token; any differing argument -> new token.
    assert res._name == ddf.corr()._name
    assert res._name != res2._name
    assert res3._name != res4._name
    assert res._name != res3._name

    pytest.raises(NotImplementedError, lambda: ddf.corr(method='spearman'))

    # Series
    a = df.A
    b = df.B
    da = dd.from_pandas(a, npartitions=6)
    db = dd.from_pandas(b, npartitions=7)

    res = da.corr(db)
    res2 = da.corr(db, split_every=2)
    res3 = da.corr(db, min_periods=10)
    res4 = da.corr(db, min_periods=10, split_every=2)
    # BUG FIX: the expected values were built from the dask objects
    # (``da.corr(db)``), so dask was only compared to itself. Use the
    # pandas results, mirroring test_cov.
    sol = a.corr(b)
    sol2 = a.corr(b, min_periods=10)
    assert_eq(res, sol)
    assert_eq(res2, sol)
    assert_eq(res3, sol2)
    assert_eq(res4, sol2)
    assert res._name == da.corr(db)._name
    assert res._name != res2._name
    assert res3._name != res4._name
    assert res._name != res3._name

    pytest.raises(NotImplementedError, lambda: da.corr(db, method='spearman'))
    # Series.corr requires a Series operand, not a DataFrame.
    pytest.raises(TypeError, lambda: da.corr(ddf))
def test_cov_corr_meta():
    """cov/corr keep index metadata; Series results are float64."""
    pdf = pd.DataFrame({'a': np.array([1, 2, 3]),
                        'b': np.array([1.0, 2.0, 3.0], dtype='f4'),
                        'c': np.array([1.0, 2.0, 3.0])},
                       index=pd.Index([1, 2, 3], name='myindex'))
    ddf = dd.from_pandas(pdf, npartitions=2)

    assert_eq(ddf.corr(), pdf.corr())
    assert_eq(ddf.cov(), pdf.cov())
    # Scalar results are float64 regardless of input dtype.
    assert ddf.a.cov(ddf.b)._meta.dtype == 'f8'
    assert ddf.a.corr(ddf.b)._meta.dtype == 'f8'
@pytest.mark.slow
def test_cov_corr_stable():
    """The tree reduction stays numerically stable on a large sample."""
    pdf = pd.DataFrame(np.random.uniform(-1, 1, (20000000, 2)),
                       columns=['a', 'b'])
    ddf = dd.from_pandas(pdf, npartitions=50)
    assert_eq(ddf.cov(split_every=8), pdf.cov())
    assert_eq(ddf.corr(split_every=8), pdf.corr())
def test_cov_corr_mixed():
size = 1000
d = {'dates' : pd.date_range('2015-01-01', periods=size, freq='1T'),
'unique_id' : np.arange(0, size),
'ints' : np.random.randint(0, size, size=size),
'floats' : np.random.randn(size),
'bools' : np.random.choice([0, 1], size=size),
'int_nans' : np.random.choice([0, 1, np.nan], size=size),
'float_nans' : np.random.choice([0.0, 1.0, np.nan], size=size),
'constant' : 1,
'int_categorical' : np.random.choice([10, 20, 30, 40, 50], size=size) ,
'categorical_binary' : np.random.choice(['a', 'b'], size=size),
'categorical_nans' : np.random.choice(['a', 'b', 'c'], size=size)}
df = pd.DataFrame(d)
df['hardbools'] = df['bools'] == 1
df['categorical_nans'] = df['categorical_nans'].replace('c', np.nan)
df['categorical_binary'] = df['categorical_binary'].astype('category')
df['unique_id'] = df['unique_id'].astype(str)
ddf = dd.from_pandas(df, npartitions=20)
assert_eq(ddf.corr(split_every=4), df.corr(), check_divisions=False)
assert_eq(ddf.cov(split_every=4), df.cov(), check_divisions=False)
def test_autocorr():
    """``Series.autocorr`` should match pandas for positive, zero and
    negative lags, work with ``split_every``, and reject non-int lags."""
    x = pd.Series(np.random.random(100))
    dx = dd.from_pandas(x, npartitions=10)
    assert_eq(dx.autocorr(2), x.autocorr(2))
    assert_eq(dx.autocorr(0), x.autocorr(0))
    assert_eq(dx.autocorr(-2), x.autocorr(-2))
    # split_every changes the reduction tree, not the result.
    assert_eq(dx.autocorr(2, split_every=3), x.autocorr(2))
    # Lag must be an integer.
    pytest.raises(TypeError, lambda: dx.autocorr(1.5))
def test_apply_infer_columns():
    """``apply`` without an explicit ``meta=`` should infer the output
    structure (DataFrame vs. Series, column names, series name) for
    both DataFrame and Series inputs."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    ddf = dd.from_pandas(df, npartitions=2)

    def return_df(x):
        # will create new DataFrame which columns is ['sum', 'mean']
        return pd.Series([x.sum(), x.mean()], index=['sum', 'mean'])

    # DataFrame to completely different DataFrame
    # (pytest.warns(None) swallows the "meta not specified" warning)
    with pytest.warns(None):
        result = ddf.apply(return_df, axis=1)
    assert isinstance(result, dd.DataFrame)
    tm.assert_index_equal(result.columns, pd.Index(['sum', 'mean']))
    assert_eq(result, df.apply(return_df, axis=1))

    # DataFrame to Series
    with pytest.warns(None):
        result = ddf.apply(lambda x: 1, axis=1)
    assert isinstance(result, dd.Series)
    assert result.name is None
    assert_eq(result, df.apply(lambda x: 1, axis=1))

    def return_df2(x):
        return pd.Series([x * 2, x * 3], index=['x2', 'x3'])

    # Series to completely different DataFrame
    with pytest.warns(None):
        result = ddf.x.apply(return_df2)
    assert isinstance(result, dd.DataFrame)
    tm.assert_index_equal(result.columns, pd.Index(['x2', 'x3']))
    assert_eq(result, df.x.apply(return_df2))

    # Series to Series: the inferred series keeps the input's name.
    with pytest.warns(None):
        result = ddf.x.apply(lambda x: 1)
    assert isinstance(result, dd.Series)
    assert result.name == 'x'
    assert_eq(result, df.x.apply(lambda x: 1))
def test_index_time_properties():
i = tm.makeTimeSeries()
a = dd.from_pandas(i, npartitions=3)
assert 'day' in dir(a.index)
# returns a numpy array in pandas, but a Index in dask
assert_eq(a.index.day, pd.Index(i.index.day))
assert_eq(a.index.month, pd.Index(i.index.month))
def test_nlargest_nsmallest():
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(20),
'b': list(ascii_lowercase[:20]),
'c': np.random.permutation(20).astype('float64')})
ddf = dd.from_pandas(df, npartitions=3)
for m in ['nlargest', 'nsmallest']:
f = lambda df, *args, **kwargs: getattr(df, m)(*args, **kwargs)
res = f(ddf, 5, 'a')
res2 = f(ddf, 5, 'a', split_every=2)
sol = f(df, 5, 'a')
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = f(ddf, 5, ['a', 'c'])
res2 = f(ddf, 5, ['a', 'c'], split_every=2)
sol = f(df, 5, ['a', 'c'])
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = f(ddf.a, 5)
res2 = f(ddf.a, 5, split_every=2)
sol = f(df.a, 5)
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
def test_reset_index():
    """``reset_index`` should match pandas (modulo the concrete index
    values) and always discard known divisions."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    ddf = dd.from_pandas(df, npartitions=2)

    sol = df.reset_index()
    res = ddf.reset_index()
    # The new default RangeIndex cannot be tracked -> unknown divisions.
    assert all(d is None for d in res.divisions)
    assert_eq(res, sol, check_index=False)

    sol = df.reset_index(drop=True)
    res = ddf.reset_index(drop=True)
    assert all(d is None for d in res.divisions)
    assert_eq(res, sol, check_index=False)

    # Same contract for Series.reset_index.
    sol = df.x.reset_index()
    res = ddf.x.reset_index()
    assert all(d is None for d in res.divisions)
    assert_eq(res, sol, check_index=False)

    sol = df.x.reset_index(drop=True)
    res = ddf.x.reset_index(drop=True)
    assert all(d is None for d in res.divisions)
    assert_eq(res, sol, check_index=False)
def test_dataframe_compute_forward_kwargs():
x = dd.from_pandas(pd.DataFrame({'a': range(10)}), npartitions=2).a.sum()
x.compute(bogus_keyword=10)
def test_series_iteritems():
df = pd.DataFrame({'x': [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df['x'].iteritems(), ddf['x'].iteritems()):
assert a == b
def test_dataframe_iterrows():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.iterrows(), ddf.iterrows()):
tm.assert_series_equal(a[1], b[1])
def test_dataframe_itertuples():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.itertuples(), ddf.itertuples()):
assert a == b
def test_astype():
df = pd.DataFrame({'x': [1, 2, 3, None], 'y': [10, 20, 30, 40]},
index=[10, 20, 30, 40])
a = dd.from_pandas(df, 2)
assert_eq(a.astype(float), df.astype(float))
assert_eq(a.x.astype(float), df.x.astype(float))
def test_astype_categoricals():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'b', 'c'],
'y': ['x', 'y', 'z', 'x', 'y'],
'z': [1, 2, 3, 4, 5]})
df = df.astype({'y': 'category'})
ddf = dd.from_pandas(df, 2)
assert ddf.y.cat.known
ddf2 = ddf.astype({'x': 'category'})
assert not ddf2.x.cat.known
assert ddf2.y.cat.known
assert ddf2.x.dtype == 'category'
assert ddf2.compute().x.dtype == 'category'
dx = ddf.x.astype('category')
assert not dx.cat.known
assert dx.dtype == 'category'
assert dx.compute().dtype == 'category'
@pytest.mark.skipif(PANDAS_VERSION < '0.21.0',
reason="No CategoricalDtype with categories")
def test_astype_categoricals_known():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'b', 'c'],
'y': ['x', 'y', 'z', 'y', 'z'],
'z': ['b', 'b', 'b', 'c', 'b'],
'other': [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, 2)
abc = pd.api.types.CategoricalDtype(['a', 'b', 'c'])
category = pd.api.types.CategoricalDtype()
# DataFrame
ddf2 = ddf.astype({'x': abc,
'y': category,
'z': 'category',
'other': 'f8'})
for col, known in [('x', True), ('y', False), ('z', False)]:
x = getattr(ddf2, col)
assert pd.api.types.is_categorical_dtype(x.dtype)
assert x.cat.known == known
# Series
for dtype, known in [('category', False), (category, False), (abc, True)]:
dx2 = ddf.x.astype(dtype)
assert pd.api.types.is_categorical_dtype(dx2.dtype)
assert dx2.cat.known == known
def test_groupby_callable():
a = pd.DataFrame({'x': [1, 2, 3, None], 'y': [10, 20, 30, 40]},
index=[1, 2, 3, 4])
b = dd.from_pandas(a, 2)
def iseven(x):
return x % 2 == 0
assert_eq(a.groupby(iseven).y.sum(),
b.groupby(iseven).y.sum())
assert_eq(a.y.groupby(iseven).sum(),
b.y.groupby(iseven).sum())
def test_methods_tokenize_differently():
df = pd.DataFrame({'x': [1, 2, 3, 4]})
df = dd.from_pandas(df, npartitions=1)
assert (df.x.map_partitions(lambda x: pd.Series(x.min()))._name !=
df.x.map_partitions(lambda x: pd.Series(x.max()))._name)
def _assert_info(df, ddf, memory_usage=True):
from io import StringIO
assert isinstance(df, pd.DataFrame)
assert isinstance(ddf, dd.DataFrame)
buf_pd, buf_da = StringIO(), StringIO()
df.info(buf=buf_pd, memory_usage=memory_usage)
ddf.info(buf=buf_da, verbose=True, memory_usage=memory_usage)
stdout_pd = buf_pd.getvalue()
stdout_da = buf_da.getvalue()
stdout_da = stdout_da.replace(str(type(ddf)), str(type(df)))
assert stdout_pd == stdout_da
def test_info():
from io import StringIO
from dask.compatibility import unicode
pandas_format._put_lines = put_lines
test_frames = [
pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]}, index=pd.Int64Index(range(4))), # No RangeIndex in dask
pd.DataFrame()
]
for df in test_frames:
ddf = dd.from_pandas(df, npartitions=4)
_assert_info(df, ddf)
buf = StringIO()
ddf = dd.from_pandas(pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]}, index=range(4)), npartitions=4)
# Verbose=False
ddf.info(buf=buf, verbose=False)
assert buf.getvalue() == unicode("<class 'dask.dataframe.core.DataFrame'>\n"
"Columns: 2 entries, x to y\n"
"dtypes: int64(2)")
# buf=None
assert ddf.info(buf=None) is None
def test_groupby_multilevel_info():
# GH 1844
from io import StringIO
from dask.compatibility import unicode
pandas_format._put_lines = put_lines
df = pd.DataFrame({'A': [1, 1, 2, 2],
'B': [1, 2, 3, 4],
'C': [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
g = ddf.groupby(['A', 'B']).sum()
# slight difference between memory repr (single additional space)
_assert_info(g.compute(), g, memory_usage=False)
buf = StringIO()
g.info(buf, verbose=False)
assert buf.getvalue() == unicode("""<class 'dask.dataframe.core.DataFrame'>
Columns: 1 entries, C to C
dtypes: int64(1)""")
# multilevel
g = ddf.groupby(['A', 'B']).agg(['count', 'sum'])
_assert_info(g.compute(), g, memory_usage=False)
buf = StringIO()
g.info(buf, verbose=False)
expected = unicode(textwrap.dedent("""\
<class 'dask.dataframe.core.DataFrame'>
Columns: 2 entries, ('C', 'count') to ('C', 'sum')
dtypes: int64(2)"""))
assert buf.getvalue() == expected
def test_categorize_info():
# assert that we can call info after categorize
# workaround for: https://github.com/pydata/pandas/issues/14368
from io import StringIO
from dask.compatibility import unicode
pandas_format._put_lines = put_lines
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': pd.Series(list('aabc')),
'z': pd.Series(list('aabc'))},
index=pd.Int64Index(range(4))) # No RangeIndex in dask
ddf = dd.from_pandas(df, npartitions=4).categorize(['y'])
# Verbose=False
buf = StringIO()
ddf.info(buf=buf, verbose=True)
expected = unicode("<class 'dask.dataframe.core.DataFrame'>\n"
"Int64Index: 4 entries, 0 to 3\n"
"Data columns (total 3 columns):\n"
"x 4 non-null int64\n"
"y 4 non-null category\n"
"z 4 non-null object\n"
"dtypes: category(1), object(1), int64(1)")
assert buf.getvalue() == expected
def test_gh_1301():
df = pd.DataFrame([['1', '2'], ['3', '4']])
ddf = dd.from_pandas(df, npartitions=2)
ddf2 = ddf.assign(y=ddf[1].astype(int))
assert_eq(ddf2, df.assign(y=df[1].astype(int)))
assert ddf2.dtypes['y'] == np.dtype(int)
def test_timeseries_sorted():
df = tm.makeTimeDataFrame()
ddf = dd.from_pandas(df.reset_index(), npartitions=2)
df.index.name = 'index'
assert_eq(ddf.set_index('index', sorted=True, drop=True), df)
def test_column_assignment():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]})
ddf = dd.from_pandas(df, npartitions=2)
orig = ddf.copy()
ddf['z'] = ddf.x + ddf.y
df['z'] = df.x + df.y
assert_eq(df, ddf)
assert 'z' not in orig.columns
def test_columns_assignment():
df = pd.DataFrame({'x': [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
df2 = df.assign(y=df.x + 1, z=df.x - 1)
df[['a', 'b']] = df2[['y', 'z']]
ddf2 = ddf.assign(y=ddf.x + 1, z=ddf.x - 1)
ddf[['a', 'b']] = ddf2[['y', 'z']]
assert_eq(df, ddf)
def test_attribute_assignment():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
'y': [1., 2., 3., 4., 5.]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.y = ddf.x + ddf.y
assert_eq(ddf, df.assign(y=df.x + df.y))
def test_setitem_triggering_realign():
a = dd.from_pandas(pd.DataFrame({"A": range(12)}), npartitions=3)
b = dd.from_pandas(pd.Series(range(12), name='B'), npartitions=4)
a['C'] = b
assert len(a) == 12
def test_inplace_operators():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
'y': [1., 2., 3., 4., 5.]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.y **= 0.5
assert_eq(ddf.y, df.y ** 0.5)
assert_eq(ddf, df.assign(y=df.y ** 0.5))
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("idx", [
np.arange(100),
sorted(np.random.random(size=100)),
pd.date_range('20150101', periods=100)
])
def test_idxmaxmin(idx, skipna):
df = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'), index=idx)
df.b.iloc[31] = np.nan
df.d.iloc[78] = np.nan
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(df.idxmax(axis=1, skipna=skipna),
ddf.idxmax(axis=1, skipna=skipna))
assert_eq(df.idxmin(axis=1, skipna=skipna),
ddf.idxmin(axis=1, skipna=skipna))
assert_eq(df.idxmax(skipna=skipna), ddf.idxmax(skipna=skipna))
assert_eq(df.idxmax(skipna=skipna),
ddf.idxmax(skipna=skipna, split_every=2))
assert (ddf.idxmax(skipna=skipna)._name !=
ddf.idxmax(skipna=skipna, split_every=2)._name)
assert_eq(df.idxmin(skipna=skipna), ddf.idxmin(skipna=skipna))
assert_eq(df.idxmin(skipna=skipna),
ddf.idxmin(skipna=skipna, split_every=2))
assert (ddf.idxmin(skipna=skipna)._name !=
ddf.idxmin(skipna=skipna, split_every=2)._name)
assert_eq(df.a.idxmax(skipna=skipna), ddf.a.idxmax(skipna=skipna))
assert_eq(df.a.idxmax(skipna=skipna),
ddf.a.idxmax(skipna=skipna, split_every=2))
assert (ddf.a.idxmax(skipna=skipna)._name !=
ddf.a.idxmax(skipna=skipna, split_every=2)._name)
assert_eq(df.a.idxmin(skipna=skipna), ddf.a.idxmin(skipna=skipna))
assert_eq(df.a.idxmin(skipna=skipna),
ddf.a.idxmin(skipna=skipna, split_every=2))
assert (ddf.a.idxmin(skipna=skipna)._name !=
ddf.a.idxmin(skipna=skipna, split_every=2)._name)
def test_idxmaxmin_empty_partitions():
df = pd.DataFrame({'a': [1, 2, 3],
'b': [1.5, 2, 3],
'c': [np.NaN] * 3,
'd': [1, 2, np.NaN]})
empty = df.iloc[:0]
ddf = dd.concat([dd.from_pandas(df, npartitions=1)] +
[dd.from_pandas(empty, npartitions=1)] * 10)
for skipna in [True, False]:
assert_eq(ddf.idxmin(skipna=skipna, split_every=3),
df.idxmin(skipna=skipna))
assert_eq(ddf[['a', 'b', 'd']].idxmin(skipna=skipna, split_every=3),
df[['a', 'b', 'd']].idxmin(skipna=skipna))
assert_eq(ddf.b.idxmax(split_every=3), df.b.idxmax())
# Completely empty raises
ddf = dd.concat([dd.from_pandas(empty, npartitions=1)] * 10)
with pytest.raises(ValueError):
ddf.idxmax().compute()
with pytest.raises(ValueError):
ddf.b.idxmax().compute()
def test_getitem_meta():
data = {'col1': ['a', 'a', 'b'],
'col2': [0, 1, 0]}
df = pd.DataFrame(data=data, columns=['col1', 'col2'])
ddf = dd.from_pandas(df, npartitions=1)
assert_eq(df.col2[df.col1 == 'a'], ddf.col2[ddf.col1 == 'a'])
def test_getitem_multilevel():
pdf = pd.DataFrame({('A', '0') : [1,2,2], ('B', '1') : [1,2,3]})
ddf = dd.from_pandas(pdf, npartitions=3)
assert_eq(pdf['A', '0'], ddf['A', '0'])
assert_eq(pdf[[('A', '0'), ('B', '1')]], ddf[[('A', '0'), ('B', '1')]])
def test_getitem_string_subclass():
df = pd.DataFrame({'column_1': list(range(10))})
ddf = dd.from_pandas(df, npartitions=3)
class string_subclass(str):
pass
column_1 = string_subclass('column_1')
assert_eq(df[column_1], ddf[column_1])
@pytest.mark.parametrize('col_type', [list, np.array, pd.Series, pd.Index])
def test_getitem_column_types(col_type):
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
ddf = dd.from_pandas(df, 2)
cols = col_type(['C', 'A', 'B'])
assert_eq(df[cols], ddf[cols])
def test_diff():
df = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'))
ddf = dd.from_pandas(df, 5)
assert_eq(ddf.diff(), df.diff())
assert_eq(ddf.diff(0), df.diff(0))
assert_eq(ddf.diff(2), df.diff(2))
assert_eq(ddf.diff(-2), df.diff(-2))
assert_eq(ddf.diff(2, axis=1), df.diff(2, axis=1))
assert_eq(ddf.a.diff(), df.a.diff())
assert_eq(ddf.a.diff(0), df.a.diff(0))
assert_eq(ddf.a.diff(2), df.a.diff(2))
assert_eq(ddf.a.diff(-2), df.a.diff(-2))
assert ddf.diff(2)._name == ddf.diff(2)._name
assert ddf.diff(2)._name != ddf.diff(3)._name
pytest.raises(TypeError, lambda: ddf.diff(1.5))
def test_shift():
df = tm.makeTimeDataFrame()
ddf = dd.from_pandas(df, npartitions=4)
# DataFrame
assert_eq(ddf.shift(), df.shift())
assert_eq(ddf.shift(0), df.shift(0))
assert_eq(ddf.shift(2), df.shift(2))
assert_eq(ddf.shift(-2), df.shift(-2))
assert_eq(ddf.shift(2, axis=1), df.shift(2, axis=1))
# Series
assert_eq(ddf.A.shift(), df.A.shift())
assert_eq(ddf.A.shift(0), df.A.shift(0))
assert_eq(ddf.A.shift(2), df.A.shift(2))
assert_eq(ddf.A.shift(-2), df.A.shift(-2))
with pytest.raises(TypeError):
ddf.shift(1.5)
def test_shift_with_freq():
df = tm.makeTimeDataFrame(30)
# DatetimeIndex
for data_freq, divs1 in [('B', False), ('D', True), ('H', True)]:
df = df.set_index(tm.makeDateIndex(30, freq=data_freq))
ddf = dd.from_pandas(df, npartitions=4)
for freq, divs2 in [('S', True), ('W', False),
(pd.Timedelta(10, unit='h'), True)]:
for d, p in [(ddf, df), (ddf.A, df.A), (ddf.index, df.index)]:
res = d.shift(2, freq=freq)
assert_eq(res, p.shift(2, freq=freq))
assert res.known_divisions == divs2
# Index shifts also work with freq=None
res = ddf.index.shift(2)
assert_eq(res, df.index.shift(2))
assert res.known_divisions == divs1
# PeriodIndex
for data_freq, divs in [('B', False), ('D', True), ('H', True)]:
df = df.set_index(pd.period_range('2000-01-01', periods=30,
freq=data_freq))
ddf = dd.from_pandas(df, npartitions=4)
for d, p in [(ddf, df), (ddf.A, df.A)]:
res = d.shift(2, freq=data_freq)
assert_eq(res, p.shift(2, freq=data_freq))
assert res.known_divisions == divs
# PeriodIndex.shift doesn't have `freq` parameter
res = ddf.index.shift(2)
assert_eq(res, df.index.shift(2))
assert res.known_divisions == divs
with pytest.raises(ValueError):
ddf.index.shift(2, freq='D') # freq keyword not supported
# TimedeltaIndex
for data_freq in ['T', 'D', 'H']:
df = df.set_index(tm.makeTimedeltaIndex(30, freq=data_freq))
ddf = dd.from_pandas(df, npartitions=4)
for freq in ['S', pd.Timedelta(10, unit='h')]:
for d, p in [(ddf, df), (ddf.A, df.A), (ddf.index, df.index)]:
res = d.shift(2, freq=freq)
assert_eq(res, p.shift(2, freq=freq))
assert res.known_divisions
# Index shifts also work with freq=None
res = ddf.index.shift(2)
assert_eq(res, df.index.shift(2))
assert res.known_divisions
# Other index types error
df = tm.makeDataFrame()
ddf = dd.from_pandas(df, npartitions=4)
pytest.raises(NotImplementedError, lambda: ddf.shift(2, freq='S'))
pytest.raises(NotImplementedError, lambda: ddf.A.shift(2, freq='S'))
pytest.raises(NotImplementedError, lambda: ddf.index.shift(2))
@pytest.mark.parametrize('method', ['first', 'last'])
def test_first_and_last(method):
f = lambda x, offset: getattr(x, method)(offset)
freqs = ['12h', 'D']
offsets = ['0d', '100h', '20d', '20B', '3W', '3M', '400d', '13M']
for freq in freqs:
index = pd.date_range('1/1/2000', '1/1/2001', freq=freq)[::4]
df = pd.DataFrame(np.random.random((len(index), 4)), index=index,
columns=['A', 'B', 'C', 'D'])
ddf = dd.from_pandas(df, npartitions=10)
for offset in offsets:
assert_eq(f(ddf, offset), f(df, offset))
assert_eq(f(ddf.A, offset), f(df.A, offset))
@pytest.mark.parametrize('npartitions', [1, 4, 20])
@pytest.mark.parametrize('split_every', [2, 5])
@pytest.mark.parametrize('split_out', [None, 1, 5, 20])
def test_hash_split_unique(npartitions, split_every, split_out):
from string import ascii_lowercase
s = pd.Series(np.random.choice(list(ascii_lowercase), 1000, replace=True))
ds = dd.from_pandas(s, npartitions=npartitions)
dropped = ds.unique(split_every=split_every, split_out=split_out)
dsk = dropped.__dask_optimize__(dropped.dask, dropped.__dask_keys__())
from dask.core import get_deps
dependencies, dependents = get_deps(dsk)
assert len([k for k, v in dependencies.items() if not v]) == npartitions
assert dropped.npartitions == (split_out or 1)
assert sorted(dropped.compute(scheduler='sync')) == sorted(s.unique())
@pytest.mark.parametrize('split_every', [None, 2])
def test_split_out_drop_duplicates(split_every):
x = np.concatenate([np.arange(10)] * 100)[:, None]
y = x.copy()
z = np.concatenate([np.arange(20)] * 50)[:, None]
rs = np.random.RandomState(1)
rs.shuffle(x)
rs.shuffle(y)
rs.shuffle(z)
df = pd.DataFrame(np.concatenate([x, y, z], axis=1), columns=['x', 'y', 'z'])
ddf = dd.from_pandas(df, npartitions=20)
for subset, keep in product([None, ['x', 'z']], ['first', 'last']):
sol = df.drop_duplicates(subset=subset, keep=keep)
res = ddf.drop_duplicates(subset=subset, keep=keep,
split_every=split_every, split_out=10)
assert res.npartitions == 10
assert_eq(sol, res)
@pytest.mark.parametrize('split_every', [None, 2])
def test_split_out_value_counts(split_every):
df = pd.DataFrame({'x': [1, 2, 3] * 100})
ddf = dd.from_pandas(df, npartitions=5)
assert ddf.x.value_counts(split_out=10, split_every=split_every).npartitions == 10
assert_eq(ddf.x.value_counts(split_out=10, split_every=split_every), df.x.value_counts())
def test_values():
    """``.values`` on frame/series/index should yield dask arrays that
    match the corresponding numpy arrays from pandas."""
    # Deliberately shadows the module-level assert_eq: .values returns
    # dask *arrays*, so the array comparison helper is needed here.
    from dask.array.utils import assert_eq

    df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
                       'y': [2, 3, 4, 5]},
                      index=pd.Index([1., 2., 3., 4.], name='ind'))
    ddf = dd.from_pandas(df, 2)

    assert_eq(df.values, ddf.values)
    assert_eq(df.x.values, ddf.x.values)
    assert_eq(df.y.values, ddf.y.values)
    assert_eq(df.index.values, ddf.index.values)
def test_copy():
    """Mutating a dask frame after ``copy()`` must not leak into the copy."""
    original = pd.DataFrame({'x': [1, 2, 3]})
    frame = dd.from_pandas(original, npartitions=2)
    snapshot = frame.copy()
    # Add a column to the live frame only.
    frame['y'] = frame.x * 2
    # The snapshot still matches the untouched pandas frame.
    assert_eq(snapshot, original)
    original['y'] = original.x * 2
def test_del():
    """``del frame[col]`` drops the column in place without touching copies."""
    pdf = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
                        'y': [2, 3, 4, 5]},
                       index=pd.Index([1., 2., 3., 4.], name='ind'))
    left = dd.from_pandas(pdf, 2)
    right = left.copy()

    del left['x']
    # The copy taken before deletion keeps both columns.
    assert_eq(right, pdf)

    del pdf['x']
    # After deleting on the pandas side too, the mutated frames agree.
    assert_eq(left, pdf)
@pytest.mark.parametrize('index', [True, False])
@pytest.mark.parametrize('deep', [True, False])
def test_memory_usage(index, deep):
df = pd.DataFrame({'x': [1, 2, 3],
'y': [1.0, 2.0, 3.0],
'z': ['a', 'b', 'c']})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(df.memory_usage(index=index, deep=deep),
ddf.memory_usage(index=index, deep=deep))
assert (df.x.memory_usage(index=index, deep=deep) ==
ddf.x.memory_usage(index=index, deep=deep).compute())
@pytest.mark.parametrize('reduction', ['sum', 'mean', 'std', 'var', 'count',
'min', 'max', 'idxmin', 'idxmax',
'prod', 'all', 'sem'])
def test_dataframe_reductions_arithmetic(reduction):
df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
'y': [1.1, 2.2, 3.3, 4.4, 5.5]})
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(ddf - (getattr(ddf, reduction)() + 1),
df - (getattr(df, reduction)() + 1))
def test_datetime_loc_open_slicing():
dtRange = pd.date_range('01.01.2015','05.05.2015')
df = pd.DataFrame(np.random.random((len(dtRange), 2)), index=dtRange)
ddf = dd.from_pandas(df, npartitions=5)
assert_eq(df.loc[:'02.02.2015'], ddf.loc[:'02.02.2015'])
assert_eq(df.loc['02.02.2015':], ddf.loc['02.02.2015':])
assert_eq(df[0].loc[:'02.02.2015'], ddf[0].loc[:'02.02.2015'])
assert_eq(df[0].loc['02.02.2015':], ddf[0].loc['02.02.2015':])
def test_to_datetime():
df = pd.DataFrame({'year': [2015, 2016],
'month': [2, 3],
'day': [4, 5]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(pd.to_datetime(df), dd.to_datetime(ddf))
s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 100)
ds = dd.from_pandas(s, npartitions=10)
assert_eq(pd.to_datetime(s, infer_datetime_format=True),
dd.to_datetime(ds, infer_datetime_format=True))
def test_to_timedelta():
s = pd.Series(range(10))
ds = dd.from_pandas(s, npartitions=2)
assert_eq(pd.to_timedelta(s), dd.to_timedelta(ds))
assert_eq(pd.to_timedelta(s, unit='h'), dd.to_timedelta(ds, unit='h'))
s = pd.Series([1, 2, 'this will error'])
ds = dd.from_pandas(s, npartitions=2)
assert_eq(pd.to_timedelta(s, errors='coerce'),
dd.to_timedelta(ds, errors='coerce'))
@pytest.mark.skipif(PANDAS_VERSION < '0.22.0',
reason="No isna method")
@pytest.mark.parametrize('values', [[np.NaN, 0], [1, 1]])
def test_isna(values):
s = pd.Series(values)
ds = dd.from_pandas(s, npartitions=2)
assert_eq(pd.isna(s), dd.isna(ds))
@pytest.mark.parametrize('drop', [0, 9])
def test_slice_on_filtered_boundary(drop):
# https://github.com/dask/dask/issues/2211
x = np.arange(10)
x[[5, 6]] -= 2
df = pd.DataFrame({"A": x, "B": np.arange(len(x))})
pdf = df.set_index("A").query("B != {}".format(drop))
ddf = dd.from_pandas(df, 1).set_index("A").query("B != {}".format(drop))
result = dd.concat([ddf, ddf.rename(columns={"B": "C"})], axis=1)
expected = pd.concat([pdf, pdf.rename(columns={"B": "C"})], axis=1)
assert_eq(result, expected)
def test_boundary_slice_nonmonotonic():
x = np.array([-1, -2, 2, 4, 3])
df = pd.DataFrame({"B": range(len(x))}, index=x)
result = methods.boundary_slice(df, 0, 4)
expected = df.iloc[2:]
tm.assert_frame_equal(result, expected)
result = methods.boundary_slice(df, -1, 4)
expected = df.drop(-2)
tm.assert_frame_equal(result, expected)
result = methods.boundary_slice(df, -2, 3)
expected = df.drop(4)
tm.assert_frame_equal(result, expected)
result = methods.boundary_slice(df, -2, 3.5)
expected = df.drop(4)
tm.assert_frame_equal(result, expected)
result = methods.boundary_slice(df, -2, 4)
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('start, stop, right_boundary, left_boundary, drop', [
(-1, None, False, False, [-1, -2]),
(-1, None, False, True, [-2]),
(None, 3, False, False, [3, 4]),
(None, 3, True, False, [4]),
# Missing keys
(-.5, None, False, False, [-1, -2]),
(-.5, None, False, True, [-1, -2]),
(-1.5, None, False, True, [-2]),
(None, 3.5, False, False, [4]),
(None, 3.5, True, False, [4]),
(None, 2.5, False, False, [3, 4]),
])
def test_with_boundary(start, stop, right_boundary, left_boundary, drop):
x = np.array([-1, -2, 2, 4, 3])
df = pd.DataFrame({"B": range(len(x))}, index=x)
result = methods.boundary_slice(df, start, stop, right_boundary, left_boundary)
expected = df.drop(drop)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('index, left, right', [
(range(10), 0, 9),
(range(10), -1, None),
(range(10), None, 10),
([-1, 0, 2, 1], None, None),
([-1, 0, 2, 1], -1, None),
([-1, 0, 2, 1], None, 2),
([-1, 0, 2, 1], -2, 3),
(pd.date_range("2017", periods=10), None, None),
(pd.date_range("2017", periods=10), pd.Timestamp("2017"), None),
(pd.date_range("2017", periods=10), None, pd.Timestamp("2017-01-10")),
(pd.date_range("2017", periods=10), pd.Timestamp("2016"), None),
(pd.date_range("2017", periods=10), None, pd.Timestamp("2018")),
])
def test_boundary_slice_same(index, left, right):
df = pd.DataFrame({"A": range(len(index))}, index=index)
result = methods.boundary_slice(df, left, right)
tm.assert_frame_equal(result, df)
def test_better_errors_object_reductions():
# GH2452
s = pd.Series(['a', 'b', 'c', 'd'])
ds = dd.from_pandas(s, npartitions=2)
with pytest.raises(ValueError) as err:
ds.mean()
assert str(err.value) == "`mean` not supported with object series"
def test_sample_empty_partitions():
@dask.delayed
def make_df(n):
return pd.DataFrame(np.zeros((n, 4)), columns=list('abcd'))
ddf = dd.from_delayed([make_df(0), make_df(100), make_df(0)])
ddf2 = ddf.sample(frac=0.2)
# smoke test sample on empty partitions
res = ddf2.compute()
assert res.dtypes.equals(ddf2.dtypes)
def test_coerce():
    """Coercing a dask DataFrame/Series with int/float/complex must raise,
    since the values are not materialized."""
    df = pd.DataFrame(np.arange(100).reshape((10,10)))
    ddf = dd.from_pandas(df, npartitions=2)
    funcs = (int, float, complex)
    for d, t in product(funcs, (ddf, ddf[0])):
        # BUG FIX: was ``t(d)``, which called the (non-callable) dask
        # object on the coercion function and raised TypeError for the
        # wrong reason.  The intent is to coerce the dask object: d(t).
        pytest.raises(TypeError, lambda: d(t))
def test_bool():
df = pd.DataFrame(np.arange(100).reshape((10,10)))
ddf = dd.from_pandas(df, npartitions=2)
conditions = [ddf, ddf[0], ddf == ddf, ddf[0] == ddf[0]]
for cond in conditions:
with pytest.raises(ValueError):
bool(cond)
def test_cumulative_multiple_columns():
# GH 3037
df = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'))
ddf = dd.from_pandas(df, 5)
for d in [ddf, df]:
for c in df.columns:
d[c + 'cs'] = d[c].cumsum()
d[c + 'cmin'] = d[c].cummin()
d[c + 'cmax'] = d[c].cummax()
d[c + 'cp'] = d[c].cumprod()
assert_eq(ddf, df)
@pytest.mark.parametrize('func', [np.asarray, M.to_records])
def test_map_partition_array(func):
from dask.array.utils import assert_eq
df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
'y': [6.0, 7.0, 8.0, 9.0, 10.0]},
index=['a', 'b', 'c', 'd', 'e'])
ddf = dd.from_pandas(df, npartitions=2)
for pre in [lambda a: a,
lambda a: a.x,
lambda a: a.y,
lambda a: a.index]:
try:
expected = func(pre(df))
except Exception:
continue
x = pre(ddf).map_partitions(func)
assert_eq(x, expected)
assert isinstance(x, da.Array)
assert x.chunks[0] == (np.nan, np.nan)
def test_map_partition_sparse():
sparse = pytest.importorskip('sparse')
df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
'y': [6.0, 7.0, 8.0, 9.0, 10.0]},
index=['a', 'b', 'c', 'd', 'e'])
ddf = dd.from_pandas(df, npartitions=2)
def f(d):
return sparse.COO(np.array(d))
for pre in [lambda a: a,
lambda a: a.x]:
expected = f(pre(df))
result = pre(ddf).map_partitions(f)
assert isinstance(result, da.Array)
computed = result.compute()
assert (computed.data == expected.data).all()
assert (computed.coords == expected.coords).all()
def test_mixed_dask_array_operations():
df = pd.DataFrame({'x': [1, 2, 3]}, index=[4, 5, 6])
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(df.x + df.x.values,
ddf.x + ddf.x.values)
assert_eq(df.x.values + df.x,
ddf.x.values + ddf.x)
assert_eq(df.x + df.index.values,
ddf.x + ddf.index.values)
assert_eq(df.index.values + df.x,
ddf.index.values + ddf.x)
def test_mixed_dask_array_operations_errors():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5]}, index=[4, 5, 6, 7, 8])
ddf = dd.from_pandas(df, npartitions=2)
x = da.arange(5, chunks=((1, 4),))
x._chunks = ((np.nan, np.nan),)
with pytest.raises(ValueError):
(ddf.x + x).compute()
x = da.arange(5, chunks=((2, 2, 1),))
with pytest.raises(ValueError) as info:
ddf.x + x
assert 'add' in str(info.value)
def test_mixed_dask_array_multi_dimensional():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
'y': [5., 6., 7., 8., 9.]},
columns=['x', 'y'])
ddf = dd.from_pandas(df, npartitions=2)
x = (df.values + 1).astype(float)
dx = (ddf.values + 1).astype(float)
assert_eq(ddf + dx + 1, df + x + 1)
assert_eq(ddf + dx.rechunk((None, 1)) + 1, df + x + 1)
assert_eq(ddf[['y', 'x']] + dx + 1, df[['y', 'x']] + x + 1)
def test_meta_raises():
    """Errors caused by missing ``meta=`` should mention it in the
    message, while unrelated failures should not."""
    # Raise when we use a user defined function
    s = pd.Series(['abcd', 'abcd'])
    ds = dd.from_pandas(s, npartitions=2)
    # BUG FIX: the previous try/except pattern passed silently when no
    # exception was raised at all; pytest.raises asserts the ValueError
    # actually occurs.
    with pytest.raises(ValueError) as err:
        ds.map(lambda x: x[3])
    assert "meta=" in str(err.value)

    # But not otherwise
    df = pd.DataFrame({'a': ['x', 'y', 'y'],
                       'b': ['x', 'y', 'z'],
                       'c': [1, 2, 3]})
    ddf = dd.from_pandas(df, npartitions=1)
    with pytest.raises(Exception) as info:
        ddf.a + ddf.c
    assert "meta=" not in str(info.value)
def test_dask_dataframe_holds_scipy_sparse_containers():
sparse = pytest.importorskip('scipy.sparse')
da = pytest.importorskip('dask.array')
x = da.random.random((1000, 10), chunks=(100, 10))
x[x < 0.9] = 0
df = dd.from_dask_array(x)
y = df.map_partitions(sparse.csr_matrix)
assert isinstance(y, da.Array)
vs = y.to_delayed().flatten().tolist()
values = dask.compute(*vs, scheduler='single-threaded')
assert all(isinstance(v, sparse.csr_matrix) for v in values)
| gpl-3.0 |
halflings/crosscultural-media | pca_transformer.py | 1 | 1628 | from itertools import cycle
import mongoengine
#import matplotlib.pyplot as plt
import sklearn.decomposition
import config
from crawler import enqueue_query, process_query
import sys
# Converts the 'array' type returned from pca.transform to a Python array
def toArray(projection):
    """Convert the 2-D array returned by ``PCA.transform`` into plain
    nested Python lists.

    Parameters
    ----------
    projection : iterable of iterables
        Each element is one projected point (e.g. a numpy row).

    Returns
    -------
    list of list
        The same points as ordinary Python lists.
    """
    # Idiom fix: a comprehension replaces the hand-rolled double
    # append loop; list() copies each point's components in order.
    return [list(point) for point in projection]
# Projects data points for all languages in the given query
# Projects data points for all languages in the given query
def project(query_job):
    """Fit a 2-component PCA on the score vectors of every article in
    *query_job*, then project each language's articles separately.

    Returns a list of dicts, one per query/language, with keys
    ``query``, ``language`` and ``results`` (the projected points).

    NOTE(review): ``print >> sys.stderr`` is Python 2 syntax -- this
    module only runs under Python 2 despite any py3 hints elsewhere.
    """
    pca = sklearn.decomposition.PCA(2)
    # Getting all datapoints to learn the PCA / project individual articles
    total_datapoints = []
    datapoints_by_language = dict()
    for query in query_job.queries:
        # assumes each article carries a numeric score_vector -- TODO confirm
        datapoints = [a.score_vector for a in query.articles]
        total_datapoints += datapoints
        datapoints_by_language[query.language] = datapoints
    # Debug: report how many points the PCA will be fit on.
    print >> sys.stderr,len(total_datapoints)
    # Fitting the PCA
    pca.fit(total_datapoints)
    # Projecting articles by language
    results = []
    for query in query_job.queries:
        datapoints = datapoints_by_language[query.language]
        projection = pca.transform(datapoints)
        results.append(dict(query=query.text, language=query.language, results=toArray(projection)))
    return results
def get_results(query, language):
    """Crawl articles for *query* in *language* and return their PCA
    projections (see :func:`project`).

    WARNING: this drops the entire configured database on every call,
    so all previously crawled data is destroyed.
    """
    db = mongoengine.connect(config.db_name)
    # Destructive: wipe the database before re-crawling.
    db.drop_database(config.db_name)
    query_job = enqueue_query(query, language)
    process_query(query_job)
    return project(query_job)
if __name__ == '__main__':
print get_results('coffee', 'en')
| apache-2.0 |
Froff/TFY4115-Simulering | python/main.py | 1 | 1573 | #!/usr/bin/python3
import matplotlib.pyplot as plt
import matplotlib.text as txt
import numpy as np
import math
import sys
from Slope import Slope
from SlopeDict import slopeDict
from Simulation import Simulation
from PhysicalSeries import PhysicalSeries
import os.path
# Default slope profile; overridden by the first CLI argument below.
slope_type = "linje"
if __name__ == "__main__":
    try:
        slope_type = sys.argv[1]
    except IndexError:
        print("Usage: main.py <linje|optimal|sinus>")
        exit(2)
    # Measured ("data") and analytical ("teori") slope profiles for this type.
    data_slope = slopeDict[slope_type]["data"]
    theory_slope = slopeDict[slope_type]["teori"]
    physical_series = PhysicalSeries(slope_type)
    # NOTE(review): t_axis appears unused in the rest of this script.
    t_axis = [0]
    # 2/3 is the moment-of-inertia coefficient for a hollow sphere/shell.
    data_sim = Simulation(data_slope, momentofintertiacoefficient = 2/3)
    theory_sim = Simulation(theory_slope, momentofintertiacoefficient = 2/3)
    data_sim.runSimulation()
    theory_sim.runSimulation()
    print("Slope type is {}".format(slope_type))
    print("End time for data based slope: {}".format(data_sim.t[-1]))
    print("End time for theoretical slope: {}".format(theory_sim.t[-1]))
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    data_plot = ax.plot(data_sim.t, data_sim.x, color='blue')
    theory_plot = ax.plot(theory_sim.t, theory_sim.x, color='orange')
    # NOTE(review): 'physical_plot' is only bound inside this loop; if
    # series_collection is empty the legend call below raises NameError —
    # confirm at least one physical series always exists.
    for series in physical_series.series_collection:
        #### Plots x(t) for a single physical test ####
        physical_plot = ax.plot(series[0], series[1], color='red', linewidth=0.2)
    ax.set_xlabel("t (s)")
    ax.set_ylabel("x (cm)")
    ax.set_title("Bane av typen {}".format(slope_type))
    ax.legend((data_plot[0], theory_plot[0], physical_plot[0]), ("Simulering, fysisk bane", "Simulering, teoretisk bane", "Forsøk"))
    #plt.show()
| mit |
MartinDelzant/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
    """
    This produces a dataset 5 times bigger than the original one,
    by moving the 8x8 images in X around by 1px to left, right, down, up

    Parameters
    ----------
    X : ndarray of shape (n_samples, 64)
        Flattened 8x8 greyscale images.
    Y : ndarray of shape (n_samples,)
        Labels; replicated once per shift direction plus the original.

    Returns
    -------
    X, Y : ndarray
        Augmented data of shape (5 * n_samples, 64) and matching labels.
    """
    # One one-hot 3x3 kernel per shift direction. Convolving with a kernel
    # whose single 1 is offset from the center translates the image by 1px
    # (mode='constant' zero-pads, so pixels shifted off the edge are lost).
    direction_vectors = [
        [[0, 1, 0],
         [0, 0, 0],
         [0, 0, 0]],
        [[0, 0, 0],
         [1, 0, 0],
         [0, 0, 0]],
        [[0, 0, 0],
         [0, 0, 1],
         [0, 0, 0]],
        [[0, 0, 0],
         [0, 0, 0],
         [0, 1, 0]]]

    def shift(x, w):
        # Reshape the flat row to 8x8, shift it via convolution with the
        # one-hot kernel w, and flatten back. (Named def instead of a
        # lambda assignment, per PEP 8 E731.)
        return convolve(x.reshape((8, 8)), mode='constant', weights=w).ravel()

    # Stack the original images with one shifted copy per direction.
    X = np.concatenate([X] +
                       [np.apply_along_axis(shift, 1, X, vector)
                        for vector in direction_vectors])
    Y = np.concatenate([Y for _ in range(5)], axis=0)
    return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
# Augment to 5x the samples via 1px shifts (see nudge_dataset above).
X, Y = nudge_dataset(X, digits.target)
# Per-pixel min-max scaling; +0.0001 guards against division by zero for
# pixels that are constant across the dataset.
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001)  # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
                                                    test_size=0.2,
                                                    random_state=0)
# Models we will use: RBM feature extraction chained into logistic regression.
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression on raw pixels, as a baseline for comparison.
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation: per-class precision/recall/F1 for both models.
print()
print("Logistic regression using RBM features:\n%s\n" % (
    metrics.classification_report(
        Y_test,
        classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
    metrics.classification_report(
        Y_test,
        logistic_classifier.predict(X_test))))
###############################################################################
# Plotting: show each of the 100 learned RBM components as an 8x8 image.
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
    plt.subplot(10, 10, i + 1)
    plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-0.18.1/examples/ensemble/plot_gradient_boosting_oob.py | 82 | 4768 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
# Bernoulli probability via a logistic link on a nonlinear score of x1..x3.
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
                                                    random_state=9)
# Fit classifier with out-of-bag estimates
# (subsample < 1.0 is required for OOB improvements to be computed).
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
          'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
# 1-based boosting-iteration axis used for all plots below.
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
    """Compute the heldout deviance of ``clf`` on ``X_test``/``y_test``
    after each boosting stage.

    Returns an array of length ``n_estimators`` (module-level constant);
    any entries beyond the number of fitted stages remain zero.
    """
    deviances = np.zeros((n_estimators,), dtype=np.float64)
    staged_preds = clf.staged_decision_function(X_test)
    for stage, decision in enumerate(staged_preds):
        deviances[stage] = clf.loss_(y_test, decision)
    return deviances
def cv_estimate(n_splits=3):
    """Average the per-stage heldout deviance over ``n_splits`` KFold
    splits of the module-level training data (X_train, y_train)."""
    splitter = KFold(n_splits=n_splits)
    fold_model = ensemble.GradientBoostingClassifier(**params)
    total = np.zeros((n_estimators,), dtype=np.float64)
    for train_idx, test_idx in splitter.split(X_train, y_train):
        fold_model.fit(X_train[train_idx], y_train[train_idx])
        total += heldout_score(fold_model, X_train[test_idx], y_train[test_idx])
    return total / n_splits
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
# (so its minimum marks the OOB-estimated optimal iteration count)
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves (RGB 0-255 scaled to matplotlib's 0-1)
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks, labeled by which estimator chose them
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
                      [oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
                        ['OOB', 'CV', 'Test'])
# keep tick positions (numeric + the three named ones) in sorted order
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
carlgogo/vip_exoplanets | vip_hci/negfc/simplex_optim.py | 2 | 25988 | #! /usr/bin/env python
"""
Module with simplex (Nelder-Mead) optimization for defining the flux and
position of a companion using the Negative Fake Companion.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from .simplex_fmerit import chisquare, get_mu_and_sigma
from ..pca import pca_annulus
from ..var import frame_center
from ..conf import time_ini, timing
from ..conf.utils_conf import sep
__author__ = 'O. Wertz, C. A. Gomez Gonzalez, V. Christiaens'
__all__ = ['firstguess']
def firstguess_from_coord(planet, center, cube, angs, PLSC, psf, fwhm,
                          annulus_width, aperture_radius, ncomp, cube_ref=None,
                          svd_mode='lapack', scaling=None, fmerit='sum',
                          imlib='opencv', interpolation='lanczos4',
                          collapse='median', algo=pca_annulus, delta_rot=1,
                          algo_options={}, f_range=None, transmission=None,
                          mu_sigma=None, weights=None, plot=False,
                          verbose=True, save=False, debug=False):
    """ Determine a first guess for the flux of a companion at a given position
    in the cube by doing a simple grid search evaluating the reduced chi2.
    Parameters
    ----------
    planet: numpy.array
        The (x,y) position of the planet in the pca processed cube.
    center: numpy.array
        The (x,y) position of the cube center.
    cube: numpy.array
        The cube of fits images expressed as a numpy.array.
    angs: numpy.array
        The parallactic angle fits image expressed as a numpy.array.
    PLSC: float
        The platescale, in arcsec per pixel.
    psf: numpy.array
        The scaled psf expressed as a numpy.array.
    fwhm : float
        The FHWM in pixels.
    annulus_width: int, optional
        The width in pixels of the annulus on which the PCA is done.
    aperture_radius: int, optional
        The radius of the circular aperture in terms of the FWHM.
    ncomp: int
        The number of principal components.
    cube_ref : numpy ndarray, 3d, optional
        Reference library cube. For Reference Star Differential Imaging.
    svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
        Switch for different ways of computing the SVD and selected PCs.
    scaling : {'temp-mean', 'temp-standard'} or None, optional
        With None, no scaling is performed on the input data before SVD. With
        "temp-mean" then temporal px-wise mean subtraction is done and with
        "temp-standard" temporal mean centering plus scaling to unit variance
        is done.
    fmerit : {'sum', 'stddev'}, string optional
        Figure of merit to be used, if mu_sigma is set to None.
    imlib : str, optional
        See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
    interpolation : str, optional
        See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
    collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
        Sets the way of collapsing the frames for producing a final image. If
        None then the cube of residuals is used when measuring the function of
        merit (instead of a single final frame).
    algo: python routine, opt {pca_annulus, pca_annular, pca, custom}
        Routine to be used to model and subtract the stellar PSF. From an input
        cube, derotation angles, and optional arguments, it should return a
        post-processed frame.
    delta_rot: float, optional
        If algo is set to pca_annular, delta_rot is the angular threshold used
        to select frames in the PCA library (see description of pca_annular).
    algo_options: dict, opt
        Dictionary with additional parameters for the pca algorithm (e.g. tol,
        min_frames_lib, max_frames_lib). Note: arguments such as svd_mode,
        scaling imlib, interpolation or collapse can also be included in this
        dict (the latter are also kept as function arguments for compatibility
        with older versions of vip).
    f_range: numpy.array, optional
        The range of flux tested values. If None, 100 values between 0 and 5000
        are tested.
    transmission: numpy array, optional
        Array with 2 columns. First column is the radial separation in pixels.
        Second column is the off-axis transmission (between 0 and 1) at the
        radial separation given in column 1.
    mu_sigma: tuple of 2 floats or None, opt
        If set to None: not used, and falls back to original version of the
        algorithm, using fmerit. Otherwise, should be a tuple of 2 elements,
        containing the mean and standard deviation of pixel intensities in an
        annulus centered on the location of the companion, excluding the area
        directly adjacent to the companion.
    weights : 1d array, optional
        If provided, the negative fake companion fluxes will be scaled according
        to these weights before injection in the cube. Can reflect changes in
        the observing conditions throughout the sequence.
    plot: boolean, optional
        If True, the figure chi2 vs. flux is displayed.
    verbose: boolean
        If True, display intermediate info in the shell.
    save: boolean, optional
        If True, the figure chi2 vs. flux is saved as .pdf if plot is also True
    Returns
    -------
    out : numpy.array
        The radial coordinates and the flux of the companion.
    """
    # Convert the planet (x, y) position into polar coordinates (r, theta)
    # relative to the frame center; theta in [0, 360) degrees.
    xy = planet-center
    r0 = np.sqrt(xy[0]**2 + xy[1]**2)
    theta0 = np.mod(np.arctan2(xy[1], xy[0]) / np.pi*180, 360)
    if f_range is not None:
        n = f_range.shape[0]
    else:
        # Default grid: 100 flux values linearly spaced in [0, 5000].
        n = 100
        f_range = np.linspace(0, 5000, n)
    chi2r = []
    if verbose:
        print('Step | flux | chi2r')
    counter = 0
    for j, f_guess in enumerate(f_range):
        # Reduced chi2 of the residuals after injecting a negative fake
        # companion of flux f_guess at the fixed position (r0, theta0).
        chi2r.append(chisquare((r0, theta0, f_guess), cube, angs, PLSC, psf,
                               fwhm, annulus_width, aperture_radius,
                               (r0, theta0), ncomp, cube_ref, svd_mode,
                               scaling, fmerit, collapse, algo, delta_rot,
                               imlib, interpolation, algo_options, transmission,
                               mu_sigma, weights, debug))
        # Early stop once the chi2 has increased 4 times along the grid.
        # NOTE(review): the count is cumulative (never reset), so four
        # non-consecutive increases also terminate the search; at j == 0 the
        # comparison is against chi2r[-1] (the element itself) and is False.
        if chi2r[j] > chi2r[j-1]:
            counter += 1
        if counter == 4:
            break
        if verbose:
            print('{}/{}   {:.3f}   {:.3f}'.format(j+1, n, f_guess, chi2r[j]))
    chi2r = np.array(chi2r)
    # Flux minimizing the chi2 over the (possibly truncated) grid.
    f0 = f_range[chi2r.argmin()]
    if plot:
        plt.figure(figsize=(8, 4))
        plt.title('$\chi^2_{r}$ vs flux')
        plt.xlim(f_range[0], f_range[:chi2r.shape[0]].max())
        plt.ylim(chi2r.min()*0.9, chi2r.max()*1.1)
        plt.plot(f_range[:chi2r.shape[0]], chi2r, linestyle='-', color='gray',
                 marker='.', markerfacecolor='r', markeredgecolor='r')
        plt.xlabel('flux')
        plt.ylabel(r'$\chi^2_{r}$')
        plt.grid('on')
        if save:
            plt.savefig('chi2rVSflux.pdf')
    if plot:
        plt.show()
    return r0, theta0, f0
def firstguess_simplex(p, cube, angs, psf, plsc, ncomp, fwhm, annulus_width,
                       aperture_radius, cube_ref=None, svd_mode='lapack',
                       scaling=None, fmerit='sum', imlib='opencv',
                       interpolation='lanczos4', collapse='median',
                       algo=pca_annulus, delta_rot=1, algo_options={},
                       p_ini=None, transmission=None, mu_sigma=None,
                       weights=None, force_rPA=False, options=None,
                       verbose=False, **kwargs):
    """
    Determine the position of a companion using the negative fake companion
    technique and a standard minimization algorithm (Default=Nelder-Mead) .
    Parameters
    ----------
    p : np.array
        Estimate of the candidate position.
    cube: numpy.array
        The cube of fits images expressed as a numpy.array.
    angs: numpy.array
        The parallactic angle fits image expressed as a numpy.array.
    psf: numpy.array
        The scaled psf expressed as a numpy.array.
    plsc: float
        The platescale, in arcsec per pixel.
    ncomp: int or None
        The number of principal components.
    fwhm : float
        The FHWM in pixels.
    annulus_width: int, optional
        The width in pixels of the annulus on which the PCA is done.
    aperture_radius: int, optional
        The radius of the circular aperture in terms of the FWHM.
    cube_ref : numpy ndarray, 3d, optional
        Reference library cube. For Reference Star Differential Imaging.
    svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
        Switch for different ways of computing the SVD and selected PCs.
    scaling : {'temp-mean', 'temp-standard'} or None, optional
        With None, no scaling is performed on the input data before SVD. With
        "temp-mean" then temporal px-wise mean subtraction is done and with
        "temp-standard" temporal mean centering plus scaling to unit variance
        is done.
    fmerit : {'sum', 'stddev'}, string optional
        Figure of merit to be used, if mu_sigma is set to None.
    imlib : str, optional
        See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
    interpolation : str, optional
        See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
    collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
        Sets the way of collapsing the frames for producing a final image. If
        None then the cube of residuals is used when measuring the function of
        merit (instead of a single final frame).
    algo: python routine, opt {pca_annulus, pca_annular, pca, custom}
        Routine to be used to model and subtract the stellar PSF. From an input
        cube, derotation angles, and optional arguments, it should return a
        post-processed frame.
    delta_rot: float, optional
        If algo is set to pca_annular, delta_rot is the angular threshold used
        to select frames in the PCA library (see description of pca_annular).
    algo_options: dict, opt
        Dictionary with additional parameters for the pca algorithm (e.g. tol,
        min_frames_lib, max_frames_lib). Note: arguments such as svd_mode,
        scaling imlib, interpolation or collapse can also be included in this
        dict (the latter are also kept as function arguments for compatibility
        with older versions of vip).
    p_ini : np.array
        Position (r, theta) of the circular aperture center.
    transmission: numpy array, optional
        Array with 2 columns. First column is the radial separation in pixels.
        Second column is the off-axis transmission (between 0 and 1) at the
        radial separation given in column 1.
    mu_sigma: tuple of 2 floats or None, opt
        If set to None: not used, and falls back to original version of the
        algorithm, using fmerit. Otherwise, should be a tuple of 2 elements,
        containing the mean and standard deviation of pixel intensities in an
        annulus centered on the location of the companion, excluding the area
        directly adjacent to the companion.
    weights : 1d array, optional
        If provided, the negative fake companion fluxes will be scaled according
        to these weights before injection in the cube. Can reflect changes in
        the observing conditions throughout the sequence.
    force_rPA: bool, optional
        Whether to only search for optimal flux, provided (r,PA).
    options: dict, optional
        The scipy.optimize.minimize options.
    verbose : boolean, optional
        If True, additional information is printed out.
    **kwargs: optional
        Optional arguments to the scipy.optimize.minimize function
    Returns
    -------
    out : scipy.optimize.minimize solution object
        The solution of the minimization algorithm.
    """
    if verbose:
        print('\nNelder-Mead minimization is running...')
    if p_ini is None:
        p_ini = p
    if force_rPA:
        # Optimize the flux only: the free parameter vector collapses to
        # (f,), while (r, theta) are frozen at their input values via p_ini.
        p_t = (p[-1],)
        p_ini = (p[0],p[1])
    else:
        p_t = p
    # Minimize the same chisquare merit function used for the grid search,
    # starting the simplex from the preliminary guess p_t.
    solu = minimize(chisquare, p_t, args=(cube, angs, plsc, psf, fwhm,
                                          annulus_width, aperture_radius, p_ini,
                                          ncomp, cube_ref, svd_mode, scaling,
                                          fmerit, collapse, algo, delta_rot,
                                          imlib, interpolation, algo_options,
                                          transmission, mu_sigma, weights,
                                          force_rPA),
                    method='Nelder-Mead', options=options, **kwargs)
    if verbose:
        print(solu)
    return solu
def firstguess(cube, angs, psfn, ncomp, plsc, planets_xy_coord, fwhm=4,
               annulus_width=4, aperture_radius=1, cube_ref=None,
               svd_mode='lapack', scaling=None, fmerit='sum', imlib='opencv',
               interpolation='lanczos4', collapse='median', algo=pca_annulus,
               delta_rot=1, p_ini=None, f_range=None, transmission=None,
               mu_sigma=None, wedge=None, weights=None, force_rPA=False,
               algo_options={}, simplex=True, simplex_options=None, plot=False,
               verbose=True, save=False):
    """ Determines a first guess for the position and the flux of a planet.
    We process the cube without injecting any negative fake companion.
    This leads to the visual detection of the planet(s). For each of them,
    one can estimate the (x,y) coordinates in pixel for the position of the
    star, as well as the planet(s).
    From the (x,y) coordinates in pixels for the star and planet(s), we can
    estimate a preliminary guess for the position and flux for each planet
    by using the method "firstguess_from_coord". The argument "f_range" allows
    to indicate prior limits for the flux (optional, default: None).
    This step can be reiterate to refine the preliminary guess for the flux.
    We can go a step further by using a Simplex Nelder_Mead minimization to
    estimate the first guess based on the preliminary guess.
    Parameters
    ----------
    cube: numpy.array
        The cube of fits images expressed as a numpy.array.
    angs: numpy.array
        The parallactic angle fits image expressed as a numpy.array.
    psfn: numpy.array
        The centered and normalized (flux in a 1*FWHM aperture must equal 1)
        PSF 2d-array.
    ncomp: int
        The number of principal components.
    plsc: float
        The platescale, in arcsec per pixel.
    planets_xy_coord: array or list
        The list of (x,y) positions of the planets.
    fwhm : float, optional
        The FHWM in pixels.
    annulus_width: int, optional
        The width in pixels of the annulus on which the PCA is done.
    aperture_radius: int, optional
        The radius of the circular aperture in terms of the FWHM.
    cube_ref : numpy ndarray, 3d, optional
        Reference library cube. For Reference Star Differential Imaging.
    svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
        Switch for different ways of computing the SVD and selected PCs.
    scaling : {'temp-mean', 'temp-standard'} or None, optional
        With None, no scaling is performed on the input data before SVD. With
        "temp-mean" then temporal px-wise mean subtraction is done and with
        "temp-standard" temporal mean centering plus scaling to unit variance
        is done.
    fmerit : {'sum', 'stddev'}, string optional
        Figure of merit to be used, if mu_sigma is set to None.
    imlib : str, optional
        See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
    interpolation : str, optional
        See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
    collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, opt
        Sets the way of collapsing the frames for producing a final image. If
        None then the cube of residuals is used when measuring the function of
        merit (instead of a single final frame).
    algo: python routine, opt {pca_annulus, pca_annular, pca, custom}
        Routine to be used to model and subtract the stellar PSF. From an input
        cube, derotation angles, and optional arguments, it should return a
        post-processed frame.
    delta_rot: float, optional
        If algo is set to pca_annular, delta_rot is the angular threshold used
        to select frames in the PCA library (see description of pca_annular).
    p_ini: numpy.array
        Position (r, theta) of the circular aperture center.
    f_range: numpy.array, optional
        The range of flux tested values. If None, 100 values between 0 and 5000
        are tested.
    transmission: numpy array, optional
        Array with 2 columns. First column is the radial separation in pixels.
        Second column is the off-axis transmission (between 0 and 1) at the
        radial separation given in column 1.
    mu_sigma: tuple of 2 floats, bool or None, opt
        If set to None: not used, and falls back to original version of the
        algorithm, using fmerit.
        If a tuple of 2 elements: should be the mean and standard deviation of
        pixel intensities in an annulus centered on the location of the
        companion candidate, excluding the area directly adjacent to the CC.
        If set to anything else, but None/False/tuple: will compute said mean
        and standard deviation automatically.
    wedge: tuple, opt
        Range in theta where the mean and standard deviation are computed in an
        annulus defined in the PCA image. If None, it will be calculated
        automatically based on initial guess and derotation angles to avoid.
        If some disc signal is present elsewhere in the annulus, it is
        recommended to provide wedge manually. The provided range should be
        continuous and >0. E.g. provide (270, 370) to consider a PA range
        between [-90,+10].
    weights : 1d array, optional
        If provided, the negative fake companion fluxes will be scaled according
        to these weights before injection in the cube. Can reflect changes in
        the observing conditions throughout the sequence.
    force_rPA: bool, optional
        Whether to only search for optimal flux, provided (r,PA).
    algo_options: dict, opt
        Dictionary with additional parameters for the pca algorithm (e.g. tol,
        min_frames_lib, max_frames_lib). Note: arguments such as svd_mode,
        scaling imlib, interpolation or collapse can also be included in this
        dict (the latter are also kept as function arguments for compatibility
        with older versions of vip).
    simplex: bool, optional
        If True, the Nelder-Mead minimization is performed after the flux grid
        search.
    simplex_options: dict, optional
        The scipy.optimize.minimize options.
    plot: boolean, optional
        If True, the figure chi2 vs. flux is displayed.
    verbose: bool, optional
        If True, display intermediate info in the shell.
    save: bool, optional
        If True, the figure chi2 vs. flux is saved.
    Returns
    -------
    out : The radial coordinates and the flux of the companion.
    Notes
    -----
    Polar angle is not the conventional NORTH-TO-EAST P.A.
    """
    if verbose:
        start_time = time_ini()
    planets_xy_coord = np.array(planets_xy_coord)
    n_planet = planets_xy_coord.shape[0]
    center_xy_coord = np.array(frame_center(cube[0]))
    # Output arrays: one (r, theta, flux) triplet per planet.
    r_0 = np.zeros(n_planet)
    theta_0 = np.zeros_like(r_0)
    f_0 = np.zeros_like(r_0)
    if weights is not None:
        # Reject weight vectors that do not match the temporal dimension.
        if len(weights) != cube.shape[0]:
            raise TypeError("Weights should have same length as cube axis 0")
        norm_weights = weights / np.sum(weights)
    else:
        norm_weights = None
    for index_planet in range(n_planet):
        if verbose:
            print('\n'+sep)
            print(' Planet {} '.format(index_planet))
            print(sep+'\n')
            msg2 = 'Planet {}: flux estimation at the position [{},{}], '
            msg2 += 'running ...'
            print(msg2.format(index_planet, planets_xy_coord[index_planet, 0],
                              planets_xy_coord[index_planet, 1]))
        # Measure mu and sigma once in the annulus (instead of each MCMC step)
        if isinstance(mu_sigma, tuple):
            if len(mu_sigma) != 2:
                raise TypeError("If a tuple, mu_sigma must have 2 elements")
        elif mu_sigma is not None:
            xy = planets_xy_coord[index_planet]-center_xy_coord
            r0 = np.sqrt(xy[0]**2 + xy[1]**2)
            theta0 = np.mod(np.arctan2(xy[1], xy[0]) / np.pi*180, 360)
            # NOTE(review): after the first planet, mu_sigma holds the tuple
            # computed for planet 0 and is *not* recomputed for subsequent
            # planets (the isinstance branch above short-circuits). Confirm
            # whether this reuse is intended for multi-planet input.
            mu_sigma = get_mu_and_sigma(cube, angs, ncomp, annulus_width,
                                        aperture_radius, fwhm, r0,
                                        theta0, cube_ref=cube_ref,
                                        wedge=wedge, svd_mode=svd_mode,
                                        scaling=scaling, algo=algo,
                                        delta_rot=delta_rot, imlib=imlib,
                                        interpolation=interpolation,
                                        collapse=collapse, weights=norm_weights,
                                        algo_options=algo_options)
        # Step 1: coarse flux grid search at the fixed (x, y) position.
        res_init = firstguess_from_coord(planets_xy_coord[index_planet],
                                         center_xy_coord, cube, angs, plsc,
                                         psfn, fwhm, annulus_width,
                                         aperture_radius, ncomp,
                                         f_range=f_range, cube_ref=cube_ref,
                                         svd_mode=svd_mode, scaling=scaling,
                                         fmerit=fmerit, imlib=imlib,
                                         collapse=collapse, algo=algo,
                                         delta_rot=delta_rot,
                                         interpolation=interpolation,
                                         algo_options=algo_options,
                                         transmission=transmission,
                                         mu_sigma=mu_sigma, weights=weights,
                                         plot=plot, verbose=verbose, save=save)
        r_pre, theta_pre, f_pre = res_init
        if verbose:
            msg3 = 'Planet {}: preliminary guess: (r, theta, f)=({:.1f}, '
            msg3 += '{:.1f}, {:.1f})'
            print(msg3.format(index_planet, r_pre, theta_pre, f_pre))
        # Step 2 (optional): refine (r, theta, f) — or f only if force_rPA —
        # with a Nelder-Mead simplex starting from the preliminary guess.
        if simplex or force_rPA:
            if verbose:
                msg4 = 'Planet {}: Simplex Nelder-Mead minimization, '
                msg4 += 'running ...'
                print(msg4.format(index_planet))
            if simplex_options is None:
                simplex_options = {'xatol': 1e-6, 'fatol': 1e-6,
                                   'maxiter': 800, 'maxfev': 2000}
            res = firstguess_simplex((r_pre, theta_pre, f_pre), cube, angs,
                                     psfn, plsc, ncomp, fwhm, annulus_width,
                                     aperture_radius, cube_ref=cube_ref,
                                     svd_mode=svd_mode, scaling=scaling,
                                     fmerit=fmerit, imlib=imlib,
                                     interpolation=interpolation,
                                     collapse=collapse, algo=algo,
                                     delta_rot=delta_rot,
                                     algo_options=algo_options,
                                     p_ini=p_ini, transmission=transmission,
                                     mu_sigma=mu_sigma, weights=weights,
                                     force_rPA=force_rPA,
                                     options=simplex_options, verbose=False)
            if force_rPA:
                r_0[index_planet], theta_0[index_planet] = (r_pre, theta_pre)
                f_0[index_planet], = res.x
            else:
                r_0[index_planet], theta_0[index_planet], f_0[index_planet] = res.x
            if verbose:
                msg5 = 'Planet {}: Success: {}, nit: {}, nfev: {}, chi2r: {}'
                print(msg5.format(index_planet, res.success, res.nit, res.nfev,
                                  res.fun))
                print('message: {}'.format(res.message))
        else:
            if verbose:
                msg4bis = 'Planet {}: Simplex Nelder-Mead minimization skipped.'
                print(msg4bis.format(index_planet))
            r_0[index_planet] = r_pre
            theta_0[index_planet] = theta_pre
            f_0[index_planet] = f_pre
        if verbose:
            centy, centx = frame_center(cube[0])
            # BUGFIX: use the radius/angle of the *current* planet. The old
            # code computed posx/posy from the full r_0 array and printed
            # posx[0]/posy[0], i.e. the position derived from r_0[0], which
            # was wrong whenever index_planet > 0.
            rad = r_0[index_planet]
            ang = np.deg2rad(theta_0[index_planet])
            posy = rad * np.sin(ang) + centy
            posx = rad * np.cos(ang) + centx
            msg6 = 'Planet {}: simplex result: (r, theta, f)=({:.3f}, {:.3f}'
            msg6 += ', {:.3f}) at \n (X,Y)=({:.2f}, {:.2f})'
            print(msg6.format(index_planet, r_0[index_planet],
                              theta_0[index_planet], f_0[index_planet],
                              posx, posy))
    if verbose:
        print('\n', sep, '\nDONE !\n', sep)
        timing(start_time)
    return r_0, theta_0, f_0
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/setup.py | 1 | 11778 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
import subprocess
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
# Custom distutils `clean` command that, in addition to the standard clean,
# removes Cython-generated artifacts: generated .c/.cpp files, compiled
# extensions and __pycache__ directories under sklearn/.
class CleanCommand(Clean):
    description = "Remove build artifacts from the source tree"

    def run(self):
        """Run the standard distutils clean, then delete build artifacts."""
        Clean.run(self)
        # Remove c files if we are not within a sdist package
        cwd = os.path.abspath(os.path.dirname(__file__))
        # PKG-INFO is only present in an sdist; there the shipped .c files
        # must be kept, since end users may not have Cython installed.
        remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
        if remove_c_files:
            # Drop the cythonize hash cache so sources regenerate next build.
            cython_hash_file = os.path.join(cwd, 'cythonize.dat')
            if os.path.exists(cython_hash_file):
                os.unlink(cython_hash_file)
            print('Will remove generated .c files')
        if os.path.exists('build'):
            shutil.rmtree('build')
        for dirpath, dirnames, filenames in os.walk('sklearn'):
            for filename in filenames:
                # Compiled extension artifacts are always removed.
                if any(filename.endswith(suffix) for suffix in
                       (".so", ".pyd", ".dll", ".pyc")):
                    os.unlink(os.path.join(dirpath, filename))
                    continue
                extension = os.path.splitext(filename)[1]
                if remove_c_files and extension in ['.c', '.cpp']:
                    # Only delete a .c/.cpp file when the matching .pyx source
                    # exists, i.e. the file was generated by Cython.
                    pyx_file = str.replace(filename, extension, '.pyx')
                    if os.path.exists(os.path.join(dirpath, pyx_file)):
                        os.unlink(os.path.join(dirpath, filename))
            for dirname in dirnames:
                if dirname == '__pycache__':
                    shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the sklearn package tree.

    Passed to numpy.distutils.core.setup() as the ``configuration``
    callable; it recursively collects the sub-package configurations
    declared under ``sklearn``.
    """
    if os.path.exists('MANIFEST'):
        # A stale distutils MANIFEST can shadow MANIFEST.in; remove it.
        os.remove('MANIFEST')

    from numpy.distutils.misc_util import Configuration
    config = Configuration(None, parent_package, top_path)

    # Avoid non-useful msg:
    # "Ignoring attempt to set 'name' (from ... "
    config.set_options(ignore_setup_xxx_py=True,
                       assume_default_configuration=True,
                       delegate_options_to_subpackages=True,
                       quiet=True)

    config.add_subpackage('sklearn')

    return config
scipy_min_version = '0.9'
numpy_min_version = '1.6.1'
def get_scipy_status():
    """
    Returns a dictionary containing a boolean specifying whether SciPy
    is up-to-date, along with the version string (empty string if
    not installed).
    """
    status = {}
    try:
        import scipy
    except ImportError:
        # SciPy absent: report not-up-to-date with an empty version string.
        status['up_to_date'] = False
        status['version'] = ""
    else:
        installed = scipy.__version__
        status['up_to_date'] = (parse_version(installed) >=
                                parse_version(scipy_min_version))
        status['version'] = installed
    return status
def get_numpy_status():
    """
    Returns a dictionary containing a boolean specifying whether NumPy
    is up-to-date, along with the version string (empty string if
    not installed).
    """
    status = {}
    try:
        import numpy
    except ImportError:
        # NumPy absent: report not-up-to-date with an empty version string.
        status['up_to_date'] = False
        status['version'] = ""
    else:
        installed = numpy.__version__
        status['up_to_date'] = (parse_version(installed) >=
                                parse_version(numpy_min_version))
        status['version'] = installed
    return status
def generate_cython():
    """Regenerate .c sources by running build_tools/cythonize.py on sklearn/."""
    cwd = os.path.abspath(os.path.dirname(__file__))
    print("Cythonizing sources")
    cythonize_script = os.path.join(cwd, 'build_tools', 'cythonize.py')
    # Run the helper with the same interpreter that runs setup.py.
    returncode = subprocess.call(
        [sys.executable, cythonize_script, 'sklearn'], cwd=cwd)
    if returncode != 0:
        raise RuntimeError("Running cythonize failed!")
def setup_package():
    """Top-level setup entry point.

    Two code paths:

    * For informational / cleanup commands (``--help``, ``egg_info``,
      ``clean`` ...) a plain setuptools/distutils ``setup`` is used so the
      command succeeds without NumPy being installed.
    * Otherwise, NumPy/SciPy minimum versions are enforced, Cython sources
      are regenerated when building from a checkout (no PKG-INFO), and
      ``numpy.distutils.core.setup`` drives the compiled build.
    """
    metadata = dict(name=DISTNAME,
                    maintainer=MAINTAINER,
                    maintainer_email=MAINTAINER_EMAIL,
                    description=DESCRIPTION,
                    license=LICENSE,
                    url=URL,
                    version=VERSION,
                    download_url=DOWNLOAD_URL,
                    long_description=LONG_DESCRIPTION,
                    classifiers=['Intended Audience :: Science/Research',
                                 'Intended Audience :: Developers',
                                 'License :: OSI Approved',
                                 'Programming Language :: C',
                                 'Programming Language :: Python',
                                 'Topic :: Software Development',
                                 'Topic :: Scientific/Engineering',
                                 'Operating System :: Microsoft :: Windows',
                                 'Operating System :: POSIX',
                                 'Operating System :: Unix',
                                 'Operating System :: MacOS',
                                 'Programming Language :: Python :: 2',
                                 'Programming Language :: Python :: 2.6',
                                 'Programming Language :: Python :: 2.7',
                                 'Programming Language :: Python :: 3',
                                 'Programming Language :: Python :: 3.3',
                                 'Programming Language :: Python :: 3.4',
                                 ],
                    cmdclass=cmdclass,
                    **extra_setuptools_args)

    if len(sys.argv) == 1 or (
            len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
                                    sys.argv[1] in ('--help-commands',
                                                    'egg_info',
                                                    '--version',
                                                    'clean'))):
        # For these actions, NumPy is not required, nor Cythonization
        #
        # They are required to succeed without Numpy for example when
        # pip is used to install Scikit-learn when Numpy is not yet present in
        # the system.
        try:
            from setuptools import setup
        except ImportError:
            from distutils.core import setup

        metadata['version'] = VERSION
    else:
        numpy_status = get_numpy_status()
        numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
            numpy_min_version)
        scipy_status = get_scipy_status()
        scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
            scipy_min_version)

        instructions = ("Installation instructions are available on the "
                        "scikit-learn web: "
                        "http://scikit-learn.org/stable/install.html\n")

        if numpy_status['up_to_date'] is False:
            if numpy_status['version']:
                raise ImportError("Your installation of Numerical Python "
                                  "(NumPy) {0} is out-of-date.\n{1}{2}"
                                  .format(numpy_status['version'],
                                          numpy_req_str, instructions))
            else:
                raise ImportError("Numerical Python (NumPy) is not "
                                  "installed.\n{0}{1}"
                                  .format(numpy_req_str, instructions))
        if scipy_status['up_to_date'] is False:
            if scipy_status['version']:
                raise ImportError("Your installation of Scientific Python "
                                  "(SciPy) {0} is out-of-date.\n{1}{2}"
                                  .format(scipy_status['version'],
                                          scipy_req_str, instructions))
            else:
                raise ImportError("Scientific Python (SciPy) is not "
                                  "installed.\n{0}{1}"
                                  .format(scipy_req_str, instructions))

        from numpy.distutils.core import setup

        metadata['configuration'] = configuration

        # NOTE(review): `sys.argv[1] not in 'config'` is a substring test,
        # so arguments like 'on' or 'fig' would also skip cythonization --
        # presumably `!= 'config'` was intended; confirm before changing.
        if len(sys.argv) >= 2 and sys.argv[1] not in 'config':
            # Cythonize if needed
            print('Generating cython files')
            cwd = os.path.abspath(os.path.dirname(__file__))
            if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
                # Generate Cython sources, unless building from source release
                generate_cython()

            # Clean left-over .so file
            for dirpath, dirnames, filenames in os.walk(
                    os.path.join(cwd, 'sklearn')):
                for filename in filenames:
                    extension = os.path.splitext(filename)[1]
                    if extension in (".so", ".pyd", ".dll"):
                        # Remove compiled extensions whose .pyx source is gone.
                        pyx_file = str.replace(filename, extension, '.pyx')
                        print(pyx_file)
                        if not os.path.exists(os.path.join(dirpath, pyx_file)):
                            os.unlink(os.path.join(dirpath, filename))

    setup(**metadata)
if __name__ == "__main__":
setup_package()
| mit |
ueshin/apache-spark | python/pyspark/pandas/indexes/category.py | 15 | 7766 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from functools import partial
from typing import Any, no_type_check
import pandas as pd
from pandas.api.types import is_hashable
from pyspark import pandas as ps
from pyspark.pandas.indexes.base import Index
from pyspark.pandas.missing.indexes import MissingPandasLikeCategoricalIndex
from pyspark.pandas.series import Series
class CategoricalIndex(Index):
    """
    Index based on an underlying `Categorical`.

    CategoricalIndex can only take on a limited,
    and usually fixed, number of possible values (`categories`). Also,
    it might have an order, but numerical operations
    (additions, divisions, ...) are not possible.

    Parameters
    ----------
    data : array-like (1-dimensional)
        The values of the categorical. If `categories` are given, values not in
        `categories` will be replaced with NaN.
    categories : index-like, optional
        The categories for the categorical. Items need to be unique.
        If the categories are not given here (and also not in `dtype`), they
        will be inferred from the `data`.
    ordered : bool, optional
        Whether or not this categorical is treated as an ordered
        categorical. If not given here or in `dtype`, the resulting
        categorical will be unordered.
    dtype : CategoricalDtype or "category", optional
        If :class:`CategoricalDtype`, cannot be used together with
        `categories` or `ordered`.
    copy : bool, default False
        Make a copy of input ndarray.
    name : object, optional
        Name to be stored in the index.

    See Also
    --------
    Index : The base pandas-on-Spark Index type.

    Examples
    --------
    >>> ps.CategoricalIndex(["a", "b", "c", "a", "b", "c"])  # doctest: +NORMALIZE_WHITESPACE
    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
                     categories=['a', 'b', 'c'], ordered=False, dtype='category')

    ``CategoricalIndex`` can also be instantiated from a ``Categorical``:

    >>> c = pd.Categorical(["a", "b", "c", "a", "b", "c"])
    >>> ps.CategoricalIndex(c)  # doctest: +NORMALIZE_WHITESPACE
    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
                     categories=['a', 'b', 'c'], ordered=False, dtype='category')

    Ordered ``CategoricalIndex`` can have a min and max value.

    >>> ci = ps.CategoricalIndex(
    ...     ["a", "b", "c", "a", "b", "c"], ordered=True, categories=["c", "b", "a"]
    ... )
    >>> ci  # doctest: +NORMALIZE_WHITESPACE
    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
                     categories=['c', 'b', 'a'], ordered=True, dtype='category')

    From a Series:

    >>> s = ps.Series(["a", "b", "c", "a", "b", "c"], index=[10, 20, 30, 40, 50, 60])
    >>> ps.CategoricalIndex(s)  # doctest: +NORMALIZE_WHITESPACE
    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
                     categories=['a', 'b', 'c'], ordered=False, dtype='category')

    From an Index:

    >>> idx = ps.Index(["a", "b", "c", "a", "b", "c"])
    >>> ps.CategoricalIndex(idx)  # doctest: +NORMALIZE_WHITESPACE
    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
                     categories=['a', 'b', 'c'], ordered=False, dtype='category')
    """

    @no_type_check
    def __new__(cls, data=None, categories=None, ordered=None, dtype=None, copy=False, name=None):
        # Mirror pandas' constraint that an Index name must be hashable.
        if not is_hashable(name):
            raise TypeError("Index.name must be a hashable type")

        if isinstance(data, (Series, Index)):
            # Already-distributed input: just cast it to a categorical dtype
            # instead of round-tripping the data through pandas.
            if dtype is None:
                dtype = "category"
            return Index(data, dtype=dtype, copy=copy, name=name)

        # Driver-local input: build a pandas CategoricalIndex first, then
        # distribute it.
        return ps.from_pandas(
            pd.CategoricalIndex(
                data=data, categories=categories, ordered=ordered, dtype=dtype, name=name
            )
        )

    @property
    def codes(self) -> Index:
        """
        The category codes of this categorical.

        Codes are an Index of integers which are the positions of the actual
        values in the categories Index.

        There is no setter, use the other categorical methods and the normal item
        setter to change values in the categorical.

        Returns
        -------
        Index
            A non-writable view of the `codes` Index.

        Examples
        --------
        >>> idx = ps.CategoricalIndex(list("abbccc"))
        >>> idx  # doctest: +NORMALIZE_WHITESPACE
        CategoricalIndex(['a', 'b', 'b', 'c', 'c', 'c'],
                         categories=['a', 'b', 'c'], ordered=False, dtype='category')

        >>> idx.codes
        Int64Index([0, 1, 1, 2, 2, 2], dtype='int64')
        """
        # The underlying Spark column already stores the integer codes;
        # drop the name so the result matches pandas' unnamed codes index.
        return self._with_new_scol(self.spark.column).rename(None)

    @property
    def categories(self) -> pd.Index:
        """
        The categories of this categorical.

        Examples
        --------
        >>> idx = ps.CategoricalIndex(list("abbccc"))
        >>> idx  # doctest: +NORMALIZE_WHITESPACE
        CategoricalIndex(['a', 'b', 'b', 'c', 'c', 'c'],
                         categories=['a', 'b', 'c'], ordered=False, dtype='category')

        >>> idx.categories
        Index(['a', 'b', 'c'], dtype='object')
        """
        return self.dtype.categories

    @categories.setter
    def categories(self, categories: pd.Index) -> None:
        # Assigning categories in place is not supported on Spark-backed data.
        raise NotImplementedError()

    @property
    def ordered(self) -> bool:
        """
        Whether the categories have an ordered relationship.

        Examples
        --------
        >>> idx = ps.CategoricalIndex(list("abbccc"))
        >>> idx  # doctest: +NORMALIZE_WHITESPACE
        CategoricalIndex(['a', 'b', 'b', 'c', 'c', 'c'],
                         categories=['a', 'b', 'c'], ordered=False, dtype='category')

        >>> idx.ordered
        False
        """
        return self.dtype.ordered

    def __getattr__(self, item: str) -> Any:
        # Redirect pandas APIs that are not yet implemented here to the
        # curated "missing" stubs so users get a helpful error message.
        if hasattr(MissingPandasLikeCategoricalIndex, item):
            property_or_func = getattr(MissingPandasLikeCategoricalIndex, item)
            if isinstance(property_or_func, property):
                return property_or_func.fget(self)  # type: ignore
            else:
                return partial(property_or_func, self)
        raise AttributeError("'CategoricalIndex' object has no attribute '{}'".format(item))
def _test() -> None:
    """Run this module's doctests against a local SparkSession."""
    import os
    import doctest
    import sys
    from pyspark.sql import SparkSession
    import pyspark.pandas.indexes.category

    # Run from SPARK_HOME so relative paths used by the doctests resolve.
    os.chdir(os.environ["SPARK_HOME"])

    globs = pyspark.pandas.indexes.category.__dict__.copy()
    globs["ps"] = pyspark.pandas
    spark = (
        SparkSession.builder.master("local[4]")
        .appName("pyspark.pandas.indexes.category tests")
        .getOrCreate()
    )
    (failure_count, test_count) = doctest.testmod(
        pyspark.pandas.indexes.category,
        globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
    )
    spark.stop()
    if failure_count:
        # Non-zero exit code signals doctest failure to the CI harness.
        sys.exit(-1)
| apache-2.0 |
eastmallingresearch/crosslink | scripts/check_map_order.py | 1 | 4311 | #!/usr/bin/python
#Crosslink Copyright (C) 2016 NIAB EMR see included NOTICE file for details
'''
plot estimated versus correct map positions
colour coded to show marker type: maternal-only/paternal-only/both
'''
import argparse
ap = argparse.ArgumentParser(description=__doc__,formatter_class=argparse.ArgumentDefaultsHelpFormatter)
ap.add_argument('--inp',default=None,type=str,help='file containing list of marker names and positions in maternal/paternal/combined maps')
ap.add_argument('--maptype',default='map',type=str,help='map, loc or tmp')
ap.add_argument('--NA',default='NA',type=str,help='string used to denote missing values')
conf = ap.parse_args()
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
mat_col = '#ff0000'
pat_col = '#00ff00'
both_col = '#0000ff'
xx = []
yy = []
cc = []
def load_map(f, xx, yy, cc):
    """Load data from a 4-column map file into xx/yy/cc (in place)."""
    f.readline()  # skip header
    for line in f:
        fields = line.strip().split()
        assert len(fields) == 4
        marker = fields[0]
        # True position is encoded in the marker name; the estimate is the
        # combined-map position in the fourth column.
        true_pos = float(marker.split('_')[1][:-1])
        est_pos = float(fields[3])
        # Deduce marker type from the mat/pat map info tokens: a missing
        # maternal entry means paternal-only, and vice versa.
        if fields[1] == conf.NA:
            colour = pat_col
        elif fields[2] == conf.NA:
            colour = mat_col
        else:
            colour = both_col
        xx.append(true_pos)
        yy.append(est_pos)
        cc.append(colour)
def load_map2(f, xx, yy, cc):
    """Load data from a 2-column map file into xx/yy/cc (in place)."""
    for line in f:
        fields = line.strip().split()
        # Skip blank/short lines and anything that is not a marker row.
        if len(fields) < 2:
            continue
        if not fields[0].startswith("m"):
            continue
        assert len(fields) == 2
        marker = fields[0]
        true_pos = float(marker.split('_')[1][:-1])  # from the marker name
        est_pos = float(fields[1])  # estimated position from combined map
        marker_type = marker.split('_')[1][-1]
        # unknown colour type
        if marker_type == 'l':
            colour = mat_col
        elif marker_type == 'n':
            colour = pat_col
        else:
            colour = both_col
        xx.append(true_pos)
        yy.append(est_pos)
        cc.append(colour)
def load_tmp(f, xx, yy, cc):
    """Load data from the temporary 3-column format used during testing."""
    for line in f:
        fields = line.strip().split()
        assert len(fields) == 3
        marker = fields[0]
        true_pos = float(marker.split('_')[1])  # from the marker name
        marker_type = fields[1]  # 0=mat 1=pat
        est_pos = float(fields[2])
        # Deduce marker type from either the JoinMap-style tag or the digit.
        if marker_type in ('<lmxll>', '0'):
            colour = mat_col
        elif marker_type in ('<nnxnp>', '1'):
            colour = pat_col
        else:
            colour = both_col
        xx.append(true_pos)
        yy.append(est_pos)
        cc.append(colour)
def load_loc(f, xx, yy, cc):
    """Load data from a loc file into xx/yy/cc (in place).

    The true position is parsed from the marker name, the estimated
    "position" is simply the marker's order in the file, and the colour
    encodes the marker type taken from the second column.
    """
    # Skip the five header lines.
    # FIX: was `xrange(5)`, which only exists on Python 2; `range` behaves
    # identically here and works on both Python 2 and 3.
    for _ in range(5):
        f.readline()
    for i, line in enumerate(f):
        tok = line.strip().split()[:2]
        assert len(tok) == 2
        name = tok[0]
        x = float(name.split('_')[1][:-1])  # extract true position from marker name
        y = float(i)  # estimated map order
        # deduce marker type from second token
        if tok[1] == '<lmxll>':
            c = mat_col
        elif tok[1] == '<nnxnp>':
            c = pat_col
        else:
            c = both_col
        xx.append(x)
        yy.append(y)
        cc.append(c)
#prepare to read from the posteriors file
f = open(conf.inp)

# Dispatch on the input format selected with --maptype (default: 4-column map).
if conf.maptype == 'loc':
    load_loc(f, xx, yy, cc)
elif conf.maptype == 'tmp':
    load_tmp(f, xx, yy, cc)
elif conf.maptype == 'map2':
    load_map2(f, xx, yy, cc)
else:
    load_map(f, xx, yy, cc)

f.close()

# Build the legend: one colour patch per marker type.
mat_patch = mpatches.Patch(color=mat_col, label='mat')
pat_patch = mpatches.Patch(color=pat_col, label='pat')
both_patch = mpatches.Patch(color=both_col, label='both')
plt.legend(handles=[mat_patch, pat_patch, both_patch])

# Scatter of true position (x) vs estimated position (y), coloured by type.
#plt.scatter(xx,yy,c='#00ff00',s=50.0)
plt.scatter(xx, yy, c=cc, s=50.0)

#plt.figure(1)
#histogram subplot
#ax1 = plt.subplot(111)
#ax1.boxplot([[x[0] for x in posn],[x[1] for x in posn],[x[2] for x in posn]])
#plt.legend()

plt.tight_layout()
plt.show()
| gpl-2.0 |
nitin-cherian/LifeLongLearning | Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/IPython/core/magics/basic.py | 2 | 21310 | """Implementation of basic magic functions."""
import argparse
import textwrap
import io
import sys
from pprint import pformat
from IPython.core import magic_arguments, page
from IPython.core.error import UsageError
from IPython.core.magic import Magics, magics_class, line_magic, magic_escapes
from IPython.utils.text import format_screen, dedent, indent
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils.ipstruct import Struct
from warnings import warn
from logging import error
class MagicsDisplay(object):
    """Pretty-printable listing of the magics known to a MagicsManager."""

    def __init__(self, magics_manager, ignore=None):
        # `ignore` holds bound-method objects to hide from the listing.
        self.ignore = ignore if ignore else []
        self.magics_manager = magics_manager

    def _lsmagic(self):
        """The main implementation of the %lsmagic"""
        line_esc = magic_escapes['line']
        cell_esc = magic_escapes['cell']
        manager = self.magics_manager
        magics = manager.lsmagic()
        visible_line = sorted(name for name, func in magics['line'].items()
                              if func not in self.ignore)
        visible_cell = sorted(name for name, func in magics['cell'].items()
                              if func not in self.ignore)
        sections = [
            'Available line magics:',
            line_esc + (' ' + line_esc).join(visible_line),
            '',
            'Available cell magics:',
            cell_esc + (' ' + cell_esc).join(visible_cell),
            '',
            manager.auto_status(),
        ]
        return '\n'.join(sections)

    def _repr_pretty_(self, p, cycle):
        # IPython pretty-printing hook: delegate to the plain text listing.
        p.text(self._lsmagic())

    def __str__(self):
        return self._lsmagic()

    def _jsonable(self):
        """turn magics dict into jsonable dict of the same structure

        replaces object instances with their class names as strings
        """
        result = {}
        for kind, registry in self.magics_manager.lsmagic().items():
            entry = {}
            for name, obj in registry.items():
                try:
                    # Bound magic methods report the owning Magics class name.
                    entry[name] = obj.__self__.__class__.__name__
                except AttributeError:
                    entry[name] = 'Other'
            result[kind] = entry
        return result

    def _repr_json_(self):
        return self._jsonable()
@magics_class
class BasicMagics(Magics):
"""Magics that provide central IPython functionality.
These are various magics that don't fit into specific categories but that
are all part of the base 'IPython experience'."""
@magic_arguments.magic_arguments()
@magic_arguments.argument(
    '-l', '--line', action='store_true',
    help="""Create a line magic alias."""
)
@magic_arguments.argument(
    '-c', '--cell', action='store_true',
    help="""Create a cell magic alias."""
)
@magic_arguments.argument(
    'name',
    help="""Name of the magic to be created."""
)
@magic_arguments.argument(
    'target',
    help="""Name of the existing line or cell magic."""
)
@magic_arguments.argument(
    '-p', '--params', default=None,
    help="""Parameters passed to the magic function."""
)
@line_magic
def alias_magic(self, line=''):
    """Create an alias for an existing line or cell magic.

    Examples
    --------
    ::

      In [1]: %alias_magic t timeit
      Created `%t` as an alias for `%timeit`.
      Created `%%t` as an alias for `%%timeit`.

      In [2]: %t -n1 pass
      1 loops, best of 3: 954 ns per loop

      In [3]: %%t -n1
         ...: pass
         ...:
      1 loops, best of 3: 954 ns per loop

      In [4]: %alias_magic --cell whereami pwd
      UsageError: Cell magic function `%%pwd` not found.

      In [5]: %alias_magic --line whereami pwd
      Created `%whereami` as an alias for `%pwd`.

      In [6]: %whereami
      Out[6]: u'/home/testuser'

      In [7]: %alias_magic h history -p "-l 30" --line
      Created `%h` as an alias for `%history -l 30`.
    """
    args = magic_arguments.parse_argstring(self.alias_magic, line)
    shell = self.shell
    mman = self.shell.magics_manager
    # Users may give names with or without the % / %% escape prefixes;
    # strip every known escape character before lookup.
    escs = ''.join(magic_escapes.values())

    target = args.target.lstrip(escs)
    name = args.name.lstrip(escs)

    params = args.params
    # Strip one level of matching surrounding quotes from -p/--params.
    if (params and
            ((params.startswith('"') and params.endswith('"'))
             or (params.startswith("'") and params.endswith("'")))):
        params = params[1:-1]

    # Find the requested magics.
    m_line = shell.find_magic(target, 'line')
    m_cell = shell.find_magic(target, 'cell')
    if args.line and m_line is None:
        raise UsageError('Line magic function `%s%s` not found.' %
                         (magic_escapes['line'], target))
    if args.cell and m_cell is None:
        raise UsageError('Cell magic function `%s%s` not found.' %
                         (magic_escapes['cell'], target))

    # If --line and --cell are not specified, default to the ones
    # that are available.
    if not args.line and not args.cell:
        if not m_line and not m_cell:
            raise UsageError(
                'No line or cell magic with name `%s` found.' % target
            )
        args.line = bool(m_line)
        args.cell = bool(m_cell)

    params_str = "" if params is None else " " + params

    if args.line:
        mman.register_alias(name, target, 'line', params)
        print('Created `%s%s` as an alias for `%s%s%s`.' % (
            magic_escapes['line'], name,
            magic_escapes['line'], target, params_str))

    if args.cell:
        mman.register_alias(name, target, 'cell', params)
        print('Created `%s%s` as an alias for `%s%s%s`.' % (
            magic_escapes['cell'], name,
            magic_escapes['cell'], target, params_str))
@line_magic
def lsmagic(self, parameter_s=''):
    """List currently available magic functions."""
    # %pip is excluded from the listing: it only prints a notice telling
    # users to run pip outside IPython, so it is not advertised as a magic.
    return MagicsDisplay(self.shell.magics_manager, ignore=[self.pip])
def _magic_docs(self, brief=False, rest=False):
    """Return the concatenated docstrings of all registered magics."""
    docs = self.shell.magics_manager.lsmagic_docs(
        brief, missing='No documentation')

    # reST output wraps each magic name in bold followed by a literal block.
    if rest:
        template = '**%s%s**::\n\n%s\n\n'
    else:
        template = '%s%s:\n%s\n'

    # Line magics first, then cell magics, each group sorted by name.
    chunks = []
    for kind in ('line', 'cell'):
        esc = magic_escapes[kind]
        for fname, fndoc in sorted(docs[kind].items()):
            chunks.append(template % (esc, fname, indent(dedent(fndoc))))
    return ''.join(chunks)
@line_magic
def magic(self, parameter_s=''):
    """Print information about the magic function system.

    Supported formats: -latex, -brief, -rest
    """

    mode = ''
    try:
        # The first token selects the output format: '-latex' -> 'latex'.
        mode = parameter_s.split()[0][1:]
    except IndexError:
        pass  # no argument given: use the default screen format

    brief = (mode == 'brief')
    rest = (mode == 'rest')
    magic_docs = self._magic_docs(brief, rest)

    if mode == 'latex':
        print(self.format_latex(magic_docs))
        return
    else:
        magic_docs = format_screen(magic_docs)

    out = ["""
IPython's 'magic' functions
===========================
The magic function system provides a series of functions which allow you to
control the behavior of IPython itself, plus a lot of system-type
features. There are two kinds of magics, line-oriented and cell-oriented.
Line magics are prefixed with the % character and work much like OS
command-line calls: they get as an argument the rest of the line, where
arguments are passed without parentheses or quotes. For example, this will
time the given statement::
%timeit range(1000)
Cell magics are prefixed with a double %%, and they are functions that get as
an argument not only the rest of the line, but also the lines below it in a
separate argument. These magics are called with two arguments: the rest of the
call line and the body of the cell, consisting of the lines below the first.
For example::
%%timeit x = numpy.random.randn((100, 100))
numpy.linalg.svd(x)
will time the execution of the numpy svd routine, running the assignment of x
as part of the setup phase, which is not timed.
In a line-oriented client (the terminal or Qt console IPython), starting a new
input with %% will automatically enter cell mode, and IPython will continue
reading input until a blank line is given. In the notebook, simply type the
whole cell as one entity, but keep in mind that the %% escape can only be at
the very start of the cell.
NOTE: If you have 'automagic' enabled (via the command line option or with the
%automagic function), you don't need to type in the % explicitly for line
magics; cell magics always require an explicit '%%' escape. By default,
IPython ships with automagic on, so you should only rarely need the % escape.
Example: typing '%cd mydir' (without the quotes) changes your working directory
to 'mydir', if it exists.
For a list of the available magic functions, use %lsmagic. For a description
of any of them, type %magic_name?, e.g. '%cd?'.
Currently the magic system has the following functions:""",
           magic_docs,
           "Summary of magic functions (from %slsmagic):" % magic_escapes['line'],
           str(self.lsmagic()),
           ]
    # Show the assembled help text through the pager.
    page.page('\n'.join(out))
@line_magic
def page(self, parameter_s=''):
    """Pretty print the object and display it through a pager.

    %page [options] OBJECT

    If no object is given, use _ (last output).

    Options:

      -r: page str(object), don't pretty-print it."""
    # After a function contributed by Olivier Aubert, slightly modified.

    # Process options/args
    opts, args = self.parse_options(parameter_s, 'r')
    raw = 'r' in opts

    # Default to '_' (the last output) when no object name was given.
    # FIX: replaces the fragile `args and args or '_'` and/or-ternary idioms.
    oname = args if args else '_'
    info = self.shell._ofind(oname)
    if info['found']:
        # -r pages the plain str(); otherwise pretty-print with pformat.
        formatter = str if raw else pformat
        txt = formatter(info['obj'])
        page.page(txt)
    else:
        print('Object `%s` not found' % oname)
@line_magic
def profile(self, parameter_s=''):
    """Print your currently active IPython profile.

    See Also
    --------
    prun : run code using the Python profiler
           (:meth:`~IPython.core.magics.execution.ExecutionMagics.prun`)
    """
    # Deliberately always raises: the magic was removed in IPython 6.0 and
    # this stub only points users at the replacements.
    raise UsageError("The `%profile` magic has been deprecated since IPython 2.0. "
                     "and removed in IPython 6.0. Please use the value of `get_ipython().profile` instead "
                     "to see current profile in use. Perhaps you meant to use `%prun` to profile code?")
@line_magic
def pprint(self, parameter_s=''):
    """Toggle pretty printing on/off."""
    ptformatter = self.shell.display_formatter.formatters['text/plain']
    # FIX: flip the flag with `not`; clearer than and equivalent to the old
    # `bool(1 - ptformatter.pprint)` arithmetic trick.
    ptformatter.pprint = not ptformatter.pprint
    print('Pretty printing has been turned',
          ['OFF', 'ON'][ptformatter.pprint])
@line_magic
def colors(self, parameter_s=''):
    """Switch color scheme for prompts, info system and exception handlers.

    Currently implemented schemes: NoColor, Linux, LightBG.

    Color scheme names are not case-sensitive.

    Examples
    --------
    To get a plain black and white terminal::

      %colors nocolor
    """
    def color_switch_err(name):
        # Report which subsystem failed without aborting the whole switch.
        warn('Error changing %s color schemes.\n%s' %
             (name, sys.exc_info()[1]), stacklevel=2)

    new_scheme = parameter_s.strip()
    if not new_scheme:
        raise UsageError(
            "%colors: you must specify a color scheme. See '%colors?'")
    # local shortcut
    shell = self.shell

    # Set shell colour scheme
    try:
        shell.colors = new_scheme
        shell.refresh_style()
    # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit; narrowed to ordinary errors (same below).
    except Exception:
        color_switch_err('shell')

    # Set exception colors
    try:
        shell.InteractiveTB.set_colors(scheme=new_scheme)
        shell.SyntaxTB.set_colors(scheme=new_scheme)
    except Exception:
        color_switch_err('exception')

    # Set info (for 'object?') colors
    if shell.color_info:
        try:
            shell.inspector.set_active_scheme(new_scheme)
        except Exception:
            color_switch_err('object inspector')
    else:
        shell.inspector.set_active_scheme('NoColor')
@line_magic
def xmode(self, parameter_s=''):
    """Switch modes for the exception handlers.

    Valid modes: Plain, Context and Verbose.

    If called without arguments, acts as a toggle."""

    def xmode_switch_err(name):
        # Report the failure; the previous mode stays in effect.
        warn('Error changing %s exception modes.\n%s' %
             (name, sys.exc_info()[1]))

    shell = self.shell
    new_mode = parameter_s.strip().capitalize()
    try:
        shell.InteractiveTB.set_mode(mode=new_mode)
        print('Exception reporting mode:', shell.InteractiveTB.mode)
    # FIX: was a bare `except:`, which also caught KeyboardInterrupt and
    # SystemExit; narrowed to ordinary errors.
    except Exception:
        xmode_switch_err('user')
@line_magic
def pip(self, args=''):
    """
    Intercept usage of ``pip`` in IPython and direct user to run command outside of IPython.
    """
    # Only prints guidance; deliberately does NOT run pip, since installing
    # into a live interpreter would not take effect reliably.
    print(textwrap.dedent('''
The following command must be run outside of the IPython shell:
    $ pip {args}
The Python package manager (pip) can only be used from outside of IPython.
Please reissue the `pip` command in a separate terminal or command prompt.
See the Python documentation for more informations on how to install packages:
    https://docs.python.org/3/installing/'''.format(args=args)))
@line_magic
def quickref(self, arg):
    """ Show a quick reference sheet """
    from IPython.core.usage import quick_reference
    # Append one-line summaries of every registered magic to the static
    # reference text, then show it all through the pager.
    qr = quick_reference + self._magic_docs(brief=True)
    page.page(qr)
@line_magic
def doctest_mode(self, parameter_s=''):
    """Toggle doctest mode on and off.

    This mode is intended to make IPython behave as much as possible like a
    plain Python shell, from the perspective of how its prompts, exceptions
    and output look. This makes it easy to copy and paste parts of a
    session into doctests. It does so by:

    - Changing the prompts to the classic ``>>>`` ones.
    - Changing the exception reporting mode to 'Plain'.
    - Disabling pretty-printing of output.

    Note that IPython also supports the pasting of code snippets that have
    leading '>>>' and '...' prompts in them. This means that you can paste
    doctests from files or docstrings (even if they have leading
    whitespace), and the code will execute correctly. You can then use
    '%history -t' to see the translated history; this will give you the
    input after removal of all the leading prompts and whitespace, which
    can be pasted back into an editor.

    With these features, you can switch into this mode easily whenever you
    need to do testing and changes to doctests, without having to leave
    your existing IPython session.
    """

    # Shorthands
    shell = self.shell
    meta = shell.meta
    disp_formatter = self.shell.display_formatter
    ptformatter = disp_formatter.formatters['text/plain']
    # dstore is a data store kept in the instance metadata bag to track any
    # changes we make, so we can undo them later.
    dstore = meta.setdefault('doctest_mode', Struct())
    save_dstore = dstore.setdefault

    # save a few values we'll need to recover later
    # (setdefault only records them the FIRST time, so the pristine
    # settings survive repeated toggles)
    mode = save_dstore('mode', False)
    save_dstore('rc_pprint', ptformatter.pprint)
    save_dstore('xmode', shell.InteractiveTB.mode)
    save_dstore('rc_separate_out', shell.separate_out)
    save_dstore('rc_separate_out2', shell.separate_out2)
    save_dstore('rc_separate_in', shell.separate_in)
    save_dstore('rc_active_types', disp_formatter.active_types)

    if not mode:
        # turn on
        # Prompt separators like plain python
        shell.separate_in = ''
        shell.separate_out = ''
        shell.separate_out2 = ''

        ptformatter.pprint = False
        disp_formatter.active_types = ['text/plain']

        shell.magic('xmode Plain')
    else:
        # turn off: restore every setting recorded above
        shell.separate_in = dstore.rc_separate_in

        shell.separate_out = dstore.rc_separate_out
        shell.separate_out2 = dstore.rc_separate_out2

        ptformatter.pprint = dstore.rc_pprint
        disp_formatter.active_types = dstore.rc_active_types

        shell.magic('xmode ' + dstore.xmode)

    # mode here is the state before we switch; switch_doctest_mode takes
    # the mode we're switching to.
    shell.switch_doctest_mode(not mode)

    # Store new mode and inform
    dstore.mode = bool(not mode)
    mode_label = ['OFF', 'ON'][dstore.mode]
    print('Doctest mode is:', mode_label)
@line_magic
def gui(self, parameter_s=''):
    """Enable or disable IPython GUI event loop integration.

    %gui [GUINAME]

    This magic replaces IPython's threaded shells that were activated
    using the (pylab/wthread/etc.) command line flags.  GUI toolkits
    can now be enabled at runtime and keyboard
    interrupts should work without any problems.  The following toolkits
    are supported:  wxPython, PyQt4, PyGTK, Tk and Cocoa (OSX)::

        %gui wx      # enable wxPython event loop integration
        %gui qt4|qt  # enable PyQt4 event loop integration
        %gui qt5     # enable PyQt5 event loop integration
        %gui gtk     # enable PyGTK event loop integration
        %gui gtk3    # enable Gtk3 event loop integration
        %gui tk      # enable Tk event loop integration
        %gui osx     # enable Cocoa event loop integration
                     # (requires %matplotlib 1.1)
        %gui         # disable all event loop integration

    WARNING:  after any of these has been called you can simply create
    an application object, but DO NOT start the event loop yourself, as
    we have already handled that.
    """
    opts, arg = self.parse_options(parameter_s, '')
    # An empty argument means "disable all integration" for enable_gui.
    if arg == '':
        arg = None
    try:
        return self.shell.enable_gui(arg)
    except Exception as e:
        # print simple error message, rather than traceback if we can't
        # hook up the GUI
        error(str(e))
@skip_doctest
@line_magic
def precision(self, s=''):
"""Set floating point precision for pretty printing.
Can set either integer precision or a format string.
If numpy has been imported and precision is an int,
numpy display precision will also be set, via ``numpy.set_printoptions``.
If no argument is given, defaults will be restored.
Examples
--------
::
In [1]: from math import pi
In [2]: %precision 3
Out[2]: u'%.3f'
In [3]: pi
Out[3]: 3.142
In [4]: %precision %i
Out[4]: u'%i'
In [5]: pi
Out[5]: 3
In [6]: %precision %e
Out[6]: u'%e'
In [7]: pi**10
Out[7]: 9.364805e+04
In [8]: %precision
Out[8]: u'%r'
In [9]: pi**10
Out[9]: 93648.047476082982
"""
ptformatter = self.shell.display_formatter.formatters['text/plain']
ptformatter.float_precision = s
return ptformatter.float_format
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-e', '--export', action='store_true', default=False,
help=argparse.SUPPRESS
)
@magic_arguments.argument(
'filename', type=str,
help='Notebook name or filename'
)
@line_magic
def notebook(self, s):
"""Export and convert IPython notebooks.
This function can export the current IPython history to a notebook file.
For example, to export the history to "foo.ipynb" do "%notebook foo.ipynb".
The -e or --export flag is deprecated in IPython 5.2, and will be
removed in the future.
"""
args = magic_arguments.parse_argstring(self.notebook, s)
from nbformat import write, v4
cells = []
hist = list(self.shell.history_manager.get_range())
if(len(hist)<=1):
raise ValueError('History is empty, cannot export')
for session, execution_count, source in hist[:-1]:
cells.append(v4.new_code_cell(
execution_count=execution_count,
source=source
))
nb = v4.new_notebook(cells=cells)
with io.open(args.filename, 'w', encoding='utf-8') as f:
write(nb, f, version=4)
| mit |
linebp/pandas | pandas/tests/frame/test_sorting.py | 4 | 20958 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import random
import numpy as np
import pandas as pd
from pandas.compat import lrange
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range, NaT, IntervalIndex)
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSorting(TestData):
def test_sort(self):
frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# see gh-9816
with tm.assert_produces_warning(FutureWarning):
frame.sortlevel()
def test_sort_values(self):
frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]],
index=[1, 2, 3], columns=list('ABC'))
# by column (axis=0)
sorted_df = frame.sort_values(by='A')
indexer = frame['A'].argsort().values
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=['A'], ascending=[False])
assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=['B', 'C'])
expected = frame.loc[[2, 1, 3]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=['B', 'C'], ascending=False)
assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=['B', 'A'], ascending=[True, False])
assert_frame_equal(sorted_df, expected)
pytest.raises(ValueError, lambda: frame.sort_values(
by=['A', 'B'], axis=2, inplace=True))
# by row (axis=1): GH 10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis='columns')
expected = frame.reindex(columns=['B', 'A', 'C'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1,
ascending=[True, False])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
msg = r'Length of ascending \(5\) != length of by \(2\)'
with tm.assert_raises_regex(ValueError, msg):
frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_values_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
sorted_df = frame.copy()
sorted_df.sort_values(by='A', inplace=True)
expected = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=1, axis=1, inplace=True)
expected = frame.sort_values(by=1, axis=1)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by='A', ascending=False, inplace=True)
expected = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
expected = frame.sort_values(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
def test_sort_nan(self):
# GH3917
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# sort one column only
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A'], na_position='first')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A'], na_position='first', ascending=False)
assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=['B', 'A'])
sorted_df = df.sort_values(by=1, axis=1, na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{'A': [1, 1, 2, 4, 6, 8, nan],
'B': [2, 9, nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2])
sorted_df = df.sort_values(['A', 'B'])
assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 2, 9, nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], ascending=[
1, 0], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{'A': [8, 6, 4, 2, 1, 1, nan],
'B': [4, 5, 5, nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2])
sorted_df = df.sort_values(['A', 'B'], ascending=[
0, 1], na_position='last')
assert_frame_equal(sorted_df, expected)
# Test DataFrame with nan label
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(
kind='quicksort', ascending=True, na_position='last')
expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position='first')
expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
'B': [5, 9, nan, 5, 2, 5, 4]},
index=[nan, 1, 2, 3, 4, 5, 6])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind='quicksort', ascending=False)
expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
'B': [4, 5, 2, 5, nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind='quicksort', ascending=False, na_position='first')
expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
'B': [5, 4, 5, 2, 5, nan, 9]},
index=[nan, 6, 5, 4, 3, 2, 1])
assert_frame_equal(sorted_df, expected)
def test_stable_descending_sort(self):
# GH #6399
df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
columns=['sort_col', 'order'])
sorted_df = df.sort_values(by='sort_col', kind='mergesort',
ascending=False)
assert_frame_equal(df, sorted_df)
def test_stable_descending_multicolumn_sort(self):
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# test stable mergesort
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 2, 9]},
index=[2, 5, 4, 6, 1, 3, 0])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 1],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 0],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_sort_datetimes(self):
# GH 3461, argsort / lexsort differences for a datetime column
df = DataFrame(['a', 'a', 'a', 'b', 'c', 'd', 'e', 'f', 'g'],
columns=['A'],
index=date_range('20130101', periods=9))
dts = [Timestamp(x)
for x in ['2004-02-11', '2004-01-21', '2004-01-26',
'2005-09-20', '2010-10-04', '2009-05-12',
'2008-11-12', '2010-09-28', '2010-09-28']]
df['B'] = dts[::2] + dts[1::2]
df['C'] = 2.
df['A1'] = 3.
df1 = df.sort_values(by='A')
df2 = df.sort_values(by=['A'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['B'])
assert_frame_equal(df1, df2)
def test_frame_column_inplace_sort_exception(self):
s = self.frame['A']
with tm.assert_raises_regex(ValueError, "This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_nat_values_in_int_column(self):
# GH 14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT))
float_values = (2.0, -1.797693e308)
df = DataFrame(dict(int=int_values, float=float_values),
columns=["int", "float"])
df_reversed = DataFrame(dict(int=int_values[::-1],
float=float_values[::-1]),
columns=["int", "float"],
index=[1, 0])
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(dict(datetime=[Timestamp("2016-01-01"), NaT],
float=float_values), columns=["datetime", "float"])
df_reversed = DataFrame(dict(datetime=[NaT, Timestamp("2016-01-01")],
float=float_values[::-1]),
columns=["datetime", "float"],
index=[1, 0])
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
assert_frame_equal(df_sorted, df_reversed)
class TestDataFrameSortIndexKinds(TestData):
def test_sort_index_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'])
result = frame.sort_values(by=['A', 'B'])
indexer = np.lexsort((frame['B'], frame['A']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'], ascending=False)
result = frame.sort_values(by=['A', 'B'], ascending=False)
indexer = np.lexsort((frame['B'].rank(ascending=False),
frame['A'].rank(ascending=False)))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['B', 'A'])
result = frame.sort_values(by=['B', 'A'])
indexer = np.lexsort((frame['A'], frame['B']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_id = id(unordered['A'])
df = unordered.copy()
df.sort_index(inplace=True)
expected = frame
assert_frame_equal(df, expected)
assert a_id != id(df['A'])
df = unordered.copy()
df.sort_index(ascending=False, inplace=True)
expected = frame[::-1]
assert_frame_equal(df, expected)
# axis=1
unordered = frame.loc[:, ['D', 'B', 'C', 'A']]
df = unordered.copy()
df.sort_index(axis=1, inplace=True)
expected = frame
assert_frame_equal(df, expected)
df = unordered.copy()
df.sort_index(axis=1, ascending=False, inplace=True)
expected = frame.iloc[:, ::-1]
assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['A', 'B'], ascending=[1, 0])
result = df.sort_values(by=['A', 'B'], ascending=[1, 0])
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
assert_frame_equal(result, expected)
# test with multiindex, too
idf = df.set_index(['A', 'B'])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
assert_frame_equal(result, expected)
# also, Series!
result = idf['C'].sort_index(ascending=[1, 0])
assert_series_equal(result, expected['C'])
def test_sort_index_duplicates(self):
# with 9816, these are all translated to .sort_values
df = DataFrame([lrange(5, 9), lrange(4)],
columns=['a', 'a', 'b', 'b'])
with tm.assert_raises_regex(ValueError, 'duplicate'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
with tm.assert_raises_regex(ValueError, 'duplicate'):
df.sort_values(by='a')
with tm.assert_raises_regex(ValueError, 'duplicate'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['a'])
with tm.assert_raises_regex(ValueError, 'duplicate'):
df.sort_values(by=['a'])
with tm.assert_raises_regex(ValueError, 'duplicate'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
# multi-column 'by' is separate codepath
df.sort_index(by=['a', 'b'])
with tm.assert_raises_regex(ValueError, 'duplicate'):
# multi-column 'by' is separate codepath
df.sort_values(by=['a', 'b'])
# with multi-index
# GH4370
df = DataFrame(np.random.randn(4, 2),
columns=MultiIndex.from_tuples([('a', 0), ('a', 1)]))
with tm.assert_raises_regex(ValueError, 'levels'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
with tm.assert_raises_regex(ValueError, 'levels'):
df.sort_values(by='a')
# convert tuples to a list of tuples
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=[('a', 1)])
expected = df.sort_values(by=[('a', 1)])
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=('a', 1))
result = df.sort_values(by=('a', 1))
assert_frame_equal(result, expected)
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
res = df.sort_index(level='A', sort_remaining=False)
assert_frame_equal(df, res)
res = df.sort_index(level=['A', 'B'], sort_remaining=False)
assert_frame_equal(df, res)
def test_sort_index_categorical_index(self):
df = (DataFrame({'A': np.arange(6, dtype='int64'),
'B': Series(list('aabbca'))
.astype('category', categories=list('cab'))})
.set_index('B'))
result = df.sort_index()
expected = df.iloc[[4, 0, 1, 5, 2, 3]]
assert_frame_equal(result, expected)
result = df.sort_index(ascending=False)
expected = df.iloc[[3, 2, 5, 1, 0, 4]]
assert_frame_equal(result, expected)
def test_sort_index(self):
# GH13496
frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0 : sort rows by index labels
unordered = frame.loc[[3, 2, 4, 1]]
result = unordered.sort_index(axis=0)
expected = frame
assert_frame_equal(result, expected)
result = unordered.sort_index(ascending=False)
expected = frame[::-1]
assert_frame_equal(result, expected)
# axis=1 : sort columns by column names
unordered = frame.iloc[:, [2, 1, 3, 0]]
result = unordered.sort_index(axis=1)
assert_frame_equal(result, frame)
result = unordered.sort_index(axis=1, ascending=False)
expected = frame.iloc[:, ::-1]
assert_frame_equal(result, expected)
def test_sort_index_multiindex(self):
# GH13496
# sort rows by specified level of multi-index
mi = MultiIndex.from_tuples([[2, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
# MI sort, but no level: sort_level has no effect
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
result = df.sort_index(sort_remaining=False)
expected = df.sort_index()
assert_frame_equal(result, expected)
def test_sort_index_intervalindex(self):
# this is a de-facto sort via unstack
# confirming that we sort in the order of the bins
y = Series(np.random.randn(100))
x1 = Series(np.sign(np.random.randn(100)))
x2 = pd.cut(Series(np.random.randn(100)),
bins=[-3, -0.5, 0, 0.5, 3])
model = pd.concat([y, x1, x2], axis=1, keys=['Y', 'X1', 'X2'])
result = model.groupby(['X1', 'X2']).mean().unstack()
expected = IntervalIndex.from_tuples(
[(-3.0, -0.5), (-0.5, 0.0),
(0.0, 0.5), (0.5, 3.0)],
closed='right')
result = result.columns.levels[1].categories
tm.assert_index_equal(result, expected)
| bsd-3-clause |
zaxliu/deepnap | experiments/kdd-exps/experiment_message_2016-6-12_G5_BUF2_AR1_b65_legacy.py | 1 | 4374 | # System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_file_name = 'message_2016-6-12_G5_BUF2_AR1_b65.log'
# Composite classes
class Phi_QAgentNN(PhiMixin, QAgentNN):
def __init__(self, **kwargs):
super(Phi_QAgentNN, self).__init__(**kwargs)
# Parameters
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.5, 0.9
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - Phi
phi_length = 5
dim_state = (1, phi_length, 3+2)
range_state_slice = [(0, 10), (0, 10), (0, 10), (0, 1), (0, 1)]
range_state = [[range_state_slice]*phi_length]
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size = 2, 200
reward_scaling, reward_scaling_update = 1, 'adaptive'
batch_size, update_period, freeze_period, rs_period = 100, 4, 16, 32
# |- Env model
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, 0.0
beta = 0.65
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- Env
# |- Time
start_time = pd.to_datetime('2014-11-05 09:20:00')
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df =pd.read_csv(
filepath_or_buffer='../data/trace_dh3.dat',
parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
agent = Phi_QAgentNN(
phi_length=phi_length,
dim_state=dim_state, range_state=range_state,
f_build_net = None,
batch_size=batch_size, learning_rate=learning_rate, momentum=momentum,
reward_scaling=reward_scaling, reward_scaling_update=reward_scaling_update, rs_period=rs_period,
update_period=update_period, freeze_period=freeze_period,
memory_size=memory_size, num_buffer=num_buffer,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavyliftings
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
| bsd-3-clause |
anurag313/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>,
# Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
rneher/FitnessInference | flu/src/clade_frequency_correlations.py | 1 | 5578 | #!/ebio/ag-neher/share/programs/bin/python2.7
################################################################################
#
# author: Richard Neher
# email: richard.neher@tuebingen.mpg.de
#
# Reference: Richard A. Neher, Colin A Russell, Boris I Shraiman.
# "Predicting evolution from the shape of genealogical trees"
#
################################################################################
#
#
################################################################################
import matplotlib
matplotlib.use('pdf')
import sys
sys.path.append('/ebio/ag-neher/share/users/rneher/FluPrediction_code/flu/src')
import test_flu_prediction as test_flu
import predict_flu as flu
import tree_utils
from Bio import Phylo,AlignIO,SeqIO
from matplotlib import pyplot as plt
import numpy as np
from scipy import stats
import glob,pickle,gzip,os,argparse
from datetime import date
file_formats = ['.pdf', '.svg']
plt.rcParams.update(test_flu.mpl_params)
figure_folder = '../figures_ms/'
analysis_folder = test_flu.flu_analysis_folder
mem_scale = 2.0**np.arange(-6,3)
def clade_frequency_correlations_func(params):
# set up the prediction and pass all parameters to the wrapper function
prediction = test_flu.predict_params(['mean_fitness'],
params)
# define the methodes for which the predictions are to be evaluated
methods = [('mean_fitness', '_ext', prediction.terminals),
('mean_fitness', '_int', prediction.non_terminals)]
distances, distances_epi, test_data = test_flu.evaluate(prediction, methods, params)
tbins_eval = [date(year=params.year, month = 6, day=1), date(year=params.year+1, month = 6, day=1)]
combined_data = test_flu.make_combined_data(prediction, test_data, collapse = params.collapse)
combined_tree = flu.flu_ranking(combined_data, time_bins = tbins_eval, pseudo_count = 0)
combined_tree.expansion_score()
tree_utils.find_internal_nodes(prediction.T,combined_tree.T)
fitness_and_freqs = []
for node in prediction.non_terminals:
fitness_and_freqs.append([node.mean_fitness, node.polarizer]+list(node.mirror_node.temporal_frequency))
fitness_and_freqs = np.array(fitness_and_freqs)
return fitness_and_freqs
def clade_frequency_correlations_func_polarizer(params):
# set up the prediction and pass all parameters to the wrapper function
params.diffusion=1.0
params.gamma = 1.0
prediction = test_flu.predict_params(['polarizer'],
params)
# define the methodes for which the predictions are to be evaluated
methods = [('polarizer', '_ext', prediction.terminals),
('polarizer', '_int', prediction.non_terminals)]
distances, distances_epi, test_data = test_flu.evaluate(prediction, methods, params)
tbins_eval = [date(year=params.year, month = 6, day=1), date(year=params.year+1, month = 6, day=1)]
combined_data = test_flu.make_combined_data(prediction, test_data, collapse = params.collapse)
combined_tree = flu.flu_ranking(combined_data, time_bins = tbins_eval, pseudo_count = 0)
combined_tree.expansion_score()
tree_utils.find_internal_nodes(prediction.T,combined_tree.T)
freqs = [node.mirror_node.temporal_frequency for node in prediction.non_terminals]
polarizers= []
for tau in mem_scale:
prediction.calculate_polarizers(mem = tau)
polarizers.append([node.polarizer for node in prediction.non_terminals])
polarizers_and_freqs = np.hstack( (np.array(polarizers).T, np.array(freqs)))
return polarizers_and_freqs
def clade_frequency_correlations_all_years(params, years = range(1995,2014)):
fitness_and_freqs = []
for year in years:
params.year = year
params.collapse = True
params.sample_size =200
params.omega=0.001
fitness_and_freqs.append(clade_frequency_correlations_func_polarizer(params))
return fitness_and_freqs
def plot_clade_frequencies_correlations(fitness_and_freqs, coli = 0):
all_freqs = []
plt.figure()
for faf in fitness_and_freqs:
tmp = faf[(faf[:,-2]<0.8)*(faf[:,-2]>0.005)]
rank_fit = stats.rankdata(tmp[:,coli])
rank_fit/=rank_fit.max()
rank_freq = stats.rankdata(tmp[:,-1]/tmp[:,-2])
rank_freq/=rank_freq.max()
all_freqs.extend([(a,b) for a,b in zip(rank_fit,rank_freq)])
plt.scatter(rank_fit, rank_freq)
print stats.spearmanr(tmp[:,coli],tmp[:,-1]/tmp[:,-2])
plt.xlabel('fitness rank')
plt.ylabel('growth rank')
all_freqs = np.array(all_freqs)
print 'all', stats.spearmanr(all_freqs[:,0], all_freqs[:,1])
return all_freqs
if __name__=="__main__":
# parse the commandline arguments
parser = test_flu.make_flu_parser()
params=parser.parse_args()
if params.year>0:
faf = clade_frequency_correlations_func_polarizer(params)
plot_clade_frequencies_correlations([faf])
else:
coli=4
faf = clade_frequency_correlations_all_years(params, years = range(2003,2014))
normed_freqs = plot_clade_frequencies_correlations(faf, coli=coli)
for ff in file_formats:
plt.savefig(figure_folder+'fitness_frequency_correlations_years_2003_2014_gamma_'
+str(mem_scale[coli]/2)+ff)
with open(analysis_folder+'fitness_frequency_correlations_years_2003_2014_gamma_'
+str(mem_scale[coli]/2)+'.pickle', 'w') as outfile:
pickle.dump(normed_freqs, outfile)
| mit |
mne-tools/mne-tools.github.io | 0.16/_downloads/plot_time_frequency_simulated.py | 5 | 8402 | """
======================================================================
Time-frequency on simulated data (Multitaper vs. Morlet vs. Stockwell)
======================================================================
This example demonstrates the different time-frequency estimation methods
on simulated data. It shows the time-frequency resolution trade-off
and the problem of estimation variance. In addition it highlights
alternative functions for generating TFRs without averaging across
trials, or by operating on numpy arrays.
"""
# Authors: Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
# Chris Holdgraf <choldgraf@berkeley.edu>
#
# License: BSD (3-clause)
import numpy as np
from matplotlib import pyplot as plt
from mne import create_info, EpochsArray
from mne.baseline import rescale
from mne.time_frequency import (tfr_multitaper, tfr_stockwell, tfr_morlet,
tfr_array_morlet)
print(__doc__)
###############################################################################
# Simulate data
# -------------
#
# We'll simulate data with a known spectro-temporal structure.
sfreq = 1000.0
ch_names = ['SIM0001', 'SIM0002']
ch_types = ['grad', 'grad']
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
n_times = 1024 # Just over 1 second epochs
n_epochs = 40
seed = 42
rng = np.random.RandomState(seed)
noise = rng.randn(n_epochs, len(ch_names), n_times)
# Add a 50 Hz sinusoidal burst to the noise and ramp it.
t = np.arange(n_times, dtype=np.float) / sfreq
signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum()) # Ramping
data = noise + signal
reject = dict(grad=4000)
events = np.empty((n_epochs, 3), dtype=int)
first_event_sample = 100
event_id = dict(sin50hz=1)
for k in range(n_epochs):
events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
epochs = EpochsArray(data=data, info=info, events=events, event_id=event_id,
reject=reject)
###############################################################################
# Calculate a time-frequency representation (TFR)
# -----------------------------------------------
#
# Below we'll demonstrate the output of several TFR functions in MNE:
#
# * :func:`mne.time_frequency.tfr_multitaper`
# * :func:`mne.time_frequency.tfr_stockwell`
# * :func:`mne.time_frequency.tfr_morlet`
#
# Multitaper transform
# ====================
# First we'll use the multitaper method for calculating the TFR.
# This creates several orthogonal tapering windows in the TFR estimation,
# which reduces variance. We'll also show some of the parameters that can be
# tweaked (e.g., ``time_bandwidth``) that will result in different multitaper
# properties, and thus a different TFR. You can trade time resolution or
# frequency resolution or both in order to get a reduction in variance.
freqs = np.arange(5., 100., 3.)
vmin, vmax = -3., 3. # Define our color limits.
###############################################################################
# **(1) Least smoothing (most variance/background fluctuations).**
n_cycles = freqs / 2.
time_bandwidth = 2.0 # Least possible frequency-smoothing (1 taper)
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Least smoothing, most variance')
###############################################################################
# **(2) Less frequency smoothing, more time smoothing.**
n_cycles = freqs # Increase time-window length to 1 second.
time_bandwidth = 4.0 # Same frequency-smoothing as (1) 3 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Less frequency smoothing, more time smoothing')
###############################################################################
# **(3) Less time smoothing, more frequency smoothing.**
n_cycles = freqs / 2.
time_bandwidth = 8.0 # Same time-smoothing as (1), 7 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Less time smoothing, more frequency smoothing')
##############################################################################
# Stockwell (S) transform
# =======================
#
# Stockwell uses a Gaussian window to balance temporal and spectral resolution.
# Importantly, frequency bands are phase-normalized, hence strictly comparable
# with regard to timing, and the input signal can be recovered from the
# transform in a lossless way if we disregard numerical errors. In this case,
# we control the spectral / temporal resolution by specifying different widths
# of the gaussian window using the ``width`` parameter.
fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
fmin, fmax = freqs[[0, -1]]
for width, ax in zip((0.2, .7, 3.0), axs):
power = tfr_stockwell(epochs, fmin=fmin, fmax=fmax, width=width)
power.plot([0], baseline=(0., 0.1), mode='mean', axes=ax, show=False,
colorbar=False)
ax.set_title('Sim: Using S transform, width = {:0.1f}'.format(width))
plt.tight_layout()
###############################################################################
# Morlet Wavelets
# ===============
#
# Finally, show the TFR using morlet wavelets, which are a sinusoidal wave
# with a gaussian envelope. We can control the balance between spectral and
# temporal resolution with the ``n_cycles`` parameter, which defines the
# number of cycles to include in the window.
fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
all_n_cycles = [1, 3, freqs / 2.]
for n_cycles, ax in zip(all_n_cycles, axs):
power = tfr_morlet(epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=False)
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
axes=ax, show=False, colorbar=False)
n_cycles = 'scaled by freqs' if not isinstance(n_cycles, int) else n_cycles
ax.set_title('Sim: Using Morlet wavelet, n_cycles = %s' % n_cycles)
plt.tight_layout()
###############################################################################
# Calculating a TFR without averaging over epochs
# -----------------------------------------------
#
# It is also possible to calculate a TFR without averaging across trials.
# We can do this by using ``average=False``. In this case, an instance of
# :class:`mne.time_frequency.EpochsTFR` is returned.
n_cycles = freqs / 2.
power = tfr_morlet(epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=False, average=False)
print(type(power))
avgpower = power.average()
avgpower.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Using Morlet wavelets and EpochsTFR', show=False)
###############################################################################
# Operating on arrays
# -------------------
#
# MNE also has versions of the functions above which operate on numpy arrays
# instead of MNE objects. They expect inputs of the shape
# ``(n_epochs, n_channels, n_times)``. They will also return a numpy array
# of shape ``(n_epochs, n_channels, n_freqs, n_times)``.
power = tfr_array_morlet(epochs.get_data(), sfreq=epochs.info['sfreq'],
freqs=freqs, n_cycles=n_cycles,
output='avg_power')
# Baseline the output
rescale(power, epochs.times, (0., 0.1), mode='mean', copy=False)
fig, ax = plt.subplots()
mesh = ax.pcolormesh(epochs.times * 1000, freqs, power[0],
cmap='RdBu_r', vmin=vmin, vmax=vmax)
ax.set_title('TFR calculated on a numpy array')
ax.set(ylim=freqs[[0, -1]], xlabel='Time (ms)')
fig.colorbar(mesh)
plt.tight_layout()
plt.show()
| bsd-3-clause |
kylerbrown/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# test that the feature score of the best features
F, _ = f_regression(X, y)
def test_lasso_stability_path():
    # Check lasso stability path
    # Load diabetes data and add noisy features
    scaling = 0.3
    coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
                                                  random_state=42,
                                                  n_resampling=30)

    # The three features with the highest univariate F-scores (module-level
    # F) should also accumulate the highest stability scores along the path.
    assert_array_equal(np.argsort(F)[-3:],
                       np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
    # Check randomized lasso
    scaling = 0.3
    selection_threshold = 0.5

    # or with 1 alpha
    clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
                          scaling=scaling,
                          selection_threshold=selection_threshold)
    feature_scores = clf.fit(X, y).scores_
    # Stability scores should rank the top-3 features like the univariate
    # F-scores computed at module level.
    assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])

    # or with many alphas
    clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
                          scaling=scaling,
                          selection_threshold=selection_threshold)
    feature_scores = clf.fit(X, y).scores_
    # One score column per alpha value is kept in all_scores_.
    assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
    assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])

    # transform keeps exactly the features scoring above the threshold and
    # inverse_transform restores the original shape.
    X_r = clf.transform(X)
    X_full = clf.inverse_transform(X_r)
    assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
    assert_equal(X_full.shape, X.shape)

    # alpha chosen via information criterion ('aic') keeps all features here.
    clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
                          scaling=scaling)
    feature_scores = clf.fit(X, y).scores_
    assert_array_equal(feature_scores, X.shape[1] * [1.])

    # scaling outside the (0, 1) interval must raise ValueError.
    clf = RandomizedLasso(verbose=False, scaling=-0.1)
    assert_raises(ValueError, clf.fit, X, y)

    clf = RandomizedLasso(verbose=False, scaling=1.1)
    assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
    # Check randomized sparse logistic regression
    iris = load_iris()
    X = iris.data[:, [0, 2]]
    y = iris.target
    # Restrict to a binary problem by dropping class 2.
    X = X[y != 2]
    y = y[y != 2]

    F, _ = f_classif(X, y)

    scaling = 0.3
    clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
                                       scaling=scaling, n_resampling=50,
                                       tol=1e-3)
    X_orig = X.copy()
    feature_scores = clf.fit(X, y).scores_
    assert_array_equal(X, X_orig)   # fit does not modify X
    # Stability scores should rank features like the univariate F-scores.
    assert_array_equal(np.argsort(F), np.argsort(feature_scores))

    # A sequence of C values is accepted as well and yields the same ranking.
    clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
                                       random_state=42, scaling=scaling,
                                       n_resampling=50, tol=1e-3)
    feature_scores = clf.fit(X, y).scores_
    assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
    # Check randomized sparse logistic regression on sparse data
    iris = load_iris()
    X = iris.data[:, [0, 2]]
    y = iris.target
    # Restrict to a binary problem by dropping class 2.
    X = X[y != 2]
    y = y[y != 2]

    # center here because sparse matrices are usually not centered
    X, y, _, _, _ = center_data(X, y, True, True)

    X_sp = sparse.csr_matrix(X)

    F, _ = f_classif(X, y)

    scaling = 0.3

    # Fit once on the dense matrix ...
    clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
                                       scaling=scaling, n_resampling=50,
                                       tol=1e-3)
    feature_scores = clf.fit(X, y).scores_

    # ... and once on the sparse equivalent; scores must be identical.
    clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
                                       scaling=scaling, n_resampling=50,
                                       tol=1e-3)
    feature_scores_sp = clf.fit(X_sp, y).scores_
    assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
jgliss/pydoas | pydoas/helpers.py | 1 | 3793 | # -*- coding: utf-8 -*-
#
# Pydoas is a Python library for the post-analysis of DOAS result data
# Copyright (C) 2017 Jonas Gliß (jonasgliss@gmail.com)
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the BSD 3-Clause License
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See BSD 3-Clause License for more details
# (https://opensource.org/licenses/BSD-3-Clause)
"""
Module containing all sorts of helper methods
"""
import matplotlib.cm as colormaps
import matplotlib.colors as colors
from datetime import datetime, time, date
from matplotlib.pyplot import draw
from numpy import linspace, hstack, vectorize, int, floor, log10, isnan
exponent = lambda num: int(floor(log10(abs(num))))
time_delta_to_seconds = vectorize(lambda x: x.total_seconds())
def to_datetime(value):
    """Coerce a datetime, date or time object into a full datetime.

    A datetime passes through unchanged, a date is combined with
    midnight, and a bare time is attached to the reference date
    1900-01-01. Any other input raises ValueError.
    """
    # NOTE: datetime must be tested before date because datetime is a
    # subclass of date -- the dispatch table below preserves that order.
    converters = (
        (datetime, lambda v: v),
        (date, lambda v: datetime.combine(v, time())),
        (time, lambda v: datetime.combine(date(1900, 1, 1), v)),
    )
    for kind, convert in converters:
        if isinstance(value, kind):
            return convert(value)
    raise ValueError("Conversion into datetime object failed for input: "
            "%s (type: %s)" %(value, type(value)))
def isnum(val):
    """Return True if *val* is a real number (int or float) and not NaN.

    :returns: bool, True or False
    """
    # A single boolean expression replaces the explicit if/return pair;
    # short-circuiting keeps isnan() from ever seeing a non-numeric value.
    return isinstance(val, (int, float)) and not isnan(val)
def shifted_color_map(vmin, vmax, cmap = None):
    """Shift center of a diverging colormap to value 0

    .. note::

        This method was found `here <http://stackoverflow.com/questions/
        7404116/defining-the-midpoint-of-a-colormap-in-matplotlib>`_
        (last access: 17/01/2017). Thanks to `Paul H <http://stackoverflow.com/
        users/1552748/paul-h>`_ who provided it.

    Function to offset the "center" of a colormap. Useful for
    data with a negative min and positive max and if you want the
    middle of the colormap's dynamic range to be at zero level

    :param vmin: lower end of data value range
    :param vmax: upper end of data value range
    :param cmap: colormap (if None, use default cmap: seismic)

    :return:
        - shifted colormap

    """
    if cmap is None:
        cmap = colormaps.seismic

    # Fraction of the [vmin, vmax] range lying below zero; this becomes
    # the position the colormap's center is moved to.
    midpoint = 1 - abs(vmax)/(abs(vmax) + abs(vmin))

    cdict = {
        'red': [],
        'green': [],
        'blue': [],
        'alpha': []
    }

    # regular index to compute the colors
    reg_index = linspace(0, 1, 257)

    # shifted index to match the data
    shift_index = hstack([
        linspace(0.0, midpoint, 128, endpoint=False),
        linspace(midpoint, 1.0, 129, endpoint=True)
    ])

    # Sample the source colormap at regular positions but anchor each entry
    # at the shifted position: one half of the map is stretched, the other
    # compressed, so the original center color lands at data value 0.
    for ri, si in zip(reg_index, shift_index):
        r, g, b, a = cmap(ri)

        cdict['red'].append((si, r, r))
        cdict['green'].append((si, g, g))
        cdict['blue'].append((si, b, b))
        cdict['alpha'].append((si, a, a))

    return colors.LinearSegmentedColormap('shiftedcmap', cdict)
def _print_list(lst):
"""Print a list rowwise"""
for item in lst:
print(item)
def rotate_xtick_labels(ax, deg=30, ha="right"):
    """Rotate the x-axis tick labels of a matplotlib axes object.

    :param ax: matplotlib axes instance whose tick labels are rotated
    :param deg: rotation angle in degrees (default: 30)
    :param ha: horizontal alignment of the labels (default: "right")
    :return: the modified axes instance
    """
    # Draw first so the tick label texts are actually populated.
    draw()
    lbls = ax.get_xticklabels()
    lbls = [lbl.get_text() for lbl in lbls]
    # BUGFIX: rotation and alignment were previously hard coded to
    # 30 / "right", silently ignoring the deg and ha arguments.
    ax.set_xticklabels(lbls, rotation=deg, ha=ha)
    draw()
    return ax
def find_fitted_species_doasis_header(file_name):
    """Search all fitted species in the header of a DOASIS result file.

    Not implemented yet: calling this always raises
    :class:`NotImplementedError`.
    """
    raise NotImplementedError
| bsd-3-clause |
js7558/pyBinance | tests/test-getOpenOrders.py | 1 | 2221 | #!/usr/bin/python
import pandas as pd
import sys
sys.path.append('../')
from Binance import Binance
import logging.config
import logging.handlers
import logging
import os
# this logging configuration is sketchy
binance = logging.getLogger(__name__)
logging.config.fileConfig('logging.ini')

# create Binance object
bn = Binance()

# set keys
# NOTE(review): these appear to be the public example keys from the Binance
# API documentation rather than live credentials -- confirm; real secrets
# must never be committed to source control.
bn.setSecretKey('NhqPtmdSJYdKjVHjA7PZj4Mge3R5YNiP1e3UZjInClVN65XAbvqqM6A7H5fATj0j')
bn.setAPIKey('vmPUZE6mv9SD5VNHk4HlWFsOr6aKE2zvsw0MuIgwCIPy6utIco14y7Ju91duEh8A')

# getOpenOrders
# Manual test driver (Python 2): exercises bn.getOpenOrders() with valid
# and invalid query-parameter combinations and prints the outcome.
print "---------------- getOpenOrders --------------"

print "################################# POSITIVE TESTS (returns 1 or r) ###################"
queryParams = {'symbol':'SALTBTC'}
print "****test valid mandatory input symbol, timestamp autogenerated"
test = bn.getOpenOrders(queryParams)
print

queryParams = {'symbol':'SALTBTC','timestamp':1507770491000}
print "****test valid mandatory input symbol, timestamp supplied"
test = bn.getOpenOrders(queryParams)
print

queryParams = {'symbol':'SALTBTC','recvWindow':123435234}
print "****test valid mandatory input symbol, timestamp autogenerated, optional params"
test = bn.getOpenOrders(queryParams)
print

queryParams = {'symbol':'SALTBTC','timestamp':1507770491000,'recvWindow':123435234}
print "****test valid mandatory input symbol, timestamp supplied, optional params"
test = bn.getOpenOrders(queryParams)
print

# Each negative case below supplies a missing, wrongly-typed, or malformed
# parameter and is expected to be rejected by getOpenOrders.
print "################################# NEGATIVE TESTS (returns 0) ###################"
print

queryParams = {'recvWindow':112234}
print "****test valid optional inputs, valid parameter missing"
test = bn.getOpenOrders(queryParams)
print

queryParams = {'symbol':12.5,'orderId':3334344}
print "****test valid mandatory inputs present with invalid type"
test = bn.getOpenOrders(queryParams)
print

queryParams = {'symbol':'SALTBTC','recvWindow':'123456778','timestamp':150774295}
print "****test valid mandatory inputs, invalid user proved timestamp, plus some optional"
test = bn.getOpenOrders(queryParams)
print

queryParams = {'symbol':'ETHBTC','timestamp':'abcdefghijklm'}
print "****test valid mandatory inputs, invalid user proved timestamp type but length ok, plus some optional"
test = bn.getOpenOrders(queryParams)
print
| mit |
joshloyal/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 104 | 3139 | """Build a sentiment analysis / polarity model
Sentiment analysis can be casted as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess wether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the mean and std for each candidate along with the parameter
# settings for all the candidates explored by grid search.
n_candidates = len(grid_search.cv_results_['params'])
for i in range(n_candidates):
print(i, 'params - %s; mean - %0.2f; std - %0.2f'
% (grid_search.cv_results_['params'][i],
grid_search.cv_results_['mean_test_score'][i],
grid_search.cv_results_['std_test_score'][i]))
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
cloudera/ibis | ibis/backends/spark/udf.py | 1 | 6563 | """
APIs for creating user-defined element-wise, reduction and analytic
functions.
"""
import collections
import functools
import itertools
import pyspark.sql.functions as f
import pyspark.sql.types as pt
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.signature as sig
import ibis.udf.validate as v
from .compiler import SparkUDAFNode, SparkUDFNode, compiles
from .datatypes import spark_dtype
_udf_name_cache = collections.defaultdict(itertools.count)
class SparkUDF:
    """Decorator object turning a plain Python function into a Spark UDF.

    An instance is configured with declared ibis input/output types and
    then called with the user function: this validates the signature,
    registers a pyspark UDF, and returns a wrapper that builds ibis
    expression nodes for it.
    """

    # Expression node base class the generated node type derives from.
    base_class = SparkUDFNode

    def __init__(self, input_type, output_type):
        # Normalize the declared types into ibis dtype objects.
        self.input_type = list(map(dt.dtype, input_type))
        self.output_type = dt.dtype(output_type)
        # Spark-side type corresponding to the declared ibis output type.
        self.spark_output_type = spark_dtype(self.output_type)

    def validate_func_and_types(self, func):
        """Check that *func* is callable and matches the declared types."""
        if not callable(func):
            raise TypeError('func must be callable, got {}'.format(func))

        # Validate that the input_type argument and the function signature
        # match and that the output_type is valid
        v.validate_input_type(self.input_type, func)
        v.validate_output_type(self.output_type)

        if not self.output_type.nullable:
            raise com.IbisTypeError(
                'Spark does not support non-nullable output types'
            )

    def pyspark_udf(self, func):
        # Register func with Spark as a plain (row-at-a-time) UDF.
        return f.udf(func, self.spark_output_type)

    def create_udf_node(self, udf_func):
        """Create a new UDF node type and add a corresponding compile rule.

        Parameters
        ----------
        udf_func : function
            Should be the result of calling pyspark.sql.functions.udf or
            pyspark.sql.functions.pandas_udf on the user-specified func

        Returns
        -------
        result : type
            A new SparkUDFNode or SparkUDAFNode subclass
        """
        name = udf_func.__name__
        # Counter per function name disambiguates repeated registrations.
        definition = next(_udf_name_cache[name])
        external_name = '{}_{:d}'.format(name, definition)

        UDFNode = type(
            external_name,
            (self.base_class,),
            {
                'signature': sig.TypeSignature.from_dtypes(self.input_type),
                'return_type': self.output_type,
            },
        )

        # Add udf_func as a property. If added to the class namespace dict, it
        # would be incorrectly used as a bound method, i.e.
        # udf_func(t.column) would be a call to bound method func with t.column
        # interpreted as self.
        UDFNode.udf_func = property(lambda self, udf_func=udf_func: udf_func)

        # Register the SQL translation for the new node type: render the
        # UDF call with each argument translated by the compiler.
        @compiles(UDFNode)
        def compiles_udf_node(t, expr):
            return '{}({})'.format(
                UDFNode.__name__, ', '.join(map(t.translate, expr.op().args))
            )

        return UDFNode

    def __call__(self, func):
        # Decorator entry point: validate, wrap for pyspark, build node type.
        self.validate_func_and_types(func)
        udf_func = self.pyspark_udf(func)
        UDFNode = self.create_udf_node(udf_func)

        @functools.wraps(func)
        def wrapped(*args):
            # Cast the ibis arguments to the declared input types so Spark
            # receives values of the expected type.
            node = UDFNode(*args)
            casted_args = [
                arg.cast(typ) for arg, typ in zip(node.args, self.input_type)
            ]
            new_node = UDFNode(*casted_args)
            return new_node.to_expr()

        return wrapped
class SparkPandasUDF(SparkUDF):
    """Scalar pandas UDF: the wrapped function operates on pandas.Series."""

    pandas_udf_type = f.PandasUDFType.SCALAR

    def validate_func_and_types(self, func):
        """Validate *func*, rejecting shapes pandas UDFs cannot support.

        Raises
        ------
        com.IbisTypeError
            If the output type maps to a Spark MapType or StructType.
        com.UnsupportedArgumentError
            If no input types were declared (Spark has no 0-arg pandas
            UDFs).
        """
        if isinstance(self.spark_output_type, (pt.MapType, pt.StructType)):
            # BUGFIX: the message used to be built with a backslash
            # continuation *inside* the string literal, which embedded the
            # source indentation into the error text. Use implicit literal
            # concatenation instead.
            raise com.IbisTypeError(
                'Spark does not support MapType or StructType output '
                'for Pandas UDFs'
            )
        if not self.input_type:
            raise com.UnsupportedArgumentError(
                'Spark does not support 0-arg pandas UDFs. Instead, '
                'create a 1-arg pandas UDF and ignore the arg in '
                'your function'
            )
        super().validate_func_and_types(func)

    def pyspark_udf(self, func):
        # Register as a pandas UDF of the configured type (scalar here,
        # grouped-aggregate in the subclass).
        return f.pandas_udf(func, self.spark_output_type, self.pandas_udf_type)
class SparkPandasAggregateUDF(SparkPandasUDF):
    # Grouped-aggregate pandas UDF: reduces Series input to a single value,
    # so the generated expression node is an aggregate (SparkUDAFNode).
    base_class = SparkUDAFNode
    pandas_udf_type = f.PandasUDFType.GROUPED_AGG
class udf:
    """Namespace collecting the user-facing Spark UDF decorator factories."""

    class elementwise:
        """Decorator factory for row-wise (element-wise) Spark UDFs."""

        def __init__(self, input_type, output_type):
            # Declared ibis input/output types, forwarded to SparkUDF on call.
            self._input_type = input_type
            self._output_type = output_type

        def __call__(self, func):
            """Define a UDF (user-defined function) that operates element wise
            on a Spark DataFrame.

            Parameters
            ----------
            input_type : List[ibis.expr.datatypes.DataType]
                A list of the types found in :mod:`~ibis.expr.datatypes`. The
                length of this list must match the number of arguments to the
                function. Variadic arguments are not yet supported.
            output_type : ibis.expr.datatypes.DataType
                The return type of the function.

            Examples
            --------
            >>> import ibis
            >>> import ibis.expr.datatypes as dt
            >>> from ibis.spark.udf import udf
            >>> @udf.elementwise(input_type=[dt.string], output_type=dt.int64)
            ... def my_string_length(x):
            ...     return len(x) * 2
            """
            return SparkUDF(self._input_type, self._output_type)(func)

        @staticmethod
        def pandas(input_type, output_type):
            """Define a Pandas UDF (user-defined function) that operates
            element-wise on a Spark DataFrame. The content of the function
            should operate on a pandas.Series.

            Examples
            --------
            >>> import ibis
            >>> import ibis.expr.datatypes as dt
            >>> from ibis.spark.udf import udf
            >>> @udf.elementwise.pandas([dt.string], dt.int64)
            ... def my_string_length(x):
            ...     return x.str.len() * 2
            """
            return SparkPandasUDF(input_type, output_type)

    @staticmethod
    def reduction(input_type, output_type):
        """Define a user-defined reduction function that takes N pandas Series
        or scalar values as inputs and produces one row of output.

        Examples
        --------
        >>> import ibis
        >>> import ibis.expr.datatypes as dt
        >>> from ibis.spark.udf import udf
        >>> @udf.reduction(input_type=[dt.string], output_type=dt.int64)
        ... def my_string_length_agg(series, **kwargs):
        ...     return (series.str.len() * 2).sum()
        """
        return SparkPandasAggregateUDF(input_type, output_type)
| apache-2.0 |
zrhans/python | exemplos/Examples.lnk/bokeh/compat/mpl/lc_offsets.py | 13 | 1067 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from bokeh import mpl
from bokeh.plotting import show
# Simulate a series of ocean current profiles, successively
# offset by 0.1 m/s so that they form what is sometimes called
# a "waterfall" plot or a "stagger" plot.

nverts = 60
ncurves = 20
offs = (0.1, 0.0)

# Seeded RNG so the generated profiles are reproducible.
rs = np.random.RandomState([12345678])
yy = np.linspace(0, 2 * np.pi, nverts)
ym = np.amax(yy)
# Base profile: amplitude decays with height, modulated by a cosine.
xx = (0.2 + (ym - yy) / ym) ** 2 * np.cos(yy - 0.4) * 0.5
segs = []
for i in range(ncurves):
    # Perturb the base curve with small Gaussian noise for each profile.
    xxx = xx + 0.02 * rs.randn(nverts)
    curve = list(zip(xxx, yy * 100))
    segs.append(curve)

colors = [(1.0, 0.0, 0.0, 1.0), (0.0, 0.5, 0.0, 1.0), (0.0, 0.0, 1.0, 1.0),
          (0.0, 0.75, 0.75, 1.0), (0.75, 0.75, 0, 1.0), (0.75, 0, 0.75, 1.0),
          (0.0, 0.0, 0.0, 1.0)]

# One LineCollection holds all curves; 'offsets' staggers them laterally.
col = LineCollection(segs, linewidth=5, offsets=offs)
ax = plt.axes()
ax.add_collection(col, autolim=True)
col.set_color(colors)
ax.set_title('Successive data offsets')
fig = plt.gcf()

# Convert the matplotlib figure to a bokeh plot and display it.
show(mpl.to_bokeh(name="lc_offsets"))
| gpl-2.0 |
nmayorov/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
lenovor/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
    @abstractmethod
    def __init__(self,
                 criterion,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_weight_fraction_leaf,
                 max_features,
                 max_leaf_nodes,
                 random_state,
                 class_weight=None):
        """Store the tree hyper-parameters.

        Abstract: only concrete subclasses (classifiers / regressors)
        are instantiated. Parameters are stored unmodified -- validation
        and interpretation are deferred to ``fit`` -- so that the
        scikit-learn ``get_params``/``set_params`` machinery round-trips
        the exact user-supplied values.
        """
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.random_state = random_state
        self.max_leaf_nodes = max_leaf_nodes
        self.class_weight = class_weight

        # Fitted attributes: populated by fit(), None until then.
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None
        self.tree_ = None
        self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.
    Read more in the :ref:`User Guide <tree>`.
    Parameters
    ----------
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.
    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : int, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : int, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
    class_weight : dict, list of dicts, "balanced" or None, optional
        (default=None)
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
        For multi-output, the weights of each column of y will be multiplied.
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Attributes
    ----------
    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem),
        or a list of arrays of class labels (multi-output problem).
    feature_importances_ : array of shape = [n_features]
        The feature importances. The higher, the more important the
        feature. The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance [4]_.
    max_features_ : int,
        The inferred value of max_features.
    n_classes_ : int or list
        The number of classes (for single output problems),
        or a list containing the number of classes for each
        output (for multi-output problems).
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    tree_ : Tree object
        The underlying Tree object.
    See also
    --------
    DecisionTreeRegressor
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> clf = DecisionTreeClassifier(random_state=0)
    >>> iris = load_iris()
    >>> cross_val_score(clf, iris.data, iris.target, cv=10)
    ... # doctest: +SKIP
    ...
    array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
    0.93..., 0.93..., 1. , 0.93..., 1. ])
    """
    def __init__(self,
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None):
        # Pure parameter forwarding; validation happens in BaseDecisionTree.fit
        super(DecisionTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state)
    def predict_proba(self, X, check_input=True):
        """Predict class probabilities of the input samples X.
        The predicted class probability is the fraction of samples of the same
        class in a leaf.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)
        if self.n_outputs_ == 1:
            proba = proba[:, :self.n_classes_]
            normalizer = proba.sum(axis=1)[:, np.newaxis]
            # Guard against 0/0: rows whose counts sum to zero stay all-zero.
            normalizer[normalizer == 0.0] = 1.0
            proba /= normalizer
            return proba
        else:
            # One probability array per output.
            all_proba = []
            for k in range(self.n_outputs_):
                proba_k = proba[:, k, :self.n_classes_[k]]
                normalizer = proba_k.sum(axis=1)[:, np.newaxis]
                normalizer[normalizer == 0.0] = 1.0
                proba_k /= normalizer
                all_proba.append(proba_k)
            return all_proba
    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return np.log(proba)
        else:
            # Take the log of each output's probability array in place.
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])
            return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A decision tree regressor.
    Read more in the :ref:`User Guide <tree>`.
    Parameters
    ----------
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error, which is equal to
        variance reduction as feature selection criterion.
    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.
    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : int, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : int, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Attributes
    ----------
    feature_importances_ : array of shape = [n_features]
        The feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the
        (normalized) total reduction of the criterion brought
        by that feature. It is also known as the Gini importance [4]_.
    max_features_ : int,
        The inferred value of max_features.
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    tree_ : Tree object
        The underlying Tree object.
    See also
    --------
    DecisionTreeClassifier
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> boston = load_boston()
    >>> regressor = DecisionTreeRegressor(random_state=0)
    >>> cross_val_score(regressor, boston.data, boston.target, cv=10)
    ... # doctest: +SKIP
    ...
    array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
    0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
    """
    def __init__(self,
                 criterion="mse",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None):
        # Pure parameter forwarding; validation happens in BaseDecisionTree.fit
        super(DecisionTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.
    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set 1, this amounts to building a totally random
    decision tree.
    Warning: Extra-trees should only be used within ensemble methods.
    Read more in the :ref:`User Guide <tree>`.
    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="gini",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None):
        # Same interface as DecisionTreeClassifier; only the defaults differ
        # (random splitter, max_features="auto") to get extra randomization.
        super(ExtraTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.
    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set 1, this amounts to building a totally random
    decision tree.
    Warning: Extra-trees should only be used within ensemble methods.
    Read more in the :ref:`User Guide <tree>`.
    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="mse",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None):
        # Same interface as DecisionTreeRegressor; only the defaults differ
        # (random splitter, max_features="auto") to get extra randomization.
        super(ExtraTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state)
| bsd-3-clause |
t20100/sandbox | curves/CurvesView.py | 1 | 12272 | # coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""
Widget displaying the synthesis of many curves taken with the same X values
"""
from __future__ import division
__authors__ = ["T. VINCENT"]
__license__ = "MIT"
__date__ = "30/05/2017"
import logging
import numpy
from silx.gui import qt
from silx.gui.plot import Plot1D
_logger = logging.getLogger(__name__)
# numpy.nanmean appeared in numpy 1.8; provide a NaN-ignoring fallback for
# older installations (e.g. Debian 7).
if hasattr(numpy, "nanmean"):
    nanmean = numpy.nanmean
else:
    def nanmean(data, axis=None):
        """Mean of the non-NaN elements of an array.

        :param numpy.ndarray data: The array to process
        :param axis: None or the axis index along which to compute the means.
        """
        valid_count = numpy.sum(~numpy.isnan(data), axis, dtype="int")
        return numpy.nansum(data, axis) / valid_count
# TODO make the min/max background work for negative values...
# TODO split control widgets from curves plot
# TODO make curves handling not being a widget and make it interact with a plot
# TODO optimisation of min/mean/max computation
# TODO optimisation of plotting: no update curves when not in live mode
# TODO optimisation of plotting: no update of background when not 'visible' change
# TODO add std? in background
# TODO error bars of current curves
# TODO set number of curves displayed
# TODO matplotlib bad rendering of filled curves regarding edges
# TODO OO API with setters
class CurvesView(qt.QWidget):
"""Widget displaying statistical indicators over many curves
:param parent:
:param f:
"""
_sigAppendCurves = qt.Signal(object)
_sigSetXData = qt.Signal(object)
def __init__(self, parent=None, f=qt.Qt.WindowFlags()):
super(CurvesView, self).__init__(parent, f)
self._nbExtraCurves = 1
self._currentCurveColor = 0.0, 0.8, 0.0, 1.0
self._index = -1
self._x = None
self._data = None
self._min = None
self._max = None
self._sum = None
self._count = None
self._plot = Plot1D() # backend='matplotlib')
self._plot.setActiveCurveHandling(False)
layout = qt.QGridLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
layout.addWidget(self._plot, 0, 0, 1, 2)
self._slider = qt.QSlider(qt.Qt.Horizontal)
layout.addWidget(self._slider, 1, 0)
self._spinBox = qt.QSpinBox()
layout.addWidget(self._spinBox, 1, 1)
self._slider.valueChanged.connect(self._indexChanged)
self._spinBox.valueChanged.connect(self._indexChanged)
self._updateControlWidgets()
self._sigAppendCurves.connect(self._appendCurves)
self._sigSetXData.connect(self._setXData)
def getPlot(self):
"""Returns the used :class:`PlotWidget` plot."""
return self._plot
def setXData(self, x):
"""Set the X coordinates of the curves.
This method can be called from any thread.
:param numpy.ndarray x: The X coordinates of the curves.
"""
x = numpy.array(x, copy=True)
assert x.ndim == 1
self._sigSetXData.emit(x)
def _setXData(self, x):
"""Implements :meth:`setXData` in the main thread."""
if self._data is not None:
assert len(x) == self._data.shape[-1]
self._x = x
def getXData(self):
"""Returns the X coordiantes of the curves(numpy.ndarray)"""
return numpy.array(self._x, copy=True)
def clear(self):
"""Reset the plot by removing all curves"""
self._data = None
self._min = None
self._max = None
self._sum = None
self._count = None
self.getPlot().clear()
self.setCurrentCurveIndex(-1)
self._updateControlWidgets()
def _updateCurrentCurve(self):
"""Update the current curve in the plot"""
plot = self.getPlot()
data = self.getData(copy=False)
currentIndex = self.getCurrentCurveIndex(absolute=True)
for offset in range(-self._nbExtraCurves, self._nbExtraCurves + 1):
index = currentIndex + offset
if offset == 0:
continue
legend = "N%+d" % offset
plot.remove(legend=legend, kind="curve")
if 0 <= index < len(data):
distance = abs(offset) / (self._nbExtraCurves + 1)
if abs(offset) == 1: # first curve
linestyle = "-"
elif distance < 0.66:
linestyle = "--"
else:
linestyle = ":"
if offset < 0:
color = numpy.array(self._currentCurveColor) * 0.5
else:
color = "#FF9900"
plot.addCurve(
self.getXData(),
data[index],
legend=legend,
color=color,
linestyle=linestyle,
z=100,
resetzoom=False,
)
# Current curve
if currentIndex < len(data):
currentCurve = data[currentIndex]
plot.addCurve(
self.getXData(),
currentCurve,
legend="current",
color=self._currentCurveColor,
z=101,
linewidth=2,
resetzoom=False,
)
else:
plot.remove(legend="current", kind="curve")
def _indexChanged(self, index):
"""Handle spinBox or slider value changed"""
currentIndex = self.getCurrentCurveIndex(absolute=True)
if currentIndex != index:
# Do not update index if it is already OK
self.setCurrentCurveIndex(index)
elif index == len(self.getData(copy=False)) - 1:
# Set to last curve
self.setCurrentCurveIndex(-1)
def setCurrentCurveIndex(self, index=-1):
"""Perform update when current curve changed
:param int index:
The index of the current curve in the array
The index can be negative to start indexing from the end
Default: -1 = Lastest curve.
"""
data = self.getData(copy=False)
assert index in (-1, 0) or -len(data) <= index < len(data)
self._index = index
if self._index < 0:
absoluteIndex = len(data) + self._index
else:
absoluteIndex = self._index
self._spinBox.setValue(absoluteIndex)
self._slider.setValue(absoluteIndex)
self._updateCurrentCurve()
def getCurrentCurveIndex(self, absolute=False):
"""Returns the current curve index
:param bool absolute:
False (default) to get index as Python indexing (can be negative),
True to get current index from the beginning of the data array (>= 0).
:return: The index
:rtype: int
"""
if absolute and self._index < 0: # Negative index is from the end
return max(0, len(self.getData(copy=False)) + self._index)
else:
return self._index
def _updateControlWidgets(self):
"""Update widgets controlling """
nbCurves = len(self.getData(copy=False))
if self.getCurrentCurveIndex() >= 0:
index = nbCurves - 1
else:
index = nbCurves + self.getCurrentCurveIndex()
self._slider.setRange(0, index)
self._spinBox.setRange(0, index)
self._slider.setEnabled(nbCurves > 0)
self._spinBox.setEnabled(nbCurves > 0)
self.setCurrentCurveIndex(self.getCurrentCurveIndex())
def getData(self, copy=True):
"""Return displayed curves data
:param bool copy: True to get a copy (default),
False to get internal representation, do not modify.
:return: A copy of the data currently displayed
"""
if self._data is None:
return numpy.array(()).reshape(0, 0) # Empty 2D array
else:
return numpy.array(self._data, copy=copy)
def appendCurves(self, data):
    """Add curve(s) to the plot.

    The data is always copied.
    This method can be called from any thread.

    :param numpy.ndarray data:
        If 1D, it is a curve to append to the plot.
        If 2D, it is a set of curves to append.
    """
    # Copy first, then promote a single curve to a 1xN stack of curves.
    curves = numpy.array(data, copy=True)
    curves = numpy.atleast_2d(curves)
    assert curves.ndim == 2
    # Hand the work off to the main thread through the Qt signal.
    self._sigAppendCurves.emit(curves)
def _appendCurves(self, data):
    """Implements :meth:`appendCurves` in the main thread.

    Appends `data` (2D: curves x points) to the stored array, refreshes
    the selection widgets, and redraws the min/max/mean background
    envelope of all curves.
    """
    plot = self.getPlot()
    wasData = self._data is not None
    if self._data is None:
        # First curves: derive the X axis if it was never set.
        if self._x is None:
            self._x = numpy.arange(data.shape[-1])
        assert len(self._x) == data.shape[-1]
        self._data = data
    else:
        # All curves must share the same number of points.
        assert self._data.shape[-1] == data.shape[-1]
        self._data = numpy.append(self._data, data, axis=0)

    self._updateControlWidgets()

    # Update plot background
    # Filled grey "maximum" envelope drawn first (lowest z) …
    z = 1
    maxs = numpy.nanmax(self._data, axis=0)
    plot.addCurve(
        self.getXData(),
        maxs,
        legend="maximum",
        color="#D0D0D0",
        fill=True,
        z=z,
        linestyle="-",
        resetzoom=False,
    )
    # … then a filled white "minimum" curve on top of it, which masks the
    # grey fill below the minimum so only the min-max band stays grey.
    z += 1
    mins = numpy.nanmin(self._data, axis=0)
    plot.addCurve(
        self.getXData(),
        mins,
        legend="minimum",
        color="#FFFFFF",
        fill=True,
        z=z,
        linestyle="-",
        resetzoom=False,
    )
    z += 1
    # Mean curve is semi-transparent white; note it uses a fixed z=1000
    # (not the running `z`) so it stays above everything else.
    means = nanmean(self._data, axis=0)
    plot.addCurve(
        self.getXData(),
        means,
        legend="mean",
        color="#FFFFFF80",
        linewidth=2,
        linestyle="-",
        z=1000,
        resetzoom=False,
    )

    # Draw current curve
    self._updateCurrentCurve()

    # Only auto-zoom on the very first batch of curves.
    if not wasData:
        self.resetZoom()
def resetZoom(self):
    """Reset Plot zoom so all curves are visible."""
    plot = self.getPlot()
    plot.resetZoom()
if __name__ == "__main__":
    # Demo: show a CurvesView and feed it a new noisy sine curve every
    # 500 ms from a background thread until the window is closed.
    import glob  # NOTE(review): unused import — candidate for removal
    import threading
    import time

    # dummy data
    x = numpy.linspace(0.0, 10.0, 1024)
    y = numpy.sin(x) + 2
    # 1024 noisy copies of the base curve, one row per curve.
    data = y[numpy.newaxis, :] + numpy.random.normal(0, 0.1, (1024, len(y)))

    app = qt.QApplication([])
    w = CurvesView()
    # w.setAttribute(qt.Qt.WA_DeleteOnClose)
    w.show()
    w.setXData(x)
    w.appendCurves(data)
    w.resetZoom()

    # Shared flag polled by the worker thread; cleared on shutdown.
    running = True

    def addCurves():
        # Worker loop: appendCurves is thread-safe (signal-based), so it
        # may be called from here.
        index = 0
        while running:
            time.sleep(0.5)
            w.appendCurves(data[index % len(data)])
            index += 1

    thread = threading.Thread(target=addCurves)
    thread.start()

    app.exec_()
    print("closing...")
    # Stop the worker and give it up to 2 s to exit cleanly.
    if thread:
        running = False
        thread.join(2)
| mit |
0asa/scikit-learn | sklearn/pipeline.py | 2 | 19361 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
    """Pipeline of transforms with a final estimator.

    Sequentially apply a list of transforms and a final estimator.
    Intermediate steps of the pipeline must be 'transforms', that is, they
    must implement fit and transform methods.
    The final estimator only needs to implement fit.

    The purpose of the pipeline is to assemble several steps that can be
    cross-validated together while setting different parameters.
    For this, it enables setting parameters of the various steps using their
    names and the parameter name separated by a '__', as in the example below.

    Parameters
    ----------
    steps: list
        List of (name, transform) tuples (implementing fit/transform) that are
        chained, in the order in which they are chained, with the last object
        an estimator.

    Examples
    --------
    >>> from sklearn import svm
    >>> from sklearn.datasets import samples_generator
    >>> from sklearn.feature_selection import SelectKBest
    >>> from sklearn.feature_selection import f_regression
    >>> from sklearn.pipeline import Pipeline
    >>> # generate some data to play with
    >>> X, y = samples_generator.make_classification(
    ...     n_informative=5, n_redundant=0, random_state=42)
    >>> # ANOVA SVM-C
    >>> anova_filter = SelectKBest(f_regression, k=5)
    >>> clf = svm.SVC(kernel='linear')
    >>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
    >>> # You can set the parameters using the names issued
    >>> # For instance, fit using a k of 10 in the SelectKBest
    >>> # and a parameter 'C' of the svm
    >>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
    ...                                              # doctest: +ELLIPSIS
    Pipeline(steps=[...])
    >>> prediction = anova_svm.predict(X)
    >>> anova_svm.score(X, y)                        # doctest: +ELLIPSIS
    0.77...
    """

    # BaseEstimator interface

    def __init__(self, steps):
        # Building the name -> estimator dict first lets the length check
        # below detect duplicate step names (duplicates collapse in a dict).
        self.named_steps = dict(steps)
        names, estimators = zip(*steps)
        if len(self.named_steps) != len(steps):
            raise ValueError("Names provided are not unique: %s" % (names,))

        # shallow copy of steps
        self.steps = tosequence(zip(names, estimators))
        transforms = estimators[:-1]
        estimator = estimators[-1]

        # Every intermediate step must behave as a transformer.
        for t in transforms:
            if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
                    hasattr(t, "transform")):
                raise TypeError("All intermediate steps of the chain should "
                                "be transforms and implement fit and transform"
                                " '%s' (type %s) doesn't)" % (t, type(t)))

        # The final step only needs fit.
        if not hasattr(estimator, "fit"):
            raise TypeError("Last step of chain should implement fit "
                            "'%s' (type %s) doesn't)"
                            % (estimator, type(estimator)))

    def get_params(self, deep=True):
        """Return the pipeline parameters; when ``deep``, nested step
        parameters are included under ``<step_name>__<param>`` keys."""
        if not deep:
            return super(Pipeline, self).get_params(deep=False)
        else:
            out = self.named_steps.copy()
            for name, step in six.iteritems(self.named_steps):
                for key, value in six.iteritems(step.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            return out

    @property
    def _final_estimator(self):
        # The estimator of the last (name, estimator) step.
        return self.steps[-1][1]

    # Estimator interface

    def _pre_transform(self, X, y=None, **fit_params):
        """Route ``<step>__<param>`` fit params to their steps and
        fit/transform everything but the final estimator.

        Returns the transformed data and the fit params destined for the
        final step.
        """
        fit_params_steps = dict((step, {}) for step, _ in self.steps)
        for pname, pval in six.iteritems(fit_params):
            step, param = pname.split('__', 1)
            fit_params_steps[step][param] = pval
        Xt = X
        for name, transform in self.steps[:-1]:
            # Prefer fit_transform when available (may be more efficient).
            if hasattr(transform, "fit_transform"):
                Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
            else:
                Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
                              .transform(Xt)
        return Xt, fit_params_steps[self.steps[-1][0]]

    def fit(self, X, y=None, **fit_params):
        """Fit all the transforms one after the other and transform the
        data, then fit the transformed data using the final estimator.

        Parameters
        ----------
        X : iterable
            Training data. Must fulfil input requirements of first step of the
            pipeline.
        y : iterable, default=None
            Training targets. Must fulfil label requirements for all steps of
            the pipeline.
        """
        Xt, fit_params = self._pre_transform(X, y, **fit_params)
        self.steps[-1][-1].fit(Xt, y, **fit_params)
        return self

    def fit_transform(self, X, y=None, **fit_params):
        """Fit all the transforms one after the other and transform the
        data, then use fit_transform on transformed data using the final
        estimator.

        Parameters
        ----------
        X : iterable
            Training data. Must fulfil input requirements of first step of the
            pipeline.
        y : iterable, default=None
            Training targets. Must fulfil label requirements for all steps of
            the pipeline.
        """
        Xt, fit_params = self._pre_transform(X, y, **fit_params)
        if hasattr(self.steps[-1][-1], 'fit_transform'):
            return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
        else:
            return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)

    @if_delegate_has_method(delegate='_final_estimator')
    def predict(self, X):
        """Applies transforms to the data, and the predict method of the
        final estimator. Valid only if the final estimator implements
        predict.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfil input requirements of first step of
            the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict(Xt)

    @if_delegate_has_method(delegate='_final_estimator')
    def predict_proba(self, X):
        """Applies transforms to the data, and the predict_proba method of the
        final estimator. Valid only if the final estimator implements
        predict_proba.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfil input requirements of first step of
            the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict_proba(Xt)

    @if_delegate_has_method(delegate='_final_estimator')
    def decision_function(self, X):
        """Applies transforms to the data, and the decision_function method of
        the final estimator. Valid only if the final estimator implements
        decision_function.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfil input requirements of first step of
            the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].decision_function(Xt)

    @if_delegate_has_method(delegate='_final_estimator')
    def predict_log_proba(self, X):
        """Applies transforms to the data, and the predict_log_proba method of
        the final estimator. Valid only if the final estimator implements
        predict_log_proba.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfil input requirements of first step of
            the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict_log_proba(Xt)

    @if_delegate_has_method(delegate='_final_estimator')
    def transform(self, X):
        """Applies transforms to the data, and the transform method of the
        final estimator. Valid only if the final estimator implements
        transform.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfil input requirements of first step of
            the pipeline.
        """
        Xt = X
        # Unlike predict & co, this applies ALL steps including the last.
        for name, transform in self.steps:
            Xt = transform.transform(Xt)
        return Xt

    @if_delegate_has_method(delegate='_final_estimator')
    def inverse_transform(self, X):
        """Applies inverse transform to the data.
        Starts with the last step of the pipeline and applies
        ``inverse_transform`` in inverse order of the pipeline steps.
        Valid only if all steps of the pipeline implement inverse_transform.

        Parameters
        ----------
        X : iterable
            Data to inverse transform. Must fulfil output requirements of the
            last step of the pipeline.
        """
        if X.ndim == 1:
            # Promote a single sample to a 2D row vector.
            X = X[None, :]
        Xt = X
        for name, step in self.steps[::-1]:
            Xt = step.inverse_transform(Xt)
        return Xt

    @if_delegate_has_method(delegate='_final_estimator')
    def score(self, X, y=None):
        """Applies transforms to the data, and the score method of the
        final estimator. Valid only if the final estimator implements
        score.

        Parameters
        ----------
        X : iterable
            Data to score. Must fulfil input requirements of first step of the
            pipeline.
        y : iterable, default=None
            Targets used for scoring. Must fulfil label requirements for all
            steps of the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].score(Xt, y)

    @property
    def classes_(self):
        # Classes of the final estimator (classification pipelines only).
        return self.steps[-1][-1].classes_

    @property
    def _pairwise(self):
        # check if first estimator expects pairwise input
        return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
def make_pipeline(*steps):
    """Construct a Pipeline from the given estimators.

    This is a shorthand for the Pipeline constructor; it does not require, and
    does not permit, naming the estimators. Instead, they will be given names
    automatically based on their types (see :func:`_name_estimators`).

    Examples
    --------
    >>> from sklearn.naive_bayes import GaussianNB
    >>> from sklearn.preprocessing import StandardScaler
    >>> make_pipeline(StandardScaler(), GaussianNB())    # doctest: +NORMALIZE_WHITESPACE
    Pipeline(steps=[('standardscaler',
                     StandardScaler(copy=True, with_mean=True, with_std=True)),
                    ('gaussiannb', GaussianNB())])

    Returns
    -------
    p : Pipeline
    """
    return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, muliply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, muliply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
    """Concatenates results of multiple transformer objects.

    This estimator applies a list of transformer objects in parallel to the
    input data, then concatenates the results. This is useful to combine
    several feature extraction mechanisms into a single transformer.

    Parameters
    ----------
    transformer_list: list of (string, transformer) tuples
        List of transformer objects to be applied to the data. The first
        half of each tuple is the name of the transformer.
    n_jobs: int, optional
        Number of jobs to run in parallel (default 1).
    transformer_weights: dict, optional
        Multiplicative weights for features per transformer.
        Keys are transformer names, values the weights.
    """
    def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
        self.transformer_list = transformer_list
        self.n_jobs = n_jobs
        self.transformer_weights = transformer_weights

    def get_feature_names(self):
        """Get feature names from all transformers.

        Returns
        -------
        feature_names : list of strings
            Names of the features produced by transform.
        """
        feature_names = []
        for name, trans in self.transformer_list:
            if not hasattr(trans, 'get_feature_names'):
                raise AttributeError("Transformer %s does not provide"
                                     " get_feature_names." % str(name))
            # Prefix each feature with the name of the transformer
            # that produced it.
            feature_names.extend([name + "__" + f for f in
                                  trans.get_feature_names()])
        return feature_names

    def fit(self, X, y=None):
        """Fit all transformers using X.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data, used to fit transformers.
        """
        transformers = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_one_transformer)(trans, X, y)
            for name, trans in self.transformer_list)
        self._update_transformer_list(transformers)
        return self

    def fit_transform(self, X, y=None, **fit_params):
        """Fit all transformers using X, transform the data and concatenate
        results.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data to be transformed.

        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        result = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_transform_one)(trans, name, X, y,
                                        self.transformer_weights, **fit_params)
            for name, trans in self.transformer_list)

        Xs, transformers = zip(*result)
        self._update_transformer_list(transformers)
        # Sparse stacking if any transformer produced a sparse matrix.
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
        return Xs

    def transform(self, X):
        """Transform X separately by each transformer, concatenate results.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data to be transformed.

        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        Xs = Parallel(n_jobs=self.n_jobs)(
            delayed(_transform_one)(trans, name, X, self.transformer_weights)
            for name, trans in self.transformer_list)
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
        return Xs

    def get_params(self, deep=True):
        """Return parameters; with ``deep``, include nested
        ``<transformer_name>__<param>`` entries."""
        if not deep:
            return super(FeatureUnion, self).get_params(deep=False)
        else:
            out = dict(self.transformer_list)
            for name, trans in self.transformer_list:
                for key, value in iteritems(trans.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            return out

    def _update_transformer_list(self, transformers):
        # Replace each stored transformer with its freshly fitted
        # counterpart, in place and preserving order/names.
        self.transformer_list[:] = [
            (name, new)
            for ((name, old), new) in zip(self.transformer_list, transformers)
        ]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
    """Construct a FeatureUnion from the given transformers.

    This is a shorthand for the FeatureUnion constructor; it does not require,
    and does not permit, naming the transformers. Instead, they will be given
    names automatically based on their types. It also does not allow weighting.

    Examples
    --------
    >>> from sklearn.decomposition import PCA, TruncatedSVD
    >>> make_union(PCA(), TruncatedSVD())    # doctest: +NORMALIZE_WHITESPACE
    FeatureUnion(n_jobs=1,
                 transformer_list=[('pca', PCA(copy=True, n_components=None,
                                               whiten=False)),
                                   ('truncatedsvd',
                                    TruncatedSVD(algorithm='randomized',
                                                 n_components=2, n_iter=5,
                                                 random_state=None, tol=0.0))],
                 transformer_weights=None)

    Returns
    -------
    f : FeatureUnion
    """
    return FeatureUnion(_name_estimators(transformers))
| bsd-3-clause |
eg-zhang/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 261 | 2836 | # Author: Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
    """Known hash values for a small int: default seed equals seed=0,
    and ``positive=True`` maps negative hashes to their unsigned value."""
    assert_equal(murmurhash3_32(3), 847579505)
    assert_equal(murmurhash3_32(3, seed=0), 847579505)
    assert_equal(murmurhash3_32(3, seed=42), -1823081949)

    assert_equal(murmurhash3_32(3, positive=False), 847579505)
    assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
    assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)

    assert_equal(murmurhash3_32(3, positive=True), 847579505)
    assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
    # -1823081949 interpreted as unsigned 32-bit is 2471885347.
    assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
    """Hashing an int32 array must equal elementwise scalar hashing,
    preserving the array's shape, for both signed and positive modes."""
    rng = np.random.RandomState(42)
    keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
    keys = keys.reshape((3, 2, 1))

    for seed in [0, 42]:
        # Expected result: scalar hash of each flattened element.
        expected = np.array([murmurhash3_32(int(k), seed)
                             for k in keys.flat])
        expected = expected.reshape(keys.shape)
        assert_array_equal(murmurhash3_32(keys, seed), expected)

    for seed in [0, 42]:
        expected = np.array([murmurhash3_32(k, seed, positive=True)
                             for k in keys.flat])
        expected = expected.reshape(keys.shape)
        assert_array_equal(murmurhash3_32(keys, seed, positive=True),
                           expected)
def test_mmhash3_bytes():
    """Known hash values for a bytes key, signed and positive variants."""
    assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
    assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)

    assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
    assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
    """Unicode keys must hash identically to their byte equivalents
    (same expected values as in test_mmhash3_bytes)."""
    assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
    assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)

    assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
    assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
    """Hashing ``' ' * i`` for i in range(100) must give 100 distinct values.

    Bug fix: the previous version never inserted the computed hash into
    ``previous_hashes``, so the membership assertion was vacuously true and
    the test could never detect a collision.
    """
    previous_hashes = set()
    for i in range(100):
        h = murmurhash3_32(' ' * i, 0)
        assert_true(h not in previous_hashes,
                    "Found collision on growing empty string")
        # Record the hash so later iterations are actually compared to it.
        previous_hashes.add(h)
def test_uniform_distribution():
    """Hash values taken modulo ``n_bins`` should be approximately uniform
    over 100k consecutive integer keys (checked to 2 decimal places)."""
    n_bins, n_samples = 10, 100000
    # Bug fix: `np.float` is a removed deprecated alias (NumPy 1.24);
    # the builtin `float` is the exact equivalent (64-bit double).
    bins = np.zeros(n_bins, dtype=float)

    for i in range(n_samples):
        bins[murmurhash3_32(i, positive=True) % n_bins] += 1

    means = bins / n_samples
    expected = np.ones(n_bins) / n_bins
    assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
| bsd-3-clause |
barajasr/Baseball-Reference-Plotting | Plot.py | 1 | 9927 | import os
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import Auxiliary as aux
import BrefScraper as brf
class Plot(object):
    """ With data obtained from BrefScraper, Plot cleans the raw
    data and saves the resulting figures to file.
    """
    def __init__(self, scraper=None, histogram=True):
        """ Set defaults to be used later in cleaning and plotting.

        Bug fix: the old signature used ``scraper=brf.BrefScraper()``,
        which built ONE scraper at class-definition time and silently
        shared it across every Plot() created without an explicit
        scraper (mutable default argument). A fresh scraper is now
        created per instance; passing a scraper works exactly as before.

        :param scraper: BrefScraper supplying the raw data
        :param histogram: True to plot event counts, False to plot
            run totals weighted by their value
        """
        self.scraper = brf.BrefScraper() if scraper is None else scraper
        self.histogram = histogram
        # Axes limit hints to use
        self.x_min = 1
        self.x_max = 10
        self.y_min = -24
        self.y_max = 24
        # Constants: y-axis major/minor tick spacing
        self.major, self.minor = 6, 3

    def _clean_data(self, data):
        """ For given raw data, split it and fill in missing keys
        with zeroes.

        Returns an aux.Data(negative, positive) pair of aligned lists
        covering x_min..max key; losses are stored as negative values.
        """
        x_max = self._max_key(data)
        negative, positive = [], []
        bi_keys = aux.LOSS, aux.WIN
        for index in range(self.x_min, x_max+1):
            negative.append(0)
            positive.append(0)
            if index in data[bi_keys[0]]:
                negative[-1] = -data[bi_keys[0]][index]
                if not self.histogram:
                    # Weight counts by their run value for "sorted" plots
                    # (skip the zero bucket when the axis starts at 0).
                    if index != 0 or self.x_min != 0:
                        negative[-1] *= index
            if index in data[bi_keys[1]]:
                positive[-1] = data[bi_keys[1]][index]
                if not self.histogram:
                    if index != 0 or self.x_min != 0:
                        positive[-1] *= index
        return aux.Data(negative, positive)

    def _fit_y_axis(self, data):
        """ Adjust Y-axis range to next minor tick if required.
        """
        y_min, y_max = self.y_min, self.y_max
        set_min = min(data.negative)
        if set_min <= self.y_min:
            y_min = set_min - (self.minor - set_min % self.minor)
        set_max = max(data.positive)
        if set_max >= self.y_max:
            y_max = set_max + (self.minor - set_max % self.minor)
        return aux.Axis(y_min, y_max)

    def _max_key(self, data):
        """ Return the max x-axis value found in keys (at least x_max).
        """
        dict_max = max([key for sub_data in data.values()
                        for key in sub_data])
        key_max = self.x_max
        if dict_max > self.x_max:
            key_max = dict_max
        return key_max

    def plot(self, plot_type, average):
        """ Main point of entry. Set off scraper, process and plot data.

        :param plot_type: key into OPTIONS selecting the transform chain
        :param average: True to also plot the league-average figure
        """
        # Dict with appropriate functions for data transforming defined
        # at bottom of module.
        (self.x_min, teams_raw, team_set, get_clean, to_plot) = OPTIONS[plot_type]
        cumulative = aux.Data([], [])
        for team, raw_data in teams_raw(self.scraper):
            raw_set = team_set(raw_data)
            data = get_clean(self, raw_set)
            to_plot(self, team, data)
            if average:
                aux.aggragate_cumulative(cumulative, data)
        if average:
            aux.average_data(cumulative, len(self.scraper.teams))
            to_plot(self, 'League Average', cumulative)

    def _plot_outcome_conceding(self, team, data):
        """ Sets the specific params for of win/loss outcome when team concedes
        x runs.
        """
        y_axis = self._fit_y_axis(data)
        record = aux.outcome_record(data, self.histogram)
        y_label = 'Wins/losses when conceding x runs' if self.histogram else\
                  'Total runs sorted by runs conceded per game'
        tag_label = 'outcome_conceding_histogram' if self.histogram else\
                    'outcome_conceding_sorted'
        self._plot_team(data,
                        record,
                        aux.Labels(team, 'Runs conceded', y_label, tag_label),
                        y_axis)

    def _plot_outcome_scoring(self, team, data):
        """ Sets the specific params for of win/loss outcome when team scores
        x runs.
        """
        y_axis = self._fit_y_axis(data)
        record = aux.outcome_record(data, self.histogram)
        y_label = 'Wins/losses when scoring x runs' if self.histogram else\
                  'Total runs sorted by runs scored per game'
        tag_label = 'outcome_scoring_histogram' if self.histogram else\
                    'outcome_scoring_sorted'
        self._plot_team(data,
                        record,
                        aux.Labels(team, 'Runs scored', y_label, tag_label),
                        y_axis)

    def _plot_team(self, data, record, labels, y_axis):
        """ Generic plotting for data found on the team's schedule and results
        page. Draws negative (red), positive (blue) and net (green) bars.
        """
        net = [n + m for m, n in zip(data.negative, data.positive)]
        fig = plt.figure()
        plt.xlabel(labels.x)
        plt.ylabel(labels.y)
        # record turned into string for int/float possibilty
        if isinstance(record.wins, int):
            plt.title('{} ({}-{}) - {}'\
                      .format(labels.team, record.wins, record.losses, self.scraper.year))
        else:
            plt.title('{} ({:.2f}-{:.2f}) - {}'\
                      .format(labels.team, record.wins, record.losses, self.scraper.year))
        x_max = len(data.negative) + 1 if self.x_min == 1 else len(data.negative)
        plt.axis([self.x_min, x_max, y_axis.min, y_axis.max])
        ax = plt.subplot()
        ax.set_xticks(np.arange(1, x_max, 1))
        # Major/minor gridlines on the y-axis only.
        major_locator = ticker.MultipleLocator(self.major)
        major_formatter = ticker.FormatStrFormatter('%d')
        minor_locator = ticker.MultipleLocator(self.minor)
        ax.yaxis.set_major_locator(major_locator)
        ax.yaxis.set_major_formatter(major_formatter)
        ax.yaxis.set_minor_locator(minor_locator)
        x_axis = range(self.x_min, x_max)
        ax.bar(x_axis, data.negative, width=0.96, color='r', edgecolor=None, linewidth=0)
        ax.bar(x_axis, data.positive, width=0.96, color='b', edgecolor=None, linewidth=0)
        ax.bar(x_axis, net, width=0.96, color='g', edgecolor=None, linewidth=0, label='Net')
        plt.axhline(0, color='black')
        plt.grid(which='both')
        ax.grid(which='minor', alpha=0.5)
        ax.grid(which='major', alpha=0.9)
        legend = ax.legend(loc='best')
        frame = legend.get_frame()
        frame.set_facecolor('0.90')
        self._save(labels.team, labels.tag)

    def _plot_win_loss_margins(self, team, data):
        """ Sets the specific params for margins of win/loss plot.
        """
        y_axis = self._fit_y_axis(data)
        # In "sorted" mode the stored values are runs; divide by the margin
        # (index + 1) to recover game counts for the W-L record.
        wins = sum(data.positive) if self.histogram else\
               sum([runs // (margin + 1) \
                    for margin, runs in enumerate(data.positive)])
        losses = -sum(data.negative) if self.histogram else\
                 -sum([runs // (margin + 1) \
                       for margin, runs in enumerate(data.negative)])
        y_label = '# of times won/loss by margin' if self.histogram else\
                  'Total Runs sorted by margin'
        tag_label = 'margin_histogram' if self.histogram else 'margin_sorted'
        self._plot_team(data,
                        aux.Record(wins, losses),
                        aux.Labels(team, 'Margin of win/loss', y_label, tag_label),
                        y_axis)

    def _plot_win_loss_streaks(self, team, data):
        """ Sets the specific params for win/loss streaks plot.
        """
        y_axis = self._fit_y_axis(data)
        # In histogram mode streak counts are weighted by streak length
        # (index + 1) to recover total games won/lost.
        wins = sum([(m + 1) * n for m, n in enumerate(data.positive)]) \
               if self.histogram else sum(data.positive)
        losses = -sum([(m + 1) * n for m, n in enumerate(data.negative)]) \
                 if self.histogram else -sum(data.negative)
        y_label = '# of Streaks' if self.histogram else 'Win/Losses sorted by streak'
        tag_label = 'streaks_histogram' if self.histogram else 'streaks_sorted'
        self._plot_team(data,
                        aux.Record(wins, losses),
                        aux.Labels(team, 'Streak Length', y_label, tag_label),
                        y_axis)

    def _save(self, filename, directory, ext='png', close=True, verbose=True):
        """ Save the current plot to file under <year>/<directory>/.
        """
        year = self.scraper.year
        # NOTE(review): `directory` is a string tag in practice, so the
        # `directory == []` branch never fires; the join path always runs.
        path = year if directory == [] else os.path.join(year, directory)
        if not os.path.exists(path):
            os.makedirs(path)
        savepath = os.path.join(path, filename + '.' + ext)
        if verbose:
            print("Saving figure to '{}'...".format(savepath))
        plt.savefig(savepath)
        if close:
            plt.close()

    def set_default_axes(self, x_min=1, x_max=10, y_min=-24, y_max=24):
        """ Adjust default axes range.
        """
        self.x_min, self.x_max = x_min, x_max
        self.y_min, self.y_max = y_min, y_max
# Data transformation and plotting chain
# Each entry maps a plot-type key to:
#   [x_min, raw-data fetcher (BrefScraper method), per-team aggregator
#    (Auxiliary helper), cleaner (Plot method), plotter (Plot method)]
# consumed by Plot.plot() via tuple unpacking.
OPTIONS = {'outcome_conceding': [0,
                                 brf.BrefScraper.game_scores,
                                 aux.outcome_when_conceding,
                                 Plot._clean_data,
                                 Plot._plot_outcome_conceding],
           'outcome_scoring': [0,
                               brf.BrefScraper.game_scores,
                               aux.outcome_when_scoring,
                               Plot._clean_data,
                               Plot._plot_outcome_scoring],
           'win_loss_streaks': [1,
                                brf.BrefScraper.wins_losses,
                                aux.count_wins_losses,
                                Plot._clean_data,
                                Plot._plot_win_loss_streaks],
           'win_loss_margins': [1,
                                brf.BrefScraper.game_scores,
                                aux.win_loss_margins,
                                Plot._clean_data,
                                Plot._plot_win_loss_margins]}
| bsd-2-clause |
mathdd/numpy | numpy/core/code_generators/ufunc_docstrings.py | 51 | 90047 | """
Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
numpy/core/code_generators/generate_umath.py to generate the docstrings
for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
from __future__ import division, absolute_import, print_function
# Registry of ufunc docstrings, keyed by fully qualified name
# ('<place>.<name>'), consumed by generate_umath.py at build time.
docdict = {}


def get(name):
    """Return the docstring registered under *name*, or None if absent."""
    return docdict.get(name)


def add_newdoc(place, name, doc):
    """Register *doc* in the module dictionary under '<place>.<name>'."""
    key = '.'.join((place, name))
    docdict[key] = doc
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(start=-10, stop=10, num=101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10])
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
add : ndarray or scalar
The sum of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
Array of the same shape as `a`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, in which to store the results.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar
is returned, otherwise an array.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : ndarray
Array of the same shape as `x`.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return
the `z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function
that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
Result.
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 has the binary representation ``00001101``. Likewise,
16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32),
... np.array([4, 4, 4, 2147483647L], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The complex conjugate of `x`, with same dtype as `y`.
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine element-wise.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding cosine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
>>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray or scalar
The quotient ``x1/x2``, element-wise. Returns a scalar if
both ``x1`` and ``x2`` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and
division by zero.
Notes
-----
Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting.
Behavior on division by zero can be changed using ``seterr``.
In Python 2, when both ``x1`` and ``x2`` are of an integer type,
``divide`` will behave like ``floor_divide``. In Python 3, it behaves
like ``true_divide``.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types (Python 2 only):
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic (again,
Python 2 only), and does not raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using ``seterr``:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Output array, element-wise exponential of `x`.
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with
magnitude 1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
http://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array to insert results into.
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
See Also
--------
power
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray or scalar
The absolute values of `x`, the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The floor of each element in `x`.
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where `floor(-2.5) == -3`.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the
inputs.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function fmod, the
remainder has the same sign as the dividend `x1`. It is equivalent to
the Matlab(TM) ``rem`` function and should not be confused with the
Python modulus operator ``x1 % x2``.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
See Also
--------
remainder : Equivalent to the Python ``%`` operator.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors
is bound by conventions. For `fmod`, the sign of result is the sign of
the dividend, while for `remainder` the sign of the result is the sign
of the divisor. The `fmod` function is equivalent to the Matlab(TM)
``rem`` function.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
# Docstring for np.invert. Fixes the examples, which previously used an
# undefined name `x` and bare `uint8`/`uint16`/`int8`/`array` (all of which
# would raise NameError if the doctests were actually executed).
add_newdoc('numpy.core.umath', 'invert',
    """
    Compute bit-wise inversion, or bit-wise NOT, element-wise.

    Computes the bit-wise NOT of the underlying binary representation of
    the integers in the input arrays. This ufunc implements the C/Python
    operator ``~``.

    For signed integer inputs, the two's complement is returned. In a
    two's-complement system negative numbers are represented by the two's
    complement of the absolute value. This is the most common method of
    representing signed integers on computers [1]_. A N-bit
    two's-complement system can represent every integer in the range
    :math:`-2^{N-1}` to :math:`+2^{N-1}-1`.

    Parameters
    ----------
    x1 : array_like
        Only integer and boolean types are handled.

    Returns
    -------
    out : array_like
        Result.

    See Also
    --------
    bitwise_and, bitwise_or, bitwise_xor
    logical_not
    binary_repr :
        Return the binary representation of the input number as a string.

    Notes
    -----
    `bitwise_not` is an alias for `invert`:

    >>> np.bitwise_not is np.invert
    True

    References
    ----------
    .. [1] Wikipedia, "Two's complement",
        http://en.wikipedia.org/wiki/Two's_complement

    Examples
    --------
    We've seen that 13 is represented by ``00001101``.
    The invert or bit-wise NOT of 13 is then:

    >>> np.invert(np.array([13], dtype=np.uint8))
    array([242], dtype=uint8)
    >>> np.binary_repr(13, width=8)
    '00001101'
    >>> np.binary_repr(242, width=8)
    '11110010'

    The result depends on the bit-width:

    >>> np.invert(np.array([13], dtype=np.uint16))
    array([65522], dtype=uint16)
    >>> np.binary_repr(13, width=16)
    '0000000000001101'
    >>> np.binary_repr(65522, width=16)
    '1111111111110010'

    When using signed integer types the result is the two's complement of
    the result for the unsigned type:

    >>> np.invert(np.array([13], dtype=np.int8))
    array([-14], dtype=int8)
    >>> np.binary_repr(-14, width=8)
    '11110010'

    Booleans are accepted as well:

    >>> np.invert(np.array([True, False]))
    array([False,  True], dtype=bool)

    """)
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the
corresponding element of the input is finite; otherwise the values
are False (element is either positive infinity, negative infinity
or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity. Errors result if the
second argument is also supplied when `x` is a scalar input, or if
first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Returns a boolean array of the same shape as `x`, True where ``x ==
+/-inf``, otherwise False.
Parameters
----------
x : array_like
Input values
out : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : bool (scalar) or boolean ndarray
For scalar input, the result is a new boolean with value True if
the input is positive or negative infinity; otherwise the value is
False.
For array input, the result is a boolean array with the same shape
as the input and the values are True where the corresponding
element of the input is positive or negative infinity; elsewhere
the values are False. If a second argument was supplied the result
is stored there. If the type of that array is a numeric type the
result is represented as zeros and ones, if the type is boolean
then as False and True, respectively. The return value `y` is then
a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray or bool
For scalar input, the result is a new boolean with value True if
the input is NaN; otherwise the value is False.
For array input, the result is a boolean array of the same
dimensions as the input and the values are True if the
corresponding element of the input is NaN; otherwise the values are
False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
# Docstring for np.less_equal. Fixes the summary line, which previously
# wrote the operator as `=<` — not a Python operator; the ufunc implements
# `x1 <= x2`.
add_newdoc('numpy.core.umath', 'less_equal',
    """
    Return the truth value of (x1 <= x2) element-wise.

    Parameters
    ----------
    x1, x2 : array_like
        Input arrays.  If ``x1.shape != x2.shape``, they must be
        broadcastable to a common shape (which may be the shape of one or
        the other).

    Returns
    -------
    out : bool or ndarray of bool
        Array of bools, or a single bool if `x1` and `x2` are scalars.

    See Also
    --------
    greater, less, greater_equal, equal, not_equal

    Examples
    --------
    >>> np.less_equal([4, 2, 1], [2, 2, 2])
    array([False,  True,  True], dtype=bool)

    """)
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it.
`log10` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small as
to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it.
`log1p` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. They must
be broadcastable to the same shape.
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by whether or not
broadcasting of one or both arrays was required.
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
Element-wise minimum of two arrays, propagates NaNs.
fmax :
Element-wise maximum of two arrays, ignores NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
fmin, amin, nanmin
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when
neither x1 nor x2 are nans, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
Element-wise maximum of two arrays, propagates NaNs.
fmin :
Element-wise minimum of two arrays, ignores NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
fmax, amax, nanmax
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.minimum(-np.Inf, 1)
-inf
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
Element-wise minimum of two arrays, ignores NaNs.
maximum :
Element-wise maximum of two arrays, propagates NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
minimum, amin, nanmin
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
# Docstring for np.fmin. Fixes the first two example outputs, which showed
# the element-wise MAXIMA (copy-pasted from fmax):
#   fmin([2, 3, 4], [1, 5, 2])  -> array([1, 3, 2]), not array([2, 5, 4])
#   fmin(eye(2), [0.5, 2])      -> [[0.5, 0.], [0., 1.]], not the fmax result
add_newdoc('numpy.core.umath', 'fmin',
    """
    Element-wise minimum of array elements.

    Compare two arrays and returns a new array containing the element-wise
    minima. If one of the elements being compared is a NaN, then the
    non-nan element is returned. If both elements are NaNs then the first
    is returned.  The latter distinction is important for complex NaNs,
    which are defined as at least one of the real or imaginary parts being
    a NaN. The net effect is that NaNs are ignored when possible.

    Parameters
    ----------
    x1, x2 : array_like
        The arrays holding the elements to be compared. They must have
        the same shape.

    Returns
    -------
    y : ndarray or scalar
        The minimum of `x1` and `x2`, element-wise.  Returns scalar if
        both `x1` and `x2` are scalars.

    See Also
    --------
    fmax :
        Element-wise maximum of two arrays, ignores NaNs.
    minimum :
        Element-wise minimum of two arrays, propagates NaNs.
    amin :
        The minimum value of an array along a given axis, propagates NaNs.
    nanmin :
        The minimum value of an array along a given axis, ignores NaNs.

    maximum, amax, nanmax

    Notes
    -----
    .. versionadded:: 1.3.0

    The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
    x1 nor x2 are NaNs, but it is faster and does proper broadcasting.

    Examples
    --------
    >>> np.fmin([2, 3, 4], [1, 5, 2])
    array([1, 3, 2])

    >>> np.fmin(np.eye(2), [0.5, 2])
    array([[ 0.5,  0. ],
           [ 0. ,  1. ]])

    >>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
    array([  0.,   0.,  NaN])

    """)
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0.0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Numerical negative, element-wise.
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', '_ones_like',
"""
This function used to be the numpy.ones_like, but now a specific
function for that has been written for consistency with the other
*_like functions. It is only used internally in a limited fashion now.
See Also
--------
ones_like
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes ``x1 - floor(x1 / x2) * x2``, the result has the same sign as
the divisor `x2`. It is equivalent to the Python modulus operator
``x1 % x2`` and should not be confused with the Matlab(TM) ``rem``
function.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The remainder of the quotient ``x1/x2``, element-wise. Returns a
scalar if both `x1` and `x2` are scalars.
See Also
--------
fmod : Equivalent of the Matlab(TM) ``rem`` function.
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of)
integers.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by `x2` places. Because the internal
representation of numbers is in binary format, this operation is
equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray or scalar
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next floating-point value after x1 towards x2, element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x1 : array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x1`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and NaN is NaN.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry (the
mathematical study of triangles). Consider a circle of radius 1
centered on the origin. A ray comes in from the :math:`+x` axis, makes
an angle at the origin (measured counter-clockwise from that axis), and
departs from the origin. The :math:`y` coordinate of the outgoing
ray's intersection with the unit circle is the sine of that angle. It
ranges from -1 for :math:`x=3\\pi / 2` to +1 for :math:`\\pi / 2.` The
function has zeroes where the angle is a multiple of :math:`\\pi`.
Sines of angles between :math:`\\pi` and :math:`2\\pi` are negative.
The numerous properties of the sine and related functions are included
in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the positive square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
    >>> np.sqrt([4, -1, np.inf])
array([ 2., NaN, Inf])
""")
add_newdoc('numpy.core.umath', 'cbrt',
"""
Return the cube-root of an array, element-wise.
.. versionadded:: 1.10.0
Parameters
----------
x : array_like
The values whose cube-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
        An array of the same shape as `x`, containing the
        cube-root of each element in `x`.
If `out` was provided, `y` is a reference to it.
Examples
--------
>>> np.cbrt([1,8,27])
array([ 1., 2., 3.])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
The floor division operator ``//`` was added in Python 2.2 making
``//`` and ``/`` equivalent operators. The default floor division
operation of ``/`` can be replaced by true division with ``from
__future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
add_newdoc('numpy.core.umath', 'frexp',
"""
Decompose the elements of x into mantissa and twos exponent.
    Returns (`mantissa`, `exponent`), where ``x = mantissa * 2**exponent``.
    The mantissa lies in the open interval (-1, 1), while the twos
    exponent is a signed integer.
Parameters
----------
x : array_like
Array of numbers to be decomposed.
out1 : ndarray, optional
Output array for the mantissa. Must have the same shape as `x`.
out2 : ndarray, optional
Output array for the exponent. Must have the same shape as `x`.
Returns
-------
(mantissa, exponent) : tuple of ndarrays, (float, int)
`mantissa` is a float array with values between -1 and 1.
`exponent` is an int array which represents the exponent of 2.
See Also
--------
ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
Examples
--------
>>> x = np.arange(9)
>>> y1, y2 = np.frexp(x)
>>> y1
array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
0.5 ])
>>> y2
array([0, 1, 2, 2, 3, 3, 3, 3, 4])
>>> y1 * 2**y2
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
add_newdoc('numpy.core.umath', 'ldexp',
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : array_like
Array of multipliers.
x2 : array_like, int
Array of twos exponents.
out : ndarray, optional
Output array for the result.
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
See Also
--------
frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
`ldexp` is useful as the inverse of `frexp`, if used by itself it is
more clear to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.], dtype=float32)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
array([ 0., 1., 2., 3., 4., 5.])
""")
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/core/computation/pytables.py | 7 | 18930 | """ manage PyTables query interface via Expressions """
import ast
from functools import partial
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_list_like
import pandas.core.common as com
from pandas.compat import u, string_types, DeepChainMap
from pandas.core.base import StringMixin
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
from pandas.core.computation import expr, ops
from pandas.core.computation.ops import is_term, UndefinedVariableError
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.common import _ensure_decoded
from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type
class Scope(expr.Scope):
    """Expression scope that additionally tracks the table's queryable columns."""
    __slots__ = 'queryables',

    def __init__(self, level, global_dict=None, local_dict=None,
                 queryables=None):
        # Bump the level by one to skip this constructor's own stack frame.
        super(Scope, self).__init__(level + 1, global_dict=global_dict,
                                    local_dict=local_dict)
        # Fall back to a fresh empty mapping when none (or an empty one) is given.
        if queryables:
            self.queryables = queryables
        else:
            self.queryables = dict()
class Term(ops.Term):
    """A term (column name or resolvable variable) in a PyTables expression.

    ``__new__`` routes non-string names to :class:`Constant`, so literal
    values entering the expression tree become constants automatically.
    """
    def __new__(cls, name, env, side=None, encoding=None):
        # Non-string names are literal values, not column references.
        klass = Constant if not isinstance(name, string_types) else cls
        supr_new = StringMixin.__new__
        return supr_new(klass)
    def __init__(self, name, env, side=None, encoding=None):
        super(Term, self).__init__(name, env, side=side, encoding=encoding)
    def _resolve_name(self):
        # must be a queryables
        if self.side == 'left':
            # The lhs of a comparison must name a queryable table column.
            if self.name not in self.env.queryables:
                raise NameError('name {0!r} is not defined'.format(self.name))
            return self.name
        # resolve the rhs (and allow it to be None)
        try:
            return self.env.resolve(self.name, is_local=False)
        except UndefinedVariableError:
            # Unresolvable rhs names are passed through verbatim.
            return self.name
    @property
    def value(self):
        # Resolved value cached by the base class during construction.
        return self._value
class Constant(Term):
    """A literal value appearing in a PyTables expression."""

    def __init__(self, value, env, side=None, encoding=None):
        super(Constant, self).__init__(value, env, side=side,
                                       encoding=encoding)

    def _resolve_name(self):
        # A constant resolves to itself; no scope lookup is performed.
        return self._name
class BinOp(ops.BinOp):
    """A binary operation node in a PyTables query expression tree.

    Subclasses specialize into numexpr conditions (:class:`ConditionBinOp`)
    or post-read filters (:class:`FilterBinOp`) via :meth:`prune`.
    """
    # Max number of values allowed in a single generated numexpr expression;
    # beyond this, equality tests fall back to a filter applied after reading.
    _max_selectors = 31
    def __init__(self, op, lhs, rhs, queryables, encoding):
        super(BinOp, self).__init__(op, lhs, rhs)
        self.queryables = queryables
        self.encoding = encoding
        self.filter = None
        self.condition = None
    def _disallow_scalar_only_bool_ops(self):
        # Scalar-only boolean ops are allowed for PyTables queries.
        pass
    def prune(self, klass):
        """Collapse this node into a `klass` (condition or filter) instance,
        recursively pruning sub-expressions and joining homogeneous pairs."""
        def pr(left, right):
            """ create and return a new specialized BinOp from myself """
            if left is None:
                return right
            elif right is None:
                return left
            k = klass
            if isinstance(left, ConditionBinOp):
                if (isinstance(left, ConditionBinOp) and
                        isinstance(right, ConditionBinOp)):
                    # Two conditions combine into a joint condition.
                    k = JointConditionBinOp
                elif isinstance(left, k):
                    return left
                elif isinstance(right, k):
                    return right
            elif isinstance(left, FilterBinOp):
                if (isinstance(left, FilterBinOp) and
                        isinstance(right, FilterBinOp)):
                    # Two filters combine into a joint filter.
                    k = JointFilterBinOp
                elif isinstance(left, k):
                    return left
                elif isinstance(right, k):
                    return right
            return k(self.op, left, right, queryables=self.queryables,
                     encoding=self.encoding).evaluate()
        left, right = self.lhs, self.rhs
        # The four cases below are exhaustive over term/non-term operands.
        if is_term(left) and is_term(right):
            res = pr(left.value, right.value)
        elif not is_term(left) and is_term(right):
            res = pr(left.prune(klass), right.value)
        elif is_term(left) and not is_term(right):
            res = pr(left.value, right.prune(klass))
        elif not (is_term(left) or is_term(right)):
            res = pr(left.prune(klass), right.prune(klass))
        return res
    def conform(self, rhs):
        """ return `rhs` conformed to a flat list-like of values """
        if not is_list_like(rhs):
            rhs = [rhs]
        if isinstance(rhs, np.ndarray):
            rhs = rhs.ravel()
        return rhs
    @property
    def is_valid(self):
        """ return True if this is a valid field """
        return self.lhs in self.queryables
    @property
    def is_in_table(self):
        """ return True if this is a valid column name for generation (e.g. an
        actual column in the table) """
        return self.queryables.get(self.lhs) is not None
    @property
    def kind(self):
        """ the kind of my field """
        return getattr(self.queryables.get(self.lhs), 'kind', None)
    @property
    def meta(self):
        """ the meta of my field """
        return getattr(self.queryables.get(self.lhs), 'meta', None)
    @property
    def metadata(self):
        """ the metadata of my field """
        return getattr(self.queryables.get(self.lhs), 'metadata', None)
    def generate(self, v):
        """ create and return the op string for this TermValue """
        val = v.tostring(self.encoding)
        return "(%s %s %s)" % (self.lhs, self.op, val)
    def convert_value(self, v):
        """ convert the expression that is in the term to something that is
        accepted by pytables """
        def stringify(value):
            # Use the encoding-aware printer when an encoding is configured.
            if self.encoding is not None:
                encoder = partial(pprint_thing_encoded,
                                  encoding=self.encoding)
            else:
                encoder = pprint_thing
            return encoder(value)
        kind = _ensure_decoded(self.kind)
        meta = _ensure_decoded(self.meta)
        if kind == u('datetime64') or kind == u('datetime'):
            if isinstance(v, (int, float)):
                v = stringify(v)
            v = _ensure_decoded(v)
            v = pd.Timestamp(v)
            if v.tz is not None:
                # Stored values are compared in UTC.
                v = v.tz_convert('UTC')
            return TermValue(v, v.value, kind)
        elif kind == u('timedelta64') or kind == u('timedelta'):
            v = _coerce_scalar_to_timedelta_type(v, unit='s').value
            return TermValue(int(v), v, kind)
        elif meta == u('category'):
            metadata = com._values_from_object(self.metadata)
            result = metadata.searchsorted(v, side='left')
            # result returns 0 if v is first element or if v is not in metadata
            # check that metadata contains v
            if not result and v not in metadata:
                result = -1
            return TermValue(result, result, u('integer'))
        elif kind == u('integer'):
            # Accept e.g. "3.0" as an integer by going through float first.
            v = int(float(v))
            return TermValue(v, v, kind)
        elif kind == u('float'):
            v = float(v)
            return TermValue(v, v, kind)
        elif kind == u('bool'):
            if isinstance(v, string_types):
                # Any string not in the "falsy" set is treated as True.
                v = not v.strip().lower() in [u('false'), u('f'), u('no'),
                                              u('n'), u('none'), u('0'),
                                              u('[]'), u('{}'), u('')]
            else:
                v = bool(v)
            return TermValue(v, v, kind)
        elif isinstance(v, string_types):
            # string quoting
            return TermValue(v, stringify(v), u('string'))
        else:
            raise TypeError(("Cannot compare {v} of type {typ}"
                             " to {kind} column").format(v=v, typ=type(v),
                                                         kind=kind))
    def convert_values(self):
        pass
class FilterBinOp(BinOp):
    """A binary op realized as a post-read filter: (column, op-callable, values)."""
    def __unicode__(self):
        return pprint_thing("[Filter : [{0}] -> "
                            "[{1}]".format(self.filter[0], self.filter[1]))
    def invert(self):
        """ invert the filter """
        if self.filter is not None:
            f = list(self.filter)
            # Swap isin/~isin by regenerating the op callable inverted.
            f[1] = self.generate_filter_op(invert=True)
            self.filter = tuple(f)
        return self
    def format(self):
        """ return the actual filter format """
        return [self.filter]
    def evaluate(self):
        """Build the filter tuple; return self on success, None if this op
        should instead be handled as a numexpr condition."""
        if not self.is_valid:
            raise ValueError("query term is not valid [%s]" % self)
        rhs = self.conform(self.rhs)
        values = [TermValue(v, v, self.kind) for v in rhs]
        if self.is_in_table:
            # if too many values to create the expression, use a filter instead
            if self.op in ['==', '!='] and len(values) > self._max_selectors:
                filter_op = self.generate_filter_op()
                self.filter = (
                    self.lhs,
                    filter_op,
                    pd.Index([v.value for v in values]))
                return self
            return None
        # equality conditions
        if self.op in ['==', '!=']:
            filter_op = self.generate_filter_op()
            self.filter = (
                self.lhs,
                filter_op,
                pd.Index([v.value for v in values]))
        else:
            # Non-equality ops on non-table columns cannot be filtered.
            raise TypeError(
                "passing a filterable condition to a non-table indexer [%s]" %
                self)
        return self
    def generate_filter_op(self, invert=False):
        # Return a callable(axis, vals) implementing (in)equality membership.
        if (self.op == '!=' and not invert) or (self.op == '==' and invert):
            return lambda axis, vals: ~axis.isin(vals)
        else:
            return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
    """A conjunction of two filters that cannot be collapsed into one."""

    def format(self):
        # There is no single-filter representation of a joint filter.
        raise NotImplementedError("unable to collapse Joint Filters")

    def evaluate(self):
        # Operands were already evaluated; nothing more to compute here.
        return self
class ConditionBinOp(BinOp):
    """A binary op realized as a numexpr condition string for PyTables."""
    def __unicode__(self):
        return pprint_thing("[Condition : [{0}]]".format(self.condition))
    def invert(self):
        """ invert the condition """
        # if self.condition is not None:
        #    self.condition = "~(%s)" % self.condition
        # return self
        raise NotImplementedError("cannot use an invert condition when "
                                  "passing to numexpr")
    def format(self):
        """ return the actual ne format """
        return self.condition
    def evaluate(self):
        """Build the condition string; return self on success, or None when
        the op must be handled as a post-read filter instead."""
        if not self.is_valid:
            raise ValueError("query term is not valid [%s]" % self)
        # convert values if we are in the table
        if not self.is_in_table:
            return None
        rhs = self.conform(self.rhs)
        values = [self.convert_value(v) for v in rhs]
        # equality conditions
        if self.op in ['==', '!=']:
            # too many values to create the expression?
            if len(values) <= self._max_selectors:
                # OR together one equality clause per value.
                vs = [self.generate(v) for v in values]
                self.condition = "(%s)" % ' | '.join(vs)
            # use a filter after reading
            else:
                return None
        else:
            self.condition = self.generate(values[0])
        return self
class JointConditionBinOp(ConditionBinOp):
    """Combine two already-evaluated conditions with a boolean operator."""

    def evaluate(self):
        lhs_cond = self.lhs.condition
        rhs_cond = self.rhs.condition
        self.condition = "(%s %s %s)" % (lhs_cond, self.op, rhs_cond)
        return self
class UnaryOp(ops.UnaryOp):
    """A unary operation node; only logical inversion (``~``) is supported."""
    def prune(self, klass):
        # Only '~' can be pushed down; other unary ops have no pytables form.
        if self.op != '~':
            raise NotImplementedError("UnaryOp only support invert type ops")
        operand = self.operand
        operand = operand.prune(klass)
        if operand is not None:
            if issubclass(klass, ConditionBinOp):
                # Inverting a condition is only possible via a filter, so a
                # condition-less operand falls through to None.
                if operand.condition is not None:
                    return operand.invert()
            elif issubclass(klass, FilterBinOp):
                if operand.filter is not None:
                    return operand.invert()
        return None
# Mapping used by the visitor machinery to locate the unary op class.
_op_classes = {'unary': UnaryOp}
class ExprVisitor(BaseExprVisitor):
    """AST visitor translating a query expression into pytables Term/BinOp
    nodes.

    Fix over previous revision: ``visit_Subscript`` used a bare ``except:``
    around the ``.value`` probe, which silently swallowed *any* exception
    (including KeyboardInterrupt/SystemExit). The probe can only legitimately
    fail with AttributeError, so we catch exactly that.
    """
    const_type = Constant
    term_type = Term

    def __init__(self, env, engine, parser, **kwargs):
        super(ExprVisitor, self).__init__(env, engine, parser)
        # Install one visit_* method per binary op that builds a pytables
        # BinOp carrying this visitor's queryables/encoding kwargs.
        for bin_op in self.binary_ops:
            setattr(self, 'visit_{0}'.format(self.binary_op_nodes_map[bin_op]),
                    lambda node, bin_op=bin_op: partial(BinOp, bin_op,
                                                        **kwargs))

    def visit_UnaryOp(self, node, **kwargs):
        if isinstance(node.op, (ast.Not, ast.Invert)):
            return UnaryOp('~', self.visit(node.operand))
        elif isinstance(node.op, ast.USub):
            # Fold unary minus into the constant's value.
            return self.const_type(-self.visit(node.operand).value, self.env)
        elif isinstance(node.op, ast.UAdd):
            raise NotImplementedError('Unary addition not supported')

    def visit_Index(self, node, **kwargs):
        return self.visit(node.value).value

    def visit_Assign(self, node, **kwargs):
        # Treat 'col=value' as the comparison 'col == value'.
        cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0],
                           comparators=[node.value])
        return self.visit(cmpr)

    def visit_Subscript(self, node, **kwargs):
        # only allow simple suscripts
        value = self.visit(node.value)
        slobj = self.visit(node.slice)
        try:
            # Unwrap a Term/Constant to its underlying value, if applicable.
            value = value.value
        except AttributeError:
            pass
        try:
            return self.const_type(value[slobj], self.env)
        except TypeError:
            raise ValueError("cannot subscript {0!r} with "
                             "{1!r}".format(value, slobj))

    def visit_Attribute(self, node, **kwargs):
        attr = node.attr
        value = node.value
        ctx = node.ctx.__class__
        if ctx == ast.Load:
            # resolve the value
            resolved = self.visit(value)
            # try to get the value to see if we are another expression
            try:
                resolved = resolved.value
            except AttributeError:
                pass
            try:
                return self.term_type(getattr(resolved, attr), self.env)
            except AttributeError:
                # something like datetime.datetime where scope is overriden
                if isinstance(value, ast.Name) and value.id == attr:
                    return resolved
        raise ValueError("Invalid Attribute context {0}".format(ctx.__name__))

    def translate_In(self, op):
        # 'col in values' is translated to equality for pytables.
        return ast.Eq() if isinstance(op, ast.In) else op

    def _rewrite_membership_op(self, node, left, right):
        return self.visit(node.op), node.op, left, right
def _validate_where(w):
    """
    Validate that the where statement is of the right type.

    The type may either be String, Expr, or list-like of Exprs.

    Parameters
    ----------
    w : String term expression, Expr, or list-like of Exprs.

    Returns
    -------
    where : The original where clause if the check was successful.

    Raises
    ------
    TypeError : An invalid data type was passed in for w (e.g. dict).
    """
    acceptable = isinstance(w, (Expr, string_types)) or is_list_like(w)
    if not acceptable:
        raise TypeError("where must be passed as a string, Expr, "
                        "or list-like of Exprs")
    return w
class Expr(expr.Expr):
    """ hold a pytables like expression, comprised of possibly multiple 'terms'
    Parameters
    ----------
    where : string term expression, Expr, or list-like of Exprs
    queryables : a "kinds" map (dict of column name -> kind), or None if column
        is non-indexable
    encoding : an encoding that will encode the query terms
    Returns
    -------
    an Expr object
    Examples
    --------
    'index>=date'
    "columns=['A', 'D']"
    'columns=A'
    'columns==A'
    "~(columns=['A','B'])"
    'index>df.index[3] & string="bar"'
    '(index>df.index[3] & index<=df.index[6]) | string="bar"'
    "ts>=Timestamp('2012-02-01')"
    "major_axis>=20130101"
    """
    def __init__(self, where, queryables=None, encoding=None, scope_level=0):
        where = _validate_where(where)
        self.encoding = encoding
        self.condition = None
        self.filter = None
        self.terms = None
        self._visitor = None
        # capture the environment if needed
        local_dict = DeepChainMap()
        if isinstance(where, Expr):
            # Reuse the wrapped Expr's captured scope and raw string.
            local_dict = where.env.scope
            where = where.expr
        elif isinstance(where, (list, tuple)):
            for idx, w in enumerate(where):
                if isinstance(w, Expr):
                    # NOTE: only the last Expr's scope wins here.
                    local_dict = w.env.scope
                else:
                    w = _validate_where(w)
                    where[idx] = w
            # AND together all sub-clauses into a single expression string.
            where = ' & ' .join(["(%s)" % w for w in where])  # noqa
        self.expr = where
        self.env = Scope(scope_level + 1, local_dict=local_dict)
        if queryables is not None and isinstance(self.expr, string_types):
            self.env.queryables.update(queryables)
            self._visitor = ExprVisitor(self.env, queryables=queryables,
                                        parser='pytables', engine='pytables',
                                        encoding=encoding)
            self.terms = self.parse()
    def __unicode__(self):
        if self.terms is not None:
            return pprint_thing(self.terms)
        return pprint_thing(self.expr)
    def evaluate(self):
        """ create and return the numexpr condition and filter """
        try:
            self.condition = self.terms.prune(ConditionBinOp)
        except AttributeError:
            raise ValueError("cannot process expression [{0}], [{1}] is not a "
                             "valid condition".format(self.expr, self))
        try:
            self.filter = self.terms.prune(FilterBinOp)
        except AttributeError:
            raise ValueError("cannot process expression [{0}], [{1}] is not a "
                             "valid filter".format(self.expr, self))
        return self.condition, self.filter
class TermValue(object):
    """Hold a term value that we use to construct a condition/filter."""

    def __init__(self, value, converted, kind):
        self.value = value
        self.converted = converted
        self.kind = kind

    def tostring(self, encoding):
        """Quote the string if not encoded, else encode and return."""
        if self.kind == u'string':
            # Already-encoded strings pass through; raw ones get quoted.
            if encoding is not None:
                return self.converted
            return '"%s"' % self.converted
        if self.kind == u'float':
            # python 2 str(float) is not always
            # round-trippable so use repr()
            return repr(self.converted)
        return self.converted
def maybe_expression(s):
    """Loose check whether *s* looks like a pytables-acceptable expression."""
    if not isinstance(s, string_types):
        return False
    candidate_ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',)
    # make sure we have an op at least
    for op in candidate_ops:
        if op in s:
            return True
    return False
| mit |
magic2du/contact_matrix | Contact_maps/DeepLearning/DeepLearningTool/DL_contact_matrix_load2-new10fold_12_15_2014_server.py | 1 | 43156 |
# coding: utf-8
# In[3]:
import sys, os
sys.path.append('../../../libs/')
import os.path
import IO_class
from IO_class import FileOperator
from sklearn import cross_validation
import sklearn
import numpy as np
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pandas as pd
import pdb
import pickle
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from sklearn import preprocessing
import sklearn
import scipy.stats as ss
from sklearn.svm import LinearSVC
import random
from DL_libs import *
from itertools import izip #new
import math
from sklearn.svm import SVC
# In[4]:
#filename = 'SUCCESS_log_CrossValidation_load_DL_remoteFisherM1_DL_RE_US_DL_RE_US_1_1_19MAY2014.txt'
#filename = 'listOfDDIsHaveOver2InterfacesHave40-75_Examples_2010_real_selected.txt' #for testing
# set settings for this script
# Experiment configuration: which classifier variants to run (1 = run,
# 0 = skip) and the deep-learning hyper-parameters.
settings = {}
settings['filename'] = 'ddi_examples_40_60_over2top_diff_name_2014.txt'
settings['fisher_mode'] = 'FisherM1'  # Fisher-vector encoding variant
settings['with_auc_score'] = False
settings['reduce_ratio'] = 1  # keep 1/ratio of the training sequences
settings['SVM'] = 1
settings['DL'] = 0
settings['SAE_SVM'] = 1
settings['SVM_RBF'] = 1
settings['SAE_SVM_RBF'] = 1
settings['SVM_POLY'] = 0
settings['DL_S'] = 0
settings['DL_U'] = 0
# Stacked-autoencoder pretraining / fine-tuning hyper-parameters.
settings['finetune_lr'] = 1
settings['batch_size'] = 100
settings['pretraining_interations'] = 5001
settings['pretrain_lr'] = 0.001
settings['training_epochs'] = 1502
settings['hidden_layers_sizes'] = [100, 100]
settings['corruption_levels'] = [0,0]
filename = settings['filename']
file_obj = FileOperator(filename)
# presumably one DDI family name per line -- verify against the input file
ddis = file_obj.readStripLines()
import logging
import time
current_date = time.strftime("%m_%d_%Y")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logname = 'log_DL_contact_matrix_load' + current_date + '.log'
handler = logging.FileHandler(logname)
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
logger.info('Input DDI file: ' + filename)
#logger.debug('This message should go to the log file')
# Record the full configuration in the log for reproducibility.
for key, value in settings.items():
    logger.info(key +': '+ str(value))
# In[5]:
# In[28]:
class DDI_family_base(object):
#def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/du/Documents/Vectors_Fishers_aaIndex_raw_2014/'):
#def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/sun/Downloads/contactmatrix/contactmatrixanddeeplearningcode/data_test/'):
    def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/big/du/Protein_Protein_Interaction_Project/Contact_Matrix_Project/Vectors_Fishers_aaIndex_raw_2014_paper/'):
        """ Load all sequences of a DDI family and build balanced datasets.
        Attributes:
            ddi: string ddi family name
            Vectors_Fishers_aaIndex_raw_folder: string, folder holding the
                per-family feature files
            total_number_of_sequences: int
            raw_data: dict seq_no -> raw feature matrix (last column = label)
            positve_negative_number: dict seq_no -> {'numPos', 'numNeg'}
            equal_size_data: dict seq_no -> matrix with equal positive and
                negative examples (negatives randomly subsampled)
        """
        self.ddi = ddi
        self.Vectors_Fishers_aaIndex_raw_folder = Vectors_Fishers_aaIndex_raw_folder
        self.ddi_folder = self.Vectors_Fishers_aaIndex_raw_folder + ddi + '/'
        self.total_number_of_sequences = self.get_total_number_of_sequences()
        self.raw_data = {}
        self.positve_negative_number = {}
        self.equal_size_data = {}
        for seq_no in range(1, self.total_number_of_sequences+1):
            self.raw_data[seq_no] = self.get_raw_data_for_selected_seq(seq_no)
            try:
                #positive_file = self.ddi_folder + 'numPos_'+ str(seq_no) + '.txt'
                #file_obj = FileOperator(positive_file)
                #lines = file_obj.readStripLines()
                #import pdb; pdb.set_trace()
                # Positives are counted from the label column (last column);
                # assumes labels are 0/1 -- TODO confirm upstream format.
                count_pos = int(np.sum(self.raw_data[seq_no][:, -1]))
                count_neg = self.raw_data[seq_no].shape[0] - count_pos
                #self.positve_negative_number[seq_no] = {'numPos': int(float(lines[0]))}
                #assert int(float(lines[0])) == count_pos
                self.positve_negative_number[seq_no] = {'numPos': count_pos}
                #negative_file = self.ddi_folder + 'numNeg_'+ str(seq_no) + '.txt'
                #file_obj = FileOperator(negative_file)
                #lines = file_obj.readStripLines()
                #self.positve_negative_number[seq_no]['numNeg'] = int(float(lines[0]))
                self.positve_negative_number[seq_no]['numNeg'] = count_neg
            except Exception,e:
                # NOTE(review): failures are logged but the loop continues;
                # the following lines will then raise KeyError for this seq_no.
                print ddi, seq_no
                print str(e)
                logger.info(ddi + str(seq_no))
                logger.info(str(e))
            # get data for equal positive and negative
            n_pos = self.positve_negative_number[seq_no]['numPos']
            n_neg = self.positve_negative_number[seq_no]['numNeg']
            # Assumes rows are ordered positives-first -- TODO confirm.
            index_neg = range(n_pos, n_pos + n_neg)
            random.shuffle(index_neg)
            index_neg = index_neg[: n_pos]
            positive_examples = self.raw_data[seq_no][ : n_pos, :]
            negative_examples = self.raw_data[seq_no][index_neg, :]
            self.equal_size_data[seq_no] = np.vstack((positive_examples, negative_examples))
    def get_LOO_training_and_reduced_traing(self, seq_no, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
        """ get the leave one out traing data, reduced traing
        Parameters:
            seq_no: sequence held out for testing
            fisher_mode: default 'FisherM1ONLY'
            reduce_ratio: keep len(remaining)/reduce_ratio randomly chosen
                sequences for the reduced training set (at least one)
        Returns:
            (train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y)
        """
        train_X_LOO = np.array([])
        train_y_LOO = np.array([])
        train_X_reduced = np.array([])
        train_y_reduced = np.array([])
        total_number_of_sequences = self.total_number_of_sequences
        equal_size_data_selected_sequence = self.equal_size_data[seq_no]
        #get test data for selected sequence
        test_X, test_y = self.select_X_y(equal_size_data_selected_sequence, fisher_mode = fisher_mode)
        total_sequences = range(1, total_number_of_sequences+1)
        loo_sequences = [i for i in total_sequences if i != seq_no]
        # Keep at least one sequence even when the division truncates to 0.
        number_of_reduced = len(loo_sequences)/reduce_ratio if len(loo_sequences)/reduce_ratio !=0 else 1
        random.shuffle(loo_sequences)
        reduced_sequences = loo_sequences[:number_of_reduced]
        #for loo data
        for current_no in loo_sequences:
            raw_current_data = self.equal_size_data[current_no]
            current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
            # First iteration: seed the matrix; afterwards stack rows.
            if train_X_LOO.ndim ==1:
                train_X_LOO = current_X
            else:
                train_X_LOO = np.vstack((train_X_LOO, current_X))
            train_y_LOO = np.concatenate((train_y_LOO, current_y))
        #for reduced data
        for current_no in reduced_sequences:
            raw_current_data = self.equal_size_data[current_no]
            current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
            if train_X_reduced.ndim ==1:
                train_X_reduced = current_X
            else:
                train_X_reduced = np.vstack((train_X_reduced, current_X))
            train_y_reduced = np.concatenate((train_y_reduced, current_y))
        return (train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y)
def get_ten_fold_crossvalid_one_subset(self, train_index, test_index, fisher_mode='FisherM1ONLY', reduce_ratio=4):
    """Build train/test matrices for one fold of 10-fold cross-validation.

    Parameters:
        train_index: 0-based sequence indices used for training this fold.
        test_index: 0-based sequence indices held out for testing.
        fisher_mode: feature-subset selector forwarded to select_X_y
            (default 'FisherM1ONLY').
        reduce_ratio: keep roughly 1/reduce_ratio of the training sequences
            for the reduced training set (always at least one sequence).
    Returns:
        (train_X_10fold, train_y_10fold), (train_X_reduced, train_y_reduced), (test_X, test_y)
    """
    train_X_10fold = np.array([])
    train_y_10fold = np.array([])
    train_X_reduced = np.array([])
    train_y_reduced = np.array([])
    test_X = np.array([])
    test_y = np.array([])
    total_number_of_sequences = self.total_number_of_sequences
    # Test data: stack the features of every held-out sequence. Indices are
    # 0-based while the sequence numbers keyed in equal_size_data are 1-based.
    for num in test_index:
        current_no = num + 1
        raw_current_data = self.equal_size_data[current_no]
        current_X, current_y = self.select_X_y(raw_current_data, fisher_mode=fisher_mode)
        if test_X.ndim == 1:  # still the initial empty placeholder
            test_X = current_X
        else:
            test_X = np.vstack((test_X, current_X))
        test_y = np.concatenate((test_y, current_y))
    # Floor division keeps this an int on both Python 2 and 3; plain '/'
    # yields a float under Python 3 and breaks the slice below.
    number_of_reduced = len(train_index) // reduce_ratio if len(train_index) // reduce_ratio != 0 else 1
    # NOTE(review): shuffles the caller's train_index in place -- confirm
    # callers do not rely on its order afterwards.
    random.shuffle(train_index)
    reduced_sequences = train_index[:number_of_reduced]
    # Full training data for this fold.
    for num in train_index:
        current_no = num + 1
        raw_current_data = self.equal_size_data[current_no]
        current_X, current_y = self.select_X_y(raw_current_data, fisher_mode=fisher_mode)
        if train_X_10fold.ndim == 1:
            train_X_10fold = current_X
        else:
            train_X_10fold = np.vstack((train_X_10fold, current_X))
        train_y_10fold = np.concatenate((train_y_10fold, current_y))
    # Reduced training subset.
    for num in reduced_sequences:
        current_no = num + 1
        raw_current_data = self.equal_size_data[current_no]
        current_X, current_y = self.select_X_y(raw_current_data, fisher_mode=fisher_mode)
        if train_X_reduced.ndim == 1:
            train_X_reduced = current_X
        else:
            train_X_reduced = np.vstack((train_X_reduced, current_X))
        train_y_reduced = np.concatenate((train_y_reduced, current_y))
    return (train_X_10fold, train_y_10fold), (train_X_reduced, train_y_reduced), (test_X, test_y)
def get_total_number_of_sequences(self):
    """Return how many sequence pairs this DDI family contains.

    Counts the rows of ``allPairs.txt`` inside the family's folder under
    ``Vectors_Fishers_aaIndex_raw_folder``.
    """
    pairs_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/' + 'allPairs.txt'
    all_pairs = np.loadtxt(pairs_path)
    return len(all_pairs)
def get_raw_data_for_selected_seq(self, seq_no):
    """Load the raw feature matrix for sequence *seq_no* of this family.

    Reads the matlab-exported text file from the family's folder and returns
    its contents as a numpy array.
    """
    family_dir = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/'
    seq_file = family_dir + 'F0_20_F1_20_Sliding_17_11_F0_20_F1_20_Sliding_17_11_ouput_' + str(seq_no) + '.txt'
    return np.loadtxt(seq_file)
def select_X_y(self, data, fisher_mode=''):
    """Slice the feature columns requested by *fisher_mode* out of *data*.

    Parameters:
        data: 2-D array from the matlab txt export; the last column is the label.
        fisher_mode: one of 'FisherM1' (Fisher M1 plus AA index),
            'FisherM1ONLY', or 'AAONLY'.
    Returns:
        (X, y): selected feature matrix and label vector.
    Raises:
        ValueError: if fisher_mode is not one of the known modes.
    """
    y = data[:, -1]  # last column holds the label
    # NOTE(review): the column ranges below encode the exporter's layout of
    # Fisher-M1 vs AA-index features for the two sides -- confirm against
    # the matlab export format if they ever change.
    if fisher_mode == 'FisherM1':  # fisher m1 plus AA index
        a = data[:, 20:227]
        b = data[:, 247:454]
        X = np.hstack((a, b))
    elif fisher_mode == 'FisherM1ONLY':
        a = data[:, 20:40]
        b = data[:, 247:267]
        X = np.hstack((a, b))
    elif fisher_mode == 'AAONLY':
        a = data[:, 40:227]
        b = data[:, 267:454]
        X = np.hstack((a, b))
    else:
        # The original `raise('...')` raised a bare string, which is itself a
        # TypeError in modern Python; raise a proper exception instead.
        raise ValueError('there is an error in mode')
    return X, y
# In[28]:
# In[29]:
import sklearn.preprocessing
def performance_score(target_label, predicted_label, with_auc_score=False, print_report=True):
    """Compute classification metrics for a prediction.

    Parameters:
        target_label: ground-truth labels (0/1).
        predicted_label: hard 0/1 predictions, or ranking scores when
            with_auc_score is True.
        with_auc_score: when True, also compute the ROC AUC from the scores.
        print_report: when True, echo every metric to stdout.
    Returns:
        dict mapping metric name ('accuracy', 'precision', 'recall', and
        optionally 'auc_score') to its value.
    """
    import sklearn
    from sklearn.metrics import roc_auc_score
    score = {}
    if not with_auc_score:
        score['accuracy'] = sklearn.metrics.accuracy_score(target_label, predicted_label)
        score['precision'] = sklearn.metrics.precision_score(target_label, predicted_label, pos_label=1)
        score['recall'] = sklearn.metrics.recall_score(target_label, predicted_label, pos_label=1)
    if with_auc_score:
        auc_score = roc_auc_score(target_label, predicted_label)
        score['auc_score'] = auc_score
        # NOTE(review): only target_label is binarised at 0.5 here while
        # predicted_label is passed through as-is -- confirm this is intended.
        target_label = [x >= 0.5 for x in target_label]
        score['accuracy'] = sklearn.metrics.accuracy_score(target_label, predicted_label)
        score['precision'] = sklearn.metrics.precision_score(target_label, predicted_label, pos_label=1)
        score['recall'] = sklearn.metrics.recall_score(target_label, predicted_label, pos_label=1)
    if print_report:
        # items() replaces the Python-2-only iteritems(); the single-argument
        # print() emits the same "<name> <value%>" text on Python 2 and 3.
        for key, value in score.items():
            print('%s %s' % (key, '{percent:.1%}'.format(percent=value)))
    return score
def saveAsCsv(with_auc_score, fname, score_dict, arguments):  # new
    """Append score rows to ``report_<fname>.csv``.

    A header row ('no.', 'method', 'isTest', then the metric names from
    *score_dict*) is written only when the file does not exist yet; repeated
    calls keep appending result rows.

    Parameters:
        with_auc_score: kept for interface compatibility (the header layout
            is the same either way).
        fname: report name; the file written is 'report_' + fname + '.csv'.
        score_dict: metric-name -> value dict whose keys become header columns.
        arguments: iterable of row tuples to append.
    """
    report_path = 'report_' + fname + '.csv'
    newfile = not os.path.isfile(report_path)
    # 'a+' keeps appending across repeated runs; `with` guarantees the file
    # is closed even if a row fails to serialise.
    with open(report_path, 'a+') as csvfile:
        writer = csv.writer(csvfile)
        if newfile:
            # list() is required because dict.keys() is a view on Python 3
            # and cannot be concatenated to a list directly.
            writer.writerow(['no.', 'method', 'isTest'] + list(score_dict.keys()))
        for arg in arguments:
            writer.writerow(list(arg))
def LOO_out_performance_for_all(ddis):
    """Run the leave-one-out evaluation for every DDI family.

    A failure in one family is printed and logged instead of propagating, so
    the remaining families are still processed.
    """
    for ddi in ddis:
        try:
            one_ddi_family = LOO_out_performance_for_one_ddi(ddi)
            one_ddi_family.get_LOO_perfermance(settings=settings)
        except Exception as e:
            # `except Exception, e` is Python-2-only syntax; `as` works on
            # Python 2.6+ and 3 alike.
            print(str(e))
            logger.info("There is a error in this ddi: %s" % ddi)
            logger.info(str(e))
class LOO_out_performance_for_one_ddi(object):
    """ get the performance of ddi families
    Attributes:
        ddi: string ddi name
        Vectors_Fishers_aaIndex_raw_folder: string, folder
        total_number_of_sequences: int
        raw_data: dict raw_data[2]
    """
    def __init__(self, ddi):
        # All training/test splits come from the per-family data loader.
        self.ddi_obj = DDI_family_base(ddi)
        self.ddi = ddi
    def get_LOO_perfermance(self, settings = None):
        # Leave-one-out evaluation over every sequence of the family. Each
        # model's train/test metrics are accumulated in analysis_scr and
        # finally written to a CSV report via saveAsCsv.
        fisher_mode = settings['fisher_mode']
        analysis_scr = []
        with_auc_score = settings['with_auc_score']
        reduce_ratio = settings['reduce_ratio']
        for seq_no in range(1, self.ddi_obj.total_number_of_sequences+1):
            print seq_no
            logger.info('sequence number: ' + str(seq_no))
            if settings['SVM']:
                print "SVM"
                (train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y) = self.ddi_obj.get_LOO_training_and_reduced_traing(seq_no,fisher_mode = fisher_mode, reduce_ratio = reduce_ratio)
                # Baseline: linear SVM on standardised features.
                standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
                scaled_train_X = standard_scaler.transform(train_X_reduced)
                scaled_test_X = standard_scaler.transform(test_X)
                Linear_SVC = LinearSVC(C=1, penalty="l2")
                Linear_SVC.fit(scaled_train_X, train_y_reduced)
                predicted_test_y = Linear_SVC.predict(scaled_test_X)
                isTest = True; #new
                analysis_scr.append((self.ddi, seq_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
                predicted_train_y = Linear_SVC.predict(scaled_train_X)
                isTest = False; #new
                analysis_scr.append((self.ddi, seq_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
            # Deep learning part
            # NOTE(review): the variables below (train_X_reduced, test_X, ...)
            # are produced inside the SVM branch above; with settings['SVM']
            # disabled they would be undefined -- confirm SVM is always on.
            min_max_scaler = Preprocessing_Scaler_with_mean_point5()
            X_train_pre_validation_minmax = min_max_scaler.fit(train_X_reduced)
            X_train_pre_validation_minmax = min_max_scaler.transform(train_X_reduced)
            x_test_minmax = min_max_scaler.transform(test_X)
            pretraining_X_minmax = min_max_scaler.transform(train_X_LOO)
            # The reduced training set is further split 60/40 into train and
            # validation parts with a fixed seed.
            x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax = train_test_split(X_train_pre_validation_minmax,
                                                                                                        train_y_reduced
                                                                                                        , test_size=0.4, random_state=42)
            finetune_lr = settings['finetune_lr']
            batch_size = settings['batch_size']
            # Convert the configured iteration budget into whole epochs for
            # this training-set size.
            pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
            #pretrain_lr=0.001
            pretrain_lr = settings['pretrain_lr']
            training_epochs = settings['training_epochs']
            hidden_layers_sizes= settings['hidden_layers_sizes']
            corruption_levels = settings['corruption_levels']
            if settings['DL']:
                print "direct deep learning"
                # direct deep learning
                sda = trainSda(x_train_minmax, y_train_minmax,
                               x_validation_minmax, y_validation_minmax ,
                               x_test_minmax, test_y,
                               hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
                               training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
                               pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
                               )
                print 'hidden_layers_sizes:', hidden_layers_sizes
                print 'corruption_levels:', corruption_levels
                training_predicted = sda.predict(x_train_minmax)
                y_train = y_train_minmax
                isTest = False; #new
                analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
                test_predicted = sda.predict(x_test_minmax)
                y_test = test_y
                isTest = True; #new
                analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
            if 0:
                # deep learning using unlabeled data for pretraining
                # (branch deliberately disabled with `if 0:`; kept for reference)
                print 'deep learning with unlabel data'
                pretraining_epochs_for_reduced = cal_epochs(1500, pretraining_X_minmax, batch_size = batch_size)
                sda_unlabel = trainSda(x_train_minmax, y_train_minmax,
                                       x_validation_minmax, y_validation_minmax ,
                                       x_test_minmax, test_y,
                                       pretraining_X_minmax = pretraining_X_minmax,
                                       hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
                                       training_epochs = training_epochs, pretraining_epochs = pretraining_epochs_for_reduced,
                                       pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
                                       )
                print 'hidden_layers_sizes:', hidden_layers_sizes
                print 'corruption_levels:', corruption_levels
                training_predicted = sda_unlabel.predict(x_train_minmax)
                y_train = y_train_minmax
                isTest = False; #new
                analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
                test_predicted = sda_unlabel.predict(x_test_minmax)
                y_test = test_y
                isTest = True; #new
                analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
            if settings['DL_S']:
                # deep learning using split network
                print 'deep learning using split network'
                # get the new representation for A set. first 784-D
                pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
                hidden_layers_sizes= settings['hidden_layers_sizes']
                corruption_levels = settings['corruption_levels']
                # One autoencoder is trained per half of the feature columns,
                # then a classifier is fine-tuned on the concatenated codes.
                x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
                print "original shape for A", x.shape
                a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
                                              hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
                new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
                x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
                print "original shape for B", x.shape
                a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
                                              hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
                new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
                new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
                new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
                new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
                new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
                new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
                new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
                new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
                finetune_lr = settings['finetune_lr']
                batch_size = settings['batch_size']
                pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
                #pretrain_lr=0.001
                pretrain_lr = settings['pretrain_lr']
                training_epochs = settings['training_epochs']
                hidden_layers_sizes= settings['hidden_layers_sizes']
                corruption_levels = settings['corruption_levels']
                # NOTE(review): y_test is only assigned in the DL branch above;
                # with settings['DL'] disabled this would raise -- confirm.
                sda_transformed = trainSda(new_x_train_minmax_whole, y_train_minmax,
                                           new_x_validationt_minmax_whole, y_validation_minmax ,
                                           new_x_test_minmax_whole, y_test,
                                           hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
                                           training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
                                           pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
                                           )
                print 'hidden_layers_sizes:', hidden_layers_sizes
                print 'corruption_levels:', corruption_levels
                training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
                y_train = y_train_minmax
                isTest = False; #new
                analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
                test_predicted = sda_transformed.predict(new_x_test_minmax_whole)
                y_test = test_y
                isTest = True; #new
                analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
        # Save all accumulated rows once; the report name embeds the last
        # iteration's hyper-parameters.
        # NOTE(review): indentation reconstructed -- confirm this save was not
        # intended to run once per sequence inside the loop.
        report_name = filename + '_' + '_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + str(reduce_ratio)+ '_' +str(training_epochs) + '_' + current_date
        saveAsCsv(with_auc_score, report_name, performance_score(y_test, test_predicted, with_auc_score), analysis_scr)
# In[29]:
# In[30]:
#for 10-fold cross validation
def ten_fold_crossvalid_performance_for_all(ddis):
    """Run 10-fold cross-validation for every DDI family.

    A failure in one family is printed and logged instead of propagating, so
    the remaining families are still processed.
    """
    for ddi in ddis:
        try:
            process_one_ddi_tenfold(ddi)
        except Exception as e:
            # `except Exception, e` is Python-2-only syntax; `as` works on
            # Python 2.6+ and 3 alike.
            print(str(e))
            logger.debug("There is a error in this ddi: %s" % ddi)
            logger.info(str(e))
def process_one_ddi_tenfold(ddi):
    """Run the 10-fold cross-validation evaluation for a single DDI family.

    (The previous docstring, "A function to waste CPU cycles", was a leftover
    placeholder and did not describe the behaviour.)
    """
    logger.info('DDI: %s' % ddi)
    one_ddi_family = {}
    one_ddi_family[ddi] = Ten_fold_crossvalid_performance_for_one_ddi(ddi)
    one_ddi_family[ddi].get_ten_fold_crossvalid_perfermance(settings=settings)
    return None
class Ten_fold_crossvalid_performance_for_one_ddi(object):
    """ get the performance of ddi families
    Attributes:
        ddi: string ddi name
        Vectors_Fishers_aaIndex_raw_folder: string, folder
        total_number_of_sequences: int
        raw_data: dict raw_data[2]
    """
    def __init__(self, ddi):
        # All per-fold train/test splits come from the family data loader.
        self.ddi_obj = DDI_family_base(ddi)
        self.ddi = ddi
    def get_ten_fold_crossvalid_perfermance(self, settings = None):
        # 10-fold cross-validated evaluation of every model family enabled in
        # `settings` (SVM variants, SAE+SVM hybrids, stacked denoising
        # autoencoders). Metric rows accumulate in analysis_scr and are saved
        # to a CSV report at the end.
        fisher_mode = settings['fisher_mode']
        analysis_scr = []
        with_auc_score = settings['with_auc_score']
        reduce_ratio = settings['reduce_ratio']
        #for seq_no in range(1, self.ddi_obj.total_number_of_sequences+1):
        #subset_size = math.floor(self.ddi_obj.total_number_of_sequences / 10.0)
        kf = KFold(self.ddi_obj.total_number_of_sequences, n_folds = 10, shuffle = True)
        #for subset_no in range(1, 11):
        # izip pairs each fold's (train, test) indices with a 1-based fold no.
        for ((train_index, test_index),subset_no) in izip(kf,range(1,11)):
            #for train_index, test_index in kf;
            print("Subset:", subset_no)
            print("Train index: ", train_index)
            print("Test index: ", test_index)
            #logger.info('subset number: ' + str(subset_no))
            (train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y) = self.ddi_obj.get_ten_fold_crossvalid_one_subset(train_index, test_index, fisher_mode = fisher_mode, reduce_ratio = reduce_ratio)
            if settings['SVM']:
                print "SVM"
                # Linear SVM on standardised features.
                standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
                scaled_train_X = standard_scaler.transform(train_X_reduced)
                scaled_test_X = standard_scaler.transform(test_X)
                Linear_SVC = LinearSVC(C=1, penalty="l2")
                Linear_SVC.fit(scaled_train_X, train_y_reduced)
                predicted_test_y = Linear_SVC.predict(scaled_test_X)
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
                predicted_train_y = Linear_SVC.predict(scaled_train_X)
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
            if settings['SVM_RBF']:
                print "SVM_RBF"
                # RBF-kernel SVM on the same standardised features.
                standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
                scaled_train_X = standard_scaler.transform(train_X_reduced)
                scaled_test_X = standard_scaler.transform(test_X)
                L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(scaled_train_X, train_y_reduced)
                predicted_test_y = L1_SVC_RBF_Selector.predict(scaled_test_X)
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
                predicted_train_y = L1_SVC_RBF_Selector.predict(scaled_train_X)
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
            if settings['SVM_POLY']:
                print "SVM_POLY"
                # Polynomial-kernel SVM.
                standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
                scaled_train_X = standard_scaler.transform(train_X_reduced)
                scaled_test_X = standard_scaler.transform(test_X)
                L1_SVC_POLY_Selector = SVC(C=1, kernel='poly').fit(scaled_train_X, train_y_reduced)
                predicted_test_y = L1_SVC_POLY_Selector.predict(scaled_test_X)
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
                predicted_train_y = L1_SVC_POLY_Selector.predict(scaled_train_X)
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
            # direct deep learning
            # Rescale features for the autoencoder-based models; the reduced
            # training set is split 60/40 into train/validation (fixed seed).
            min_max_scaler = Preprocessing_Scaler_with_mean_point5()
            X_train_pre_validation_minmax = min_max_scaler.fit(train_X_reduced)
            X_train_pre_validation_minmax = min_max_scaler.transform(train_X_reduced)
            x_test_minmax = min_max_scaler.transform(test_X)
            pretraining_X_minmax = min_max_scaler.transform(train_X_10fold)
            x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax = train_test_split(X_train_pre_validation_minmax,
                                                                                                        train_y_reduced
                                                                                                        , test_size=0.4, random_state=42)
            finetune_lr = settings['finetune_lr']
            batch_size = settings['batch_size']
            # Convert the configured iteration budget into whole epochs.
            pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
            #pretrain_lr=0.001
            pretrain_lr = settings['pretrain_lr']
            training_epochs = settings['training_epochs']
            hidden_layers_sizes= settings['hidden_layers_sizes']
            corruption_levels = settings['corruption_levels']
            if settings['SAE_SVM']:
                # SAE_SVM
                print 'SAE followed by SVM'
                # Stacked autoencoder as feature extractor, linear SVM on top.
                x = X_train_pre_validation_minmax
                a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
                                              hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
                new_x_train_minmax_A = a_MAE_A.transform(X_train_pre_validation_minmax)
                new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax)
                Linear_SVC = LinearSVC(C=1, penalty="l2")
                Linear_SVC.fit(new_x_train_minmax_A, train_y_reduced)
                predicted_test_y = Linear_SVC.predict(new_x_test_minmax_A)
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
                predicted_train_y = Linear_SVC.predict(new_x_train_minmax_A)
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
            if settings['SAE_SVM_RBF']:
                print 'SAE followed by SVM RBF'
                # Stacked autoencoder features with an RBF-kernel SVM on top.
                x = X_train_pre_validation_minmax
                a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
                                              hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
                new_x_train_minmax_A = a_MAE_A.transform(X_train_pre_validation_minmax)
                new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax)
                L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_minmax_A, train_y_reduced)
                predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_A)
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
                predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_A)
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
            if settings['DL']:
                print "direct deep learning"
                # Stacked denoising autoencoder trained end-to-end on the fold.
                sda = trainSda(x_train_minmax, y_train_minmax,
                               x_validation_minmax, y_validation_minmax ,
                               x_test_minmax, test_y,
                               hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
                               training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
                               pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
                               )
                print 'hidden_layers_sizes:', hidden_layers_sizes
                print 'corruption_levels:', corruption_levels
                training_predicted = sda.predict(x_train_minmax)
                y_train = y_train_minmax
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
                test_predicted = sda.predict(x_test_minmax)
                y_test = test_y
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
            if settings['DL_U']:
                # deep learning using unlabeled data for pretraining
                print 'deep learning with unlabel data'
                # Same network, but pretrained on the full fold's (unlabelled)
                # training data rather than only the reduced subset.
                pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
                sda_unlabel = trainSda(x_train_minmax, y_train_minmax,
                                       x_validation_minmax, y_validation_minmax ,
                                       x_test_minmax, test_y,
                                       pretraining_X_minmax = pretraining_X_minmax,
                                       hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
                                       training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
                                       pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
                                       )
                print 'hidden_layers_sizes:', hidden_layers_sizes
                print 'corruption_levels:', corruption_levels
                training_predicted = sda_unlabel.predict(x_train_minmax)
                y_train = y_train_minmax
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
                test_predicted = sda_unlabel.predict(x_test_minmax)
                y_test = test_y
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
            if settings['DL_S']:
                # deep learning using split network
                y_test = test_y
                print 'deep learning using split network'
                # get the new representation for A set. first 784-D
                pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
                # One autoencoder is trained per half of the feature columns,
                # then a classifier is fine-tuned on the concatenated codes.
                x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
                print "original shape for A", x.shape
                a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
                                              hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
                new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
                x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
                print "original shape for B", x.shape
                a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
                                              hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
                new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
                new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
                new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
                new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
                new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
                new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
                new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
                new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
                sda_transformed = trainSda(new_x_train_minmax_whole, y_train_minmax,
                                           new_x_validationt_minmax_whole, y_validation_minmax ,
                                           new_x_test_minmax_whole, y_test,
                                           hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
                                           training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
                                           pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
                                           )
                print 'hidden_layers_sizes:', hidden_layers_sizes
                print 'corruption_levels:', corruption_levels
                training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
                y_train = y_train_minmax
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
                test_predicted = sda_transformed.predict(new_x_test_minmax_whole)
                y_test = test_y
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
        # Save all accumulated rows once; the report name embeds the last
        # fold's hyper-parameters.
        # NOTE(review): indentation reconstructed -- confirm this save was not
        # intended to run once per fold inside the loop.
        report_name = filename + '_' + '_test10fold_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + str(reduce_ratio)+ '_' + str(training_epochs) + '_' + current_date
        saveAsCsv(with_auc_score, report_name, performance_score(test_y, predicted_test_y, with_auc_score), analysis_scr)
# In[1]:
# Run the full 10-fold cross-validation over every DDI family.
ten_fold_crossvalid_performance_for_all(ddis[:])
# In[ ]:
#LOO_out_performance_for_all(ddis)
# In[25]:
# Detach, flush and close all registered logging handlers so the log files
# are complete when the notebook run ends.
# NOTE(review): `log` is not defined in this chunk (the rest of the file uses
# `logger`), and `logging._handlers` is a private attribute -- confirm both.
x = logging._handlers.copy()
for i in x:
    log.removeHandler(i)
    i.flush()
    i.close()
| gpl-2.0 |
justincassidy/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms are compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)

# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause

from sklearn.externals.six.moves import zip

import matplotlib.pyplot as plt

from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier

# Three classes drawn as concentric chi^2 quantiles of a 10-D Gaussian;
# the first 3000 samples are used for training, the rest for testing.
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
                               n_classes=3, random_state=1)

n_split = 3000

X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]

# SAMME.R (the default algorithm) boosts on probability estimates;
# the second classifier uses discrete SAMME on hard classifications.
bdt_real = AdaBoostClassifier(
    DecisionTreeClassifier(max_depth=2),
    n_estimators=600,
    learning_rate=1)

bdt_discrete = AdaBoostClassifier(
    DecisionTreeClassifier(max_depth=2),
    n_estimators=600,
    learning_rate=1.5,
    algorithm="SAMME")

bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)

# Test error after each boosting stage, for both variants in lockstep.
real_test_errors = []
discrete_test_errors = []

for real_test_predict, discrete_train_predict in zip(
        bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
    real_test_errors.append(
        1. - accuracy_score(real_test_predict, y_test))
    discrete_test_errors.append(
        1. - accuracy_score(discrete_train_predict, y_test))

n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)

# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]

# Left panel: staged test error of both algorithms.
plt.figure(figsize=(15, 5))

plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
         discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
         real_test_errors, c='black',
         linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')

# Middle panel: per-tree classification error of each algorithm.
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
         "b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
         "r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
          max(real_estimator_errors.max(),
              discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))

# Right panel: per-tree boost weight (SAMME only; SAMME.R weights are all
# one and therefore not plotted, as the module docstring explains).
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
         "b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))

# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/nolearn-0.5/nolearn/tests/test_lasagne.py | 2 | 5731 | from mock import patch
from lasagne.layers import DenseLayer
from lasagne.layers import DropoutLayer
from lasagne.layers import InputLayer
from lasagne.nonlinearities import identity
from lasagne.nonlinearities import softmax
from lasagne.updates import nesterov_momentum
import numpy as np
import pytest
from sklearn.base import clone
from sklearn.datasets import load_boston
from sklearn.datasets import fetch_mldata
from sklearn.datasets import make_regression
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
import theano.tensor as T
@pytest.fixture(scope='session')
def mnist():
    """Shuffled 'mnist-original' data: float32 features scaled to [0, 1],
    int32 labels, deterministic order (random_state=42)."""
    bunch = fetch_mldata('mnist-original')
    features = bunch.data.astype(np.float32) / 255.0
    labels = bunch.target.astype(np.int32)
    return shuffle(features, labels, random_state=42)
@pytest.fixture(scope='session')
def boston():
    """Shuffled Boston housing data: standardised float32 features and a
    float32 column-vector target, deterministic order (random_state=42)."""
    bunch = load_boston()
    features = StandardScaler().fit_transform(bunch.data).astype(np.float32)
    targets = bunch.target.reshape(-1, 1).astype(np.float32)
    return shuffle(features, targets, random_state=42)
def test_lasagne_functional_mnist(mnist):
    # Run a full example on the mnist dataset
    from nolearn.lasagne import NeuralNet
    X, y = mnist
    # Standard MNIST split: 60k train, 10k test.
    X_train, y_train = X[:60000], y[:60000]
    X_test, y_test = X[60000:], y[60000:]

    epochs = []

    def on_epoch_finished(nn, train_history):
        # Snapshot the history so far; raising after the second epoch aborts
        # training early (the asserts below expect exactly two epochs).
        epochs[:] = train_history
        if len(epochs) > 1:
            raise StopIteration()

    # Two 512-unit hidden layers with dropout, softmax output over 10 digits.
    nn = NeuralNet(
        layers=[
            ('input', InputLayer),
            ('hidden1', DenseLayer),
            ('dropout1', DropoutLayer),
            ('hidden2', DenseLayer),
            ('dropout2', DropoutLayer),
            ('output', DenseLayer),
            ],
        input_shape=(None, 784),
        output_num_units=10,
        output_nonlinearity=softmax,

        more_params=dict(
            hidden1_num_units=512,
            hidden2_num_units=512,
            ),

        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,

        max_epochs=5,
        on_epoch_finished=on_epoch_finished,
        )

    nn.fit(X_train, y_train)
    # Early stop left exactly two epochs, with improving validation accuracy.
    assert len(epochs) == 2
    assert epochs[0]['valid_accuracy'] > 0.85
    assert epochs[1]['valid_accuracy'] > epochs[0]['valid_accuracy']
    assert sorted(epochs[0].keys()) == [
        'epoch', 'train_loss', 'valid_accuracy', 'valid_loss',
        ]

    y_pred = nn.predict(X_test)
    assert accuracy_score(y_pred, y_test) > 0.85
def test_lasagne_functional_grid_search(mnist, monkeypatch):
    # Make sure that we can satisfy the grid search interface.
    from nolearn.lasagne import NeuralNet

    nn = NeuralNet(
        layers=[],
        X_tensor_type=T.matrix,
        )

    param_grid = {
        'more_params': [{'hidden_num_units': 100}, {'hidden_num_units': 200}],
        'update_momentum': [0.9, 0.98],
        }
    X, y = mnist

    vars_hist = []

    def fit(self, X, y):
        # Replacement for NeuralNet.fit: record the estimator state at
        # every grid-search fit call instead of actually training.
        vars_hist.append(vars(self).copy())
        return self

    with patch.object(NeuralNet, 'fit', autospec=True) as mock_fit:
        mock_fit.side_effect = fit
        with patch('nolearn.lasagne.NeuralNet.score') as score:
            score.return_value = 0.3
            gs = GridSearchCV(nn, param_grid, cv=2, refit=False, verbose=4)
            gs.fit(X, y)

    # 2 momentum values x 2 more_params x 2 CV folds = 8 fits;
    # update_momentum cycles fastest within each more_params setting.
    assert [entry['update_momentum'] for entry in vars_hist] == [
        0.9, 0.9, 0.98, 0.98] * 2
    assert [entry['more_params'] for entry in vars_hist] == (
        [{'hidden_num_units': 100}] * 4 +
        [{'hidden_num_units': 200}] * 4
        )
def test_clone():
    # sklearn's clone() must be able to reconstruct a NeuralNet purely
    # from its get_params() output.
    from nolearn.lasagne import NeuralNet
    from nolearn.lasagne import negative_log_likelihood
    from nolearn.lasagne import BatchIterator

    params = dict(
        layers=[
            ('input', InputLayer),
            ('hidden', DenseLayer),
            ('output', DenseLayer),
            ],
        input_shape=(100, 784),
        output_num_units=10,
        output_nonlinearity=softmax,

        more_params={
            'hidden_num_units': 100,
            },
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,

        regression=False,
        loss=negative_log_likelihood,
        batch_iterator_train=BatchIterator(batch_size=100),
        X_tensor_type=T.matrix,
        y_tensor_type=T.ivector,
        use_label_encoder=False,
        on_epoch_finished=None,
        on_training_finished=None,
        max_epochs=100,
        eval_size=0.1,
        verbose=0,
        )
    nn = NeuralNet(**params)

    nn2 = clone(nn)
    params1 = nn.get_params()
    params2 = nn2.get_params()
    # These entries hold instances/callables that do not compare equal
    # across clones, so drop them before the equality check.
    for ignore in (
        'batch_iterator_train',
        'batch_iterator_test',
        'output_nonlinearity',
        ):
        for par in (params, params1, params2):
            par.pop(ignore, None)
    assert params == params1 == params2
def test_lasagne_functional_regression(boston):
    # End-to-end regression smoke test on the Boston housing data.
    from nolearn.lasagne import NeuralNet

    X, y = boston

    nn = NeuralNet(
        layers=[
            ('input', InputLayer),
            ('hidden1', DenseLayer),
            ('output', DenseLayer),
            ],
        input_shape=(128, 13),
        hidden1_num_units=100,
        output_nonlinearity=identity,
        output_num_units=1,

        update_learning_rate=0.01,
        update_momentum=0.1,
        regression=True,  # fit continuous targets, not class labels
        max_epochs=50,
        )

    # Train on the first 300 samples, evaluate on the remainder.
    nn.fit(X[:300], y[:300])
    assert mean_absolute_error(nn.predict(X[300:]), y[300:]) < 3.0
| bsd-3-clause |
marionleborgne/nupic.research | htmresearch/frameworks/capybara/embedding.py | 7 | 3195 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import json
import numpy as np
import pandas as pd
def make_embedding(sdrs_chunk, aggregation):
    """Aggregate a chunk of SDRs into one embedding.

    :param sdrs_chunk: (array of arrays) SDRs to aggregate
    :param aggregation: (str) 'or', 'and' or 'mean'
    :return: the aggregated embedding (boolean array for 'or'/'and',
             float array for 'mean')
    :raises ValueError: on an unknown aggregation name
    """
    if aggregation == 'mean':
        return np.mean(sdrs_chunk, axis=0)
    if aggregation == 'or':
        combine = np.logical_or
    elif aggregation == 'and':
        combine = np.logical_and
    else:
        raise ValueError('Invalid aggregation name.')
    # Fold the elementwise operation over the chunk (idempotent ops, so
    # seeding with the first element is harmless).
    result = sdrs_chunk[0]
    for sdr in sdrs_chunk:
        result = combine(result, sdr)
    return result
def make_embeddings(sdrs_sequence, aggregation, nb_chunks):
    """
    Split a sequence of SDRs in chunks and create an embedding for each chunk.

    :param sdrs_sequence: (array of arrays) a sequence of SDRs
    :param aggregation: (str) type of aggregation ('or', 'and' or 'mean')
    :param nb_chunks: (int) how many chunks in the SDRs sequence
    :return: (array of arrays) embeddings, one per chunk
    """
    # Floor division keeps chunk_size an int on Python 3 as well (plain `/`
    # yields a float there and breaks slicing). Trailing SDRs that do not
    # fill a whole chunk are dropped, exactly as before.
    chunk_size = len(sdrs_sequence) // nb_chunks
    embeddings = []
    for i in range(nb_chunks):
        chunk = sdrs_sequence[i * chunk_size:(i + 1) * chunk_size]
        embeddings.append(make_embedding(chunk, aggregation))
    return np.array(embeddings)
def convert_to_embeddings(sdr_sequences, aggregation, nb_chunks):
    """Convert SP and TM SDR sequences into per-chunk embeddings.

    :param sdr_sequences: dataframe-like with `spActiveColumns` and
        `tmPredictedActiveCells` columns of SDR sequences
    :param aggregation: (str) aggregation passed to make_embeddings()
    :param nb_chunks: (int) chunks per sequence
    :return: (sp_embeddings, tm_embeddings) as numpy arrays
    """
    def _embed_column(column_values):
        # One embeddings array per SDR sequence in the column.
        return np.array([make_embeddings(seq, aggregation, nb_chunks)
                         for seq in column_values])

    sp_embeddings = _embed_column(sdr_sequences.spActiveColumns.values)
    tm_embeddings = _embed_column(sdr_sequences.tmPredictedActiveCells.values)
    return sp_embeddings, tm_embeddings
def save_embeddings(embeddings, labels, output_file_path):
    """Persist embeddings and their labels to a CSV file.

    :param embeddings: (sequence of arrays) one embedding per sample;
        each is JSON-encoded into the 'embedding' column
    :param labels: (sequence) one label per embedding
    :param output_file_path: (str) destination CSV path
    :raises AssertionError: if embeddings and labels differ in length
    """
    assert len(embeddings) == len(labels)
    df = pd.DataFrame(
        data={'embedding': [json.dumps(e.tolist()) for e in embeddings],
              'label': labels})
    # Parenthesized form prints identically on Python 2 and is valid on
    # Python 3 (the original `print df.head()` was py2-only syntax).
    print(df.head())
    df.to_csv(output_file_path)
def reshape_embeddings(embeddings, nb_sequences, nb_chunks, sdr_width):
    """Flatten per-chunk embeddings to one row per sequence.

    Input shape: (nb_sequences, nb_chunks, sdr_width);
    output shape: (nb_sequences, nb_chunks * sdr_width).
    """
    flat_width = nb_chunks * sdr_width
    return embeddings.reshape((nb_sequences, flat_width))
| agpl-3.0 |
revanthkolli/osf.io | scripts/analytics/addons.py | 15 | 2140 | # -*- coding: utf-8 -*-
import os
import re
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from website.app import init_app
from .utils import plot_dates, oid_to_datetime, mkdirp
log_collection = database['nodelog']
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'addons')
mkdirp(FIG_PATH)
ADDONS = [
'github',
's3',
'figshare',
'dropbox',
'dataverse',
]
def get_collection_datetimes(collection, _id='_id', query=None):
    """Return creation datetimes for records in `collection`, derived
    from each record's ObjectId.

    :param collection: Mongo collection to scan
    :param _id: name of the ObjectId field
    :param query: optional Mongo filter dict; defaults to all records.
        Fixed: this was normalized but never passed to `find()`, so the
        parameter was silently ignored.
    """
    query = query or {}
    return [
        oid_to_datetime(record[_id])
        for record in collection.find(query, {_id: True})
    ]
def analyze_model(model):
    """Summarize a model's record creation datetimes as a dict with the
    raw 'dates' list and their 'count'."""
    created = get_collection_datetimes(model._storage[0].store)
    return {'dates': created, 'count': len(created)}
def analyze_addon_installs(name):
    """Analyze every settings model registered for the addon `name`,
    keyed by the model's registry key."""
    addon_config = settings.ADDONS_AVAILABLE_DICT[name]
    return {
        key: analyze_model(model)
        for key, model in addon_config.settings_models.iteritems()
    }
def analyze_addon_logs(name):
    """Return the dates of all node logs whose action name starts with
    `name` (case-insensitive)."""
    action_regex = re.compile('^{0}'.format(name), re.I)
    cursor = log_collection.find({'action': {'$regex': action_regex}},
                                 {'date': True})
    return [row['date'] for row in cursor]
def analyze_addon(name):
    """Plot install-configuration and usage-action histograms for one addon.

    Saves one figure per settings model plus one actions figure under
    FIG_PATH; skips models/actions with no recorded dates.
    """
    installs = analyze_addon_installs(name)
    for model, result in installs.iteritems():
        if not result['dates']:
            continue
        fig = plot_dates(result['dates'])
        plt.title('{} configurations: {} ({} total)'.format(name, model, len(result['dates'])))
        plt.savefig(os.path.join(FIG_PATH, '{}-installs-{}.png'.format(name, model)))
        plt.close()

    log_dates = analyze_addon_logs(name)
    if not log_dates:
        return
    fig = plot_dates(log_dates)
    plt.title('{} actions ({} total)'.format(name, len(log_dates)))
    plt.savefig(os.path.join(FIG_PATH, '{}-actions.png'.format(name)))
    plt.close()
def main():
    """Generate analytics figures for every addon that is both listed in
    ADDONS and available in the application settings."""
    init_app(routes=False)
    for addon_name in ADDONS:
        if addon_name in settings.ADDONS_AVAILABLE_DICT:
            analyze_addon(addon_name)


if __name__ == '__main__':
    main()
| apache-2.0 |
trmznt/fatools | fatools/lib/fautil/_xxx/fautils.py | 2 | 43844 |
# re-imagining the peakutils
import numpy as np
from scipy.signal import find_peaks_cwt
from scipy.optimize import leastsq, curve_fit
from scipy.interpolate import UnivariateSpline
from matplotlib import pyplot as plt
from bisect import bisect_left
from operator import itemgetter
from pprint import pprint
from .dpalign import align_peaks, estimate_z
from .models import *
def set_debug(flag):
    """Enable or disable module-level debug printing (consumed by D())."""
    global __printdebug__
    __printdebug__ = flag
def D(*msgs):
    """Print debug messages when debugging was enabled via set_debug().

    Fixed: reading the bare global raised NameError when D() was called
    before any set_debug() call, since __printdebug__ is only created by
    that call.  Now defaults to quiet when the flag was never set.
    """
    if globals().get('__printdebug__', False):
        print(*msgs)
def filter_peak_number( initial_peaks, max_peak_number, ladder_number ):
    """ filter peaks by height to get number of peaks

    Sweeps a minimum-height cutoff upward from 1; every time the cutoff
    changes the surviving peak count, the previous (cutoff, peaks) pair
    is recorded as a candidate set.  Stops when fewer peaks survive than
    there are ladder steps, or at cutoff 5000.
    return [ (min_height, peaks), ... ]
    """

    peak_sets = []
    filtered_peaks = []
    used_min_height = 0
    peak_number = -1

    # gather all possible peak lists
    min_height = 0
    while min_height < 5000:
        min_height += 1
        peaks = [ x for x in initial_peaks if x.height >= min_height ]
        #print('min_height = %d, peaks = %d' % (min_height, len(peaks)))

        if len(peaks) > max_peak_number:
            # too much noises
            continue

        if peak_number < 0:
            # first cutoff that produced an acceptable peak count
            peak_number = len(peaks)
            filtered_peaks = peaks
            used_min_height = min_height
            continue

        if peak_number == len(peaks):
            # same surviving set as before: just remember the higher cutoff
            used_min_height = min_height
            continue

        if peak_number != len(peaks):
            # the count changed: the previous (cutoff, peaks) pair is final
            peak_sets.append( (used_min_height, filtered_peaks) )
            if len(peaks) < ladder_number:
                # too few peaks left for the ladder; keep this set and stop
                peak_sets.append( (min_height, peaks) )
                break

        peak_number = len(peaks)
        filtered_peaks = peaks
        used_min_height = min_height

    return peak_sets
def filter_retention_time( ladders, peaks, parameter ):
    """ return proper filtered peaks in reversed order

    Walks peaks from latest retention time backwards; whenever two
    consecutive peaks are closer than the minimum expected ladder
    separation, only the taller one is kept.  Reverses `peaks` in place.
    """

    if parameter.init_separation_time < 0:
        # estimate the rtime-per-bp slope from the data itself
        separation_slope = 1/estimate_slope( ladders, peaks )
    else:
        separation_slope = parameter.init_separation_time
    print("peak count: %d separation_slope: %2.3f" % (len(peaks), separation_slope))

    min_spacing, max_spacing = spacing( ladders )
    # allow 10% tolerance below the theoretical minimum separation
    min_time_separation = min_spacing * separation_slope * 0.90
    #print('min time separation: %6.3f' % min_time_separation)

    peaks.reverse()
    ladder_alleles = [ peaks[0] ]
    idx = 1
    while idx < len(peaks):
        prev_peak = ladder_alleles.pop()
        #print('comparing peaks: %d & %d' % (prev_peak.peak, peaks[idx].peak))
        if prev_peak.rtime - peaks[idx].rtime > min_time_separation: # minimum time separation
            ladder_alleles.append( prev_peak )
            ladder_alleles.append( peaks[idx] )
        else:
            #print('=> range < min_time_separation')
            # too close together: keep only the taller of the two peaks
            if prev_peak.height > peaks[idx].height:
                ladder_alleles.append( prev_peak )
                #print('=> use peak %d' % prev_peak.peak)
            else:
                ladder_alleles.append( peaks[idx] )
                #print('=> use peak %d' % peaks[idx].peak)
        idx += 1

    return ladder_alleles
def peak_fit_and_align(simple_fits, ladders):
    """Run DP alignment on up to the six best simple fits and return the
    single best result: highest DP score, ties broken by lowest RSS.

    Each entry of `simple_fits` is (z, rss, peaks, height, paired_peaks);
    the returned tuple is
    (dp_score, dp_rss, dp_z, dp_peaks, initial_peaks, height, S, D,
     paired_peaks).
    """
    results = []
    for candidate in simple_fits[:6]:
        initial_z, initial_rss, initial_peaks, height, paired_peaks = candidate
        print('=> initial RSS: %3.3f' % initial_rss)
        dp_score, dp_rss, dp_z, dp_peaks, S, D = align_peaks(
            ladders, initial_peaks, initial_z=initial_z)
        results.append((dp_score, dp_rss, dp_z, dp_peaks,
                        initial_peaks, height, S, D, paired_peaks))
    # Stable double sort: ascending RSS first, then descending DP score,
    # so the highest score wins and lowest RSS breaks ties.
    results.sort(key=itemgetter(1))
    results.sort(key=itemgetter(0), reverse=True)
    return results[0]
def xxx_scan_ladder_peaks( channel, ladders, parameter = None ):
    """Scan `channel` for ladder peaks and align them to `ladders`.

    Superseded variant (xxx_ prefix) kept for reference; returns
    (z, rss, score, dpscore, reports, ladder_peaks) or an error tuple.
    """
    if parameter is None:
        parameter = LadderScanningParameter()
        parameter.min_height = min( parameter.ladder_height_win )

    initial_peaks = scan_peaks( channel, parameter = parameter )

    # allow up to 1.5x the ladder count before treating the set as noise
    peak_sets = filter_peak_number( initial_peaks,
                        len(ladders) * 1.5,
                        len(ladders) )

    # sanity check
    if len(peak_sets) == 0 or len(peak_sets[0][1]) < len(ladders) * parameter.min_peak_number:
        return (None, -1, 0,
                ('Not enough initial signals', 'Too much noises or too few signals'), [])

    # use all peak_sets
    print('peak_sets: %d' % len(peak_sets))
    separated_peaksets = []
    for (height, peaks) in peak_sets:
        proper_peaks = filter_retention_time( ladders, peaks, parameter )
        # keep only peak sets whose size is plausible for this ladder
        if not proper_peaks or ( len(proper_peaks) < len(ladders) * parameter.min_peak_number or
                len(proper_peaks) > len(ladders) * 1.5 ):
            continue
        separated_peaksets.append( (height, proper_peaks) )

    if not separated_peaksets:
        return (None, -1, 0, ('Not enough filtered peaks', 'Bad peak separation time'), [])

    print('separated peaks contains %d sets' % len(separated_peaksets))

    simple_fits = []
    for (height, peaks) in separated_peaksets:
        print('=> height: %d, peaks: %d' % (height, len(peaks)))
        fit_results = simple_fit( ladders, peaks )
        for (z, rss, paired_peaks) in fit_results:
            simple_fits.append( (z, rss, peaks, height, paired_peaks) )

    simple_fits.sort( key = itemgetter(1) )

    # refine batches of three simple fits with DP alignment until the
    # result is good enough (low RSS or full DP score) or we run out
    dp_fits = []
    i = 0
    optimal_dpscore = 0
    optimal_rss = 1000
    while simple_fits[i:i+3] and (optimal_rss > len(ladders)*1.5 and optimal_dpscore < len(ladders)):
        dp_fit = peak_fit_and_align( simple_fits[i:i+3], ladders )
        optimal_dpscore, optimal_rss = dp_fit[0], dp_fit[1]
        dp_fits.append( dp_fit )
        i += 3

    if len(dp_fits) > 1:
        # best DP score first; among equal scores, lowest RSS first
        dp_fits.sort( key = itemgetter(1) )
        dp_fits.sort( key = itemgetter(0), reverse=True )

    optimal_dpscore, optimal_rss, optimal_z, optimal_peaks, initial_peaks, height, S, D, paired_peaks = dp_fits[0]

    score, reports = score_ladder( optimal_rss, len(optimal_peaks), len(ladders) )
    reports.append('Height = %d' % height)

    print('=> initial pairing:')
    pprint(paired_peaks)
    #np.savetxt('S.csv', S, delimiter=',')
    #np.savetxt('D.csv', D, delimiter=',')

    # set peaks to ladder-peak
    optimal_peaks.sort()
    for (s, p) in optimal_peaks:
        p.size = p.value = s
        p.type = 'peak-ladder'
        p.method = 'allele-autoladder'
        channel.alleles.append( p )

    print('=> initial peaks')
    pprint(initial_peaks)
    print('=> aligned ladder peaks')
    pprint(optimal_peaks)
    print(' => aligned peaks: %d of %d ladders' % (len(optimal_peaks), len(ladders)))

    return ( optimal_z, optimal_rss, score, optimal_dpscore, reports,
             [ p for (s,p) in optimal_peaks ] )
def scan_ladder_peaks( channel, ladders, parameter = None ):
    """Scan `channel` for ladder peaks, align them to the expected
    `ladders` sizes and annotate the channel's alleles.

    Returns (z, rss, score, dpscore, reports, ladder_peaks) on success,
    or an error tuple when not enough signal survives filtering.
    """
    if parameter is None:
        parameter = LadderScanningParameter()
        parameter.min_height = min( parameter.ladder_height_win )

    initial_peaks = scan_peaks( channel, parameter = parameter )

    peak_sets = []  # (min_height, peaks)
    filtered_peaks = []
    used_min_height = 0
    peak_number = -1

    # gather all possible peak lists
    if parameter.height > 0:
        # just use peaks with this minimal height
        peak_sets = [ (parameter.height,
                        list( p for p in initial_peaks if p.height > parameter.height ) ) ]
    else:
        peak_sets = filter_peak_number( initial_peaks,
                        len(ladders) * 2,
                        len(ladders) )

    # sanity check
    if len(peak_sets) == 0:
        return (None, -1, 0, -1,
                ('Not enough initial signals', 'Too much noises or too few signals'), [])

    pprint( peak_sets )

    separated_peaks = []
    for (height, peaks) in peak_sets:
        proper_peaks = filter_retention_time( ladders, peaks, parameter )
        # discard peak sets with implausible counts for this ladder
        if not proper_peaks or ( len(proper_peaks) < len(ladders) * parameter.min_peak_number or
                len(proper_peaks) > len(ladders) * 1.5 ):
            continue
        separated_peaks.append( (len(proper_peaks), proper_peaks, height) )

    if not separated_peaks:
        return (None, -1, 0, -1,
                ('Not enough filtered peaks', 'Bad peak separation time'), [])

    print('separated peaks contains %d sets' % len(separated_peaks))

    best_fits = []
    for (peak_len, proper_peaks, min_height) in separated_peaks:
        dpscore, rss, z, aligned_peaks = adaptive_peak_alignment( ladders, proper_peaks )
        #rss, z, aligned_peaks = align_peaks( ladders, proper_peaks )
        best_fits.append( (dpscore, rss, z, aligned_peaks, min_height, proper_peaks) )

    #for i in range(len(best_fits)):
    #    rss, z, aligned_peaks, height, proper_peaks = best_fits[i]
    #    dp_rss, dp_z, dp_peaks = align_peaks( ladders, proper_peaks, aligned_peaks )
    #    if dp_rss < rss:
    #        print(' => DP alignment replaced RSS: %3.2f -> %3.2f' % ( rss, dp_rss ))
    #        best_fits[i] = (dp_rss, dp_z, dp_peaks, height, proper_peaks)

    # best DP score first; among equal scores, the lowest RSS wins
    best_fits.sort( key = itemgetter(1) )
    best_fits.sort( key = itemgetter(0), reverse=True )

    print(' => best fits:')
    for (dpscore, rss, z, peaks, height, proper_peaks) in best_fits:
        print('    dpscore: %3.3f rss: %3.2f peaks: %2d total peaks: %2d' %
                (dpscore, rss, len(peaks), len(proper_peaks)) )
        pprint(peaks)

    optimal_dpscore, optimal_rss, optimal_z, optimal_peaks, height, proper_peaks = best_fits[0]

    score, reports = score_ladder( optimal_rss, len(optimal_peaks), len(ladders) )
    reports.append('Height = %d' % height)

    # set peaks to ladder-peak
    optimal_peaks.sort()
    for (s, p) in optimal_peaks:
        p.size = p.value = s
        p.type = 'peak-ladder'

    # every surviving peak becomes a channel allele; peaks that were not
    # aligned to a ladder size get size -1 / 'peak-unassigned'
    for p in proper_peaks:
        p.method = 'allele-autoladder'
        if p.value is None:
            p.size = p.value = -1
            p.type = 'peak-unassigned'
        channel.alleles.append( p )

    pprint(proper_peaks)
    print(' => aligned peaks: %d of %d ladders' % (len(optimal_peaks), len(ladders)))

    return (optimal_z, optimal_rss, score, optimal_dpscore, reports, [ p for (s,p) in optimal_peaks ])

    ## NOT USED FROM HERE
    # NOTE(review): everything below the return above is unreachable
    # dead code (an older implementation left in place).

    # use peaks & min_height later on

    rss, z, aligned_peaks = align_peaks( ladders, filtered_peaks )
    score, reports = score_ladder( rss, len(aligned_peaks), len(ladders) )
    reports.append('Height > %d' % used_min_height)

    # set peaks to ladder-peak
    aligned_peaks.sort()
    for (s, p) in aligned_peaks:
        p.size = p.value = s
        p.type = 'peak-ladder'
        p.method = 'allele-autoladder'
        channel.alleles.append( p )

    print('=> aligned peaks: %d of %d ladders' % (len(aligned_peaks), len(ladders)))

    return (z, rss, score, reports, [ p for (s,p) in aligned_peaks ])
def scan_ladder_peaks_xxx( channel, ladders, parameter = None):
    """ scan and assign ladder peaks (superseded variant, xxx_ suffix;
    shadowed by the later definition of the same name in this module)
    ladders: [ 100, 150, 200, ... ]
    return: (z, rss, score, report, assigned_peaks)
    """
    if parameter is None:
        parameter = LadderScanningParameter()
        parameter.min_height = min( parameter.ladder_height_win )

    initial_peaks = scan_peaks( channel, parameter = parameter )

    # collect a candidate peak set for each minimum-height threshold
    filtered_peaks = []
    prev_peak_len = -1
    for min_height in parameter.ladder_height_win:
        peaks = [ x for x in initial_peaks if x.height >= min_height ]
        if len(peaks) == prev_peak_len:
            continue
        if len(peaks) > len(ladders) * 2:
            # too much noises
            continue
        if len(peaks) < len(ladders) * parameter.min_peak_number:
            # not much signals
            break
        filtered_peaks.append( (min_height, peaks) )
        prev_peak_len = len(peaks)

    if not filtered_peaks:
        # no available data
        return (None, -1, 0,
                ('Not enough initial signals', 'Too much noises or too few signals'), [])

    # debugging
    if True:
        for fp in filtered_peaks:
            print('height = %d, peaks = %d' % (fp[0], len(fp[1])))
            for peak in fp[1]:
                print(peak)

    separated_peaks = []
    for (min_height, peaks) in filtered_peaks:
        proper_peaks = filter_separation_time( ladders, peaks, parameter )
        if not proper_peaks or len(proper_peaks) < len(ladders) * parameter.min_peak_number:
            continue
        separated_peaks.append( (len(proper_peaks), proper_peaks, min_height) )

    if not separated_peaks:
        return (None, -1, 0, ('Not enough filtered peaks', 'Bad peak separation time'), [])

    ladders = list(reversed(ladders))

    best_fits = []
    for (peak_len, proper_peaks, min_height) in separated_peaks:
        rss, z, aligned_peaks = adaptive_peak_alignment( ladders, proper_peaks )
        #rss, z, aligned_peaks = align_peaks( ladders, proper_peaks )
        best_fits.append( (rss, z, aligned_peaks, min_height, proper_peaks) )

    best_fits.sort( key = itemgetter(0) )
    optimal_rss, optimal_z, optimal_peaks, min_height, proper_peaks = best_fits[0]

    # try to beat the best adaptive alignment with a DP alignment
    dp_rss, dp_z, dp_peaks = align_peaks( ladders, proper_peaks, optimal_peaks )
    if dp_rss < optimal_rss:
        optimal_rss, optimal_z, optimal_peaks = dp_rss, dp_z, dp_peaks
        print(' => DP alignment replaced optimal RSS')

    score, reports = score_ladder( optimal_rss, len(optimal_peaks), len(ladders) )
    reports.append('Height > %d' % min_height)

    # set peaks to ladder-peak
    optimal_peaks.sort()
    for (s, p) in optimal_peaks:
        p.size = p.value = s
        p.type = 'peak-ladder'
        p.method = 'allele-autoladder'
        channel.alleles.append( p )

    print('=> aligned peaks: %d of %d ladders' % (len(optimal_peaks), len(ladders)))
    pprint(optimal_peaks)
    return (optimal_z, optimal_rss, score, reports, [ p for (s,p) in optimal_peaks ])
def filter_separation_time( ladders, peaks, parameter ):
    """ return proper filtered peaks in reversed order

    Same merge strategy as filter_retention_time(), but uses the `peak`
    attribute for position and always estimates the slope from the data.
    Reverses `peaks` in place.
    """
    separation_slope = estimate_slope( ladders, peaks )
    min_spacing, max_spacing = spacing( ladders )
    # allow 10% tolerance below the theoretical minimum separation
    min_time_separation = min_spacing * separation_slope * 0.90
    #print('min time separation: %6.3f' % min_time_separation)

    peaks.reverse()
    ladder_alleles = [ peaks[0] ]
    idx = 1
    while idx < len(peaks):
        prev_peak = ladder_alleles.pop()
        #print('comparing peaks: %d & %d' % (prev_peak.peak, peaks[idx].peak))
        if prev_peak.peak - peaks[idx].peak > min_time_separation: # minimum time separation
            ladder_alleles.append( prev_peak )
            ladder_alleles.append( peaks[idx] )
        else:
            #print('=> range < min_time_separation')
            # too close: keep only the taller of the two peaks
            if prev_peak.height > peaks[idx].height:
                ladder_alleles.append( prev_peak )
                #print('=> use peak %d' % prev_peak.peak)
            else:
                ladder_alleles.append( peaks[idx] )
                #print('=> use peak %d' % peaks[idx].peak)
        idx += 1

    return ladder_alleles
def adaptive_peak_alignment( ladders, peaks ):
    """ return (dp_score, dp_rss, dp_z, dp_peaks)

    Tries several rigid offsets between the ladder list and the peak list
    (both sorted descending), fits a curve for each, then refines the
    three lowest-RSS offsets with dynamic-programming alignment.

    NOTE(review): the fallback when no DP result exists returns a 3-tuple
    (rss, z, peaks), which does not match the 4-tuple the callers unpack
    — confirm whether that path can actually be reached.
    """
    # sort in reverse order
    ladders = sorted( ladders, reverse = True )
    peaks = sorted( peaks, key = lambda x: x.rtime, reverse = True )

    adaptive_results = []
    for i in range(-3, 8):
        # positive i skips leading ladders; negative i skips leading peaks
        if i >= 0:
            result_peaks = list(zip(ladders[i:], peaks))
        else:
            result_peaks = list(zip(ladders, peaks[ abs(i): ]))
        z, rss = estimate_z( result_peaks )
        adaptive_results.append( (rss, z, result_peaks) )
        #print('i=%d, rss=%4.3f' % (i, rss))

    # we use the smallest rss
    adaptive_results.sort()

    dp_results = []
    # only use the best 3 RSS result
    for (rss, z, aligned_peaks) in adaptive_results[:3]:
        #if rss > 5000:
            # too much deviation, just ignore
        #    continue
        # perform dynamic programming alignment
        dp_score, dp_rss, dp_z, dp_peaks, S, D = align_peaks( ladders, peaks, aligned_peaks )
        if dp_rss < rss:
            print(' => DP alignment replaced RSS: %3.2f -> %3.2f' % ( rss, dp_rss ))
        dp_results.append( (dp_score, dp_rss, dp_z, dp_peaks) )
        #dp_results.append( (rss, z, aligned_peaks) )

    if len(dp_results) <= 0:
        return adaptive_results[0]

    # best DP score first; lowest RSS breaks ties
    dp_results.sort( key = itemgetter(1) )
    dp_results.sort( key = itemgetter(0), reverse=True )
    return dp_results[0]
def scan_ladder_peaks_xxx( channel, ladders, parameter=None):
    """ return a list of ladder alleles
    ladders: [ 100, 150, 200, ... ]
    return: (z, rss, score, report, ladder_alleles)

    NOTE(review): this redefines the scan_ladder_peaks_xxx declared
    earlier in this module; only this later definition is live.
    """
    if parameter is None:
        parameter = LadderScanningParameter()
        parameter.min_height = min( parameter.ladder_height_win )

    minheight_peaks = scan_peaks( channel, method=None, parameter=parameter )

    prev_peaks = []
    last_peak_no = -1
    min_spacing, max_spacing = spacing( ladders )

    # find the proper height to get the optimum number of peaks
    for min_height in parameter.ladder_height_win:
        print('processing for min_height = %d' % min_height)
        peaks = [ peak for peak in minheight_peaks if peak.height >= min_height ]

        if len(peaks) > len(ladders) + 25:
            # too much noise
            print('too much noises')
            continue

        if len(peaks) < 5:
            # too few signal
            return (None, -1, 0, ("Too few signals",), None)

        if last_peak_no == len(peaks):
            # if current peak number is similar to above, just continue increase
            # min_height
            continue

        last_peak_no = len(peaks)
        print('initial peaks: %d' % len(peaks))
        pprint( [ (x.peak, x.height) for x in peaks ] )

        # based on the longest peak, estimate the minimum time separation range
        separation_slope = estimate_slope( ladders, peaks )
        min_time_separation = min_spacing * separation_slope * 0.90
        print('min time separation: %6.3f' % min_time_separation)

        # merge peaks closer than the minimum separation, keeping the taller
        peaks.reverse()
        ladder_alleles = [ peaks[0] ]
        idx = 1
        while idx < len(peaks):
            prev_peak = ladder_alleles.pop()
            #print('comparing peaks: %d & %d' % (prev_peak.peak, peaks[idx].peak))
            if prev_peak.peak - peaks[idx].peak > min_time_separation: # minimum time separation
                ladder_alleles.append( prev_peak )
                ladder_alleles.append( peaks[idx] )
            else:
                #print('=> range < min_time_separation')
                if prev_peak.height > peaks[idx].height:
                    ladder_alleles.append( prev_peak )
                    #print('=> use peak %d' % prev_peak.peak)
                else:
                    ladder_alleles.append( peaks[idx] )
                    #print('=> use peak %d' % peaks[idx].peak)
            idx += 1

        peaks = ladder_alleles
        print('after range filtering: %d' % len(peaks))
        print( [x.peak for x in peaks] )

        print('min height: %d -- peaks: %d' % (min_height, len(peaks)))
        if len(peaks) <= len(ladders):
            if not prev_peaks:
                if len(ladders) - len(peaks) >= 10:
                    return ( None, -1, 0, [ 'Missing peaks > 10' ], None )
                break
            # check which number is smaller, additional peaks or missing peaks
            if abs(len(peaks) - len(ladders)) > abs(len(prev_peaks) - len(ladders)):
                print('using previous peaks!!')
                peaks = prev_peaks
            break

        prev_peaks = peaks

    #peaks = list(reversed(peaks))
    print('ASSIGNING PEAKS')
    print('ladder peaks: %d of %d' % (len(peaks), len(ladders)))

    # perform sizing
    # assumption:
    # - no drop peaks
    # - the maximum number of missing peaks at the later retention time is 5 (shifted out)

    #peaks = align_ladder_peaks( ladders, ladder_alleles )
    #return

    ladders = list( reversed( ladders ) )
    peaks = list( sorted( peaks, key = lambda x: x.peak, reverse=True) )

    # try several rigid offsets between ladders and peaks, keep lowest RSS
    adaptive_results = []
    for i in range(-3, 8):
        if i >= 0:
            result_peaks = list(zip(ladders[i:], peaks))
        else:
            result_peaks = list(zip(ladders, peaks[ abs(i): ]))
        pprint( result_peaks )
        z, rss = estimate_z( result_peaks )
        adaptive_results.append( (result_peaks, z, rss) )
        print('i=%d, rss=%4.3f' % (i, rss))

    # we use the smallest rss
    adaptive_results.sort( key = itemgetter(2) )
    (optimal_peaks, optimal_z, optimal_rss) = adaptive_results[0]
    optimal_peaks = list(optimal_peaks)

    # scoring
    quality_score = 1.0
    reports = []
    if optimal_rss <= 0:
        quality_score -= 0.25
        reports.append( 'QC RSS < 0' )
    if optimal_rss > 50:
        quality_score -= 0.05
        reports.append( 'QC RSS > 50' )
    if optimal_rss > 255:
        quality_score -= 0.10
        reports.append( 'QC RSS > 255')
    if optimal_rss > 1024:
        quality_score -= 0.10
        reports.append( 'QC RSS > 1024' )
    missing_peaks = len(ladders) - len(optimal_peaks)
    if missing_peaks > 0:
        quality_score -= 0.025 * (missing_peaks - 1)
        reports.append( 'QC missing %d peaks' % missing_peaks )
    reports.reverse()

    # assign peaks
    optimal_peaks.sort()
    for (s, p) in optimal_peaks:
        p.size = p.value = s
        p.type = 'peak-ladder'
        p.method = 'allele-autoladder'
        channel.alleles.append( p )

    return (optimal_z, optimal_rss, quality_score, reports, [ p for (s,p) in optimal_peaks ])
def estimate_z_xxx(peak_pairs):
    """Fit a cubic polynomial mapping retention time to ladder size.

    peak_pairs: sequence of (size, peak) where peak.rtime is the
    retention time (x) and size is the ladder size (y).
    Returns (z, rss): the polynomial coefficients and the residual sum
    of squares of the fit.
    """
    rtimes = [peak.rtime for (size, peak) in peak_pairs]
    sizes = [size for (size, peak) in peak_pairs]
    coeffs = np.polyfit(rtimes, sizes, 3)
    predicted = np.poly1d(coeffs)(rtimes)
    residuals = predicted - sizes
    return coeffs, (residuals ** 2).sum()
def score_ladder(optimal_rss, peak_len, ladder_len):
    """ return ladder score

    Starts from a perfect 1.0 and deducts penalties for a non-positive
    or large RSS and for missing ladder peaks.  Returns
    (quality_score, reports) with reports listed worst-first.
    """
    quality_score = 1.0
    reports = []

    if optimal_rss <= 0:
        quality_score -= 0.25
        reports.append('QC RSS < 0')

    # Cumulative penalties: a very large RSS trips every bracket below.
    for bound, penalty in ((50, 0.05), (255, 0.10), (1024, 0.10)):
        if optimal_rss > bound:
            quality_score -= penalty
            reports.append('QC RSS > %d' % bound)

    missing_peaks = ladder_len - peak_len
    if missing_peaks > 0:
        quality_score -= 0.025 * (missing_peaks - 1)
        reports.append('QC missing peaks = %d' % missing_peaks)

    reports.reverse()
    return quality_score, reports
def scan_peaks( channel, parameter=None ):
    """ return a list of alleles after scanned

    Runs find_peaks() over the channel signal, optionally raises the
    height cutoff until at most max_peak_number peaks survive, then
    wraps each raw peak tuple into an allele object.
    """
    if parameter is None:
        parameter = ScanningParameter()

    peaks = find_peaks( channel.data, parameter, channel.data )

    # peaks is [ (x, height, area), ... ]

    # raise the height threshold until no more than max_peak_number remain
    if parameter.max_peak_number > 0:
        min_height = 0
        while len(peaks) > parameter.max_peak_number and min_height < 100:
            min_height += 1
            peaks = [ p for p in peaks if p[1] > min_height ]

    # create alleles based on these peaks
    alleles = []
    for peak in peaks:
        # skew on either side defaults to 1 to avoid division by zero
        right_skew = peak[4] - peak[0]
        if right_skew == 0:
            right_skew = 1
        left_skew = peak[0] - peak[3]
        if left_skew == 0:
            left_skew = 1
        allele = channel.get_allele_class()( rtime = int(peak[0]),
                    height = int(peak[1]),
                    area = peak[2],
                    brtime = peak[3],
                    ertime = peak[4],
                    wrtime = peak[4]-peak[3],
                    srtime = right_skew/left_skew,
                    beta = peak[2]/int(peak[1])
            )
        allele.type = 'peak-scanned'
        allele.method = 'binning-unavailable'
        alleles.append( allele )

    return alleles
def call_peaks_XXX( channel, method, parameter=None, peaks=None):
    """Defunct variant of call_peaks() kept for reference only.

    NOTE(review): the code following the `pass` still executes and
    references an undefined name `alleles`, so calling this function
    would raise NameError — do not use.
    """
    pass

    if method is None:
        # presumably ladder channel
        # just return the alleles
        return alleles
    else:
        # estimate size of the alleles
        for allele in alleles:
            estimate_allele_size( allele, method )

        # remove small fragment size
        alleles = [ allele for allele in alleles if allele.size > parameter.min_size ]

        # classify alleles to peak-stutter or peak-called
        check_stutter_peaks( alleles, threshold = parameter.stutter_threshold )

    return alleles
def call_peaks( alleles, method, ladders, parameter=None):
    """ return a list of called (sized) alleles

    Sizes each allele via `method` (a rtime -> size interpolator);
    alleles outside the ladder rtime span or the parameter size range
    are marked 'peak-unassigned' and excluded from the result.
    """
    called_alleles = []
    for allele in alleles:
        # only call peaks within the ladders, otherwise just assigned -1 to the value
        if not ( ladders[0].rtime < allele.rtime < ladders[-1].rtime ):
            allele.size = -1
            allele.type = 'peak-unassigned'
            continue

        size = float(method(allele.rtime))
        if np.isnan(size):
            # interpolation failed; flag with a sentinel size
            allele.size = -1
        else:
            allele.size = size
        allele.value = round(allele.size)
        allele.method = 'binning-unavailable'

        # remove small fragment size
        if allele.size < parameter.min_size or allele.size > parameter.max_size:
            allele.type = 'peak-unassigned'
            continue

        allele.type = 'peak-called'
        called_alleles.append( allele )

    # classify alleles to peak-stutter or peak-called for the list of alleles that
    # were not peak-unassigned
    check_stutter_peaks( called_alleles, threshold = parameter.stutter_threshold )

    return called_alleles
def bin_peaks(channel, marker):
    """Bin called peaks (size > 0) of `channel` against `marker`'s bins.

    Each eligible allele gets its `value` set to the nearest bin value
    and its type set to 'peak-bin'; peaks outside the marker's size
    range are marked 'peak-unassigned'.

    Fixed: the out-of-range branch used `==` (a no-op comparison)
    instead of `=`, so out-of-range peaks were never actually flagged.
    """
    alleles = list(channel.alleles)
    for allele in alleles:
        if allele.size > 0:
            # Promote any sized, non-overlap peak to 'peak-called' so it
            # is considered for binning below.
            if allele.type != 'peak-overlap':
                if allele.type != 'peak-called':
                    allele.type = 'peak-called'
        else:
            continue

        if not marker.min_size < allele.size < marker.max_size:
            # BUGFIX: was `allele.type == 'peak-unassigned'` (comparison).
            allele.type = 'peak-unassigned'
            continue

        if allele.type not in ['peak-called', 'peak-overlap']:
            continue

        size = allele.size
        binlist = marker.bins
        binpos = list(x[0] for x in binlist)
        # Half a repeat unit padded by 50%; only used to warn on a
        # dubious bin assignment, never to reject it.
        threshold = float(marker.repeats) / 2 * 1.5

        pos = bisect_left(binpos, size)
        if pos == 0:
            value = binlist[0]
        elif pos == len(binlist):
            value = binlist[-1]
        else:
            before = binlist[pos - 1]
            after = binlist[pos]
            # Pick whichever neighbouring bin centre is closer.
            if after[0] - size < size - before[0]:
                value = after
            else:
                value = before

        if abs(value[0] - size) > threshold:
            print('WARN: binned peak with size: %3.2f for value: %3.2f is above range threshold: %2.1f'
                  % (size, value[0], threshold))

        allele.value = value[1]
        allele.type = 'peak-bin'
def check_stutter_peaks(alleles, threshold):
    """Mark stutter peaks: any allele with a taller neighbour closer
    than `threshold` in size becomes 'peak-stutter'.

    Sorts `alleles` by size in place before scanning.
    """
    alleles.sort(key=lambda a: a.size)
    for idx, allele in enumerate(alleles):
        neighbours = []
        if idx > 0:
            neighbours.append(alleles[idx - 1])
        if idx + 1 < len(alleles):
            neighbours.append(alleles[idx + 1])
        for other in neighbours:
            # abs() is safe: the list is size-sorted, so this matches the
            # per-side signed comparisons of the original scan.
            if abs(other.size - allele.size) < threshold and \
                    other.height > allele.height:
                allele.type = 'peak-stutter'
def is_overlap(peak_1, peak_2):
    """Return True when the retention-time spans of the two peaks
    overlap; argument order does not matter.

    A peak spans [brtime, ertime); overlap means the later-starting
    peak begins strictly before the earlier-starting peak ends.
    """
    earlier, later = sorted((peak_1, peak_2), key=lambda p: p.brtime)
    return later.brtime < earlier.ertime
def check_overlap_peaks( channels, threshold ):
    """ assign allele.type to peak-overlap

    For every pair of channels, a called/binned peak whose retention-time
    span overlaps a taller peak in another channel -- and whose apex lies
    under one of the raw channel signals at that point -- is demoted to
    'peak-overlap'.

    NOTE(review): `threshold` is only referenced by the commented-out
    size-based check at the bottom and is currently unused.
    """
    channel_peaks = [ list(channel.alleles) for channel in channels ]
    for i, peaks in enumerate( channel_peaks ):
        for j in range( len(peaks) ):
            peak = peaks[j]
            if peak.type not in ['peak-called', 'peak-bin']:
                continue
            for k in range( len(channel_peaks) ):
                if k == i:
                    continue
                for peak_r in channel_peaks[k]:
                    if peak_r.type not in ['peak-called', 'peak-bin']:
                        continue
                    if is_overlap( peak, peak_r ):
                        # find whether the height of any peak is inside any peak
                        channel = peak.alleleset.channel
                        channel_r = peak_r.alleleset.channel
                        if (peak.height < channel_r.data[ peak.rtime ] or
                                peak_r.height < channel.data[ peak_r.rtime ]):
                            if peak.height < peak_r.height:
                                peak.type = 'peak-overlap'
                        #if abs(peak.size - peak_r.size) < threshold:
                        #    if peak.height < peak_r.height:
                        #        peak.type = 'peak-overlap'
def find_peaks(signal, params, ssignal=None):
    """Find peaks in a raw trace signal.

    Args:
        signal: 1-D sequence of trace intensities.
        params: parameter object providing cwt_widths, cwt_min_snr,
            min_height, min_relative_ratio, max_relative_ratio and
            min_height_ratio (project-defined; not validated here).
        ssignal: optional smoothed copy of ``signal``; computed with
            smooth_signal() when omitted.

    Returns:
        list of (rtime, height, area, brtime, ertime) tuples.

    TODO: this method can be performed remotely!
    """
    # find all candidate peaks by the CWT-based algorithm
    indices = find_peaks_cwt(signal, params.cwt_widths, min_snr=params.cwt_min_snr)
    print('find_peaks_cwt() found peaks: %d' % len(indices))
    # BUG FIX: `if not indices` raises ValueError when scipy returns an
    # ndarray; len() works for both list and ndarray results.
    if len(indices) == 0:
        return []

    # refine each CWT hit to the local maximum in a +/-3 sample window and
    # filter for absolute heights
    raw_peaks = []
    for idx in indices:
        # NOTE(review): the original retried the identical window four times
        # (the retry variable was shadowed by the comprehension) and, when all
        # attempts raised IndexError, reused a stale `height` from the
        # previous iteration.  A single probe with a clean skip is equivalent
        # minus that bug.
        try:
            height, index = max((signal[i], i) for i in range(idx - 3, idx + 3))
        except IndexError:
            continue
        if height < params.min_height:
            continue
        if index < 0:
            continue
        raw_peaks.append((index, height))

    # check for any peaks
    if not raw_peaks:
        return raw_peaks

    # filter for relative heights against the median peak height
    if params.min_relative_ratio > 0 or params.max_relative_ratio > 0:
        med = np.median([p[1] for p in raw_peaks])
        if params.min_relative_ratio > 0:
            median_min = med * params.min_relative_ratio
            raw_peaks = [p for p in raw_peaks if p[1] > median_min]
        if params.max_relative_ratio > 0:
            median_max = med * params.max_relative_ratio
            raw_peaks = [p for p in raw_peaks if p[1] < median_max]

    if not raw_peaks:
        return raw_peaks

    # filter for minimum height ratio against the tallest remaining peak
    if params.min_height_ratio > 0:
        min_height = max(p[1] for p in raw_peaks) * params.min_height_ratio
        raw_peaks = [p for p in raw_peaks if p[1] > min_height]

    if ssignal is None:
        ssignal = smooth_signal(signal)

    # NOTE(review): `threshold` is computed but unused -- calculate_area is
    # called with a fixed 5e-2 relative threshold, matching the original.
    threshold = np.percentile(ssignal, 75)
    if threshold < 0:
        threshold = -1

    peaks = []
    for (peak, height) in raw_peaks:
        area, brtime, ertime = calculate_area(ssignal, peak, 5e-2)
        peaks.append((peak, height, area, brtime, ertime))

    # TODO: filter for peak area (never implemented)
    return peaks
def calculate_area_xxx(y, p, threshold):
    """Legacy area estimate: sum of samples contiguous with peak index `p`
    that stay strictly above `threshold`, truncated to int."""
    total = y[p]
    # walk right from the peak while samples stay above the threshold
    right = p + 1
    while right < len(y) and y[right] > threshold:
        total += y[right]
        right += 1
    # walk left from the peak in the same fashion
    left = p - 1
    while left >= 0 and y[left] > threshold:
        total += y[left]
        left -= 1
    return int(total)
def calculate_area(y, t, threshold):
    """Integrate the peak centred at index `t`.

    Scans both flanks with half_area() and returns (area, brtime, ertime),
    the begin/end retention times expressed as absolute indices into `y`.
    """
    # right flank, scanned forward from the apex
    right_area, right_span, _right_shared = half_area(y[t:], threshold)
    # left flank, scanned backward (slice reversed so the apex comes first)
    left_area, left_span, _left_shared = half_area(y[:t + 1][::-1], threshold)
    # y[t] is counted by both half scans, so subtract it once
    return (left_area + right_area - y[t], t - left_span, t + right_span)
def half_area(y, threshold):
    """Accumulate one flank of a peak, apex first.

    Walks away from y[0] while a 3-sample moving average ("edge") keeps
    falling and stays above area * threshold / 2.  Returns
    (area, index, shared) where `shared` means the slope turned upward,
    i.e. this flank runs into a neighbouring peak.
    """
    winsize = 3
    rel_threshold = threshold / 2
    area = y[0]
    edge = float(np.sum(y[:winsize])) / winsize
    prev_edge = 2 * edge
    index = 1
    n = len(y)
    while index < n and edge > area * rel_threshold and edge < prev_edge:
        prev_edge = edge
        area += y[index]
        edge = float(np.sum(y[index:index + winsize])) / winsize
        index += 1
    shared = edge >= prev_edge
    if shared:
        # step back off the rising slope of the neighbour
        index -= 1
    return area, index, shared
def spaces(a):
    """Return the gaps between consecutive members of `a`."""
    return [nxt - cur for cur, nxt in zip(a, a[1:])]
def spacing(a):
    """Return the (min, max) gap between consecutive elements of `a`."""
    gaps = [nxt - cur for cur, nxt in zip(a, a[1:])]
    return min(gaps), max(gaps)
def least_square( z ):
    """ 3rd order polynomial resolver
    """
    # NOTE(review): despite the name, no fitting happens here -- this merely
    # wraps a coefficient vector (e.g. from np.polyfit) in a callable
    # np.poly1d so callers can evaluate size = f(rtime).
    return np.poly1d( z )
def cubic_spline(ladder_alleles):
    """ cubic spline interpolation
    x is peaks (rtime), y is standard size

    Sorts the ladder alleles by size and fits a smoothing spline (s=3)
    mapping retention time to ladder size.
    """
    ordered = sorted(ladder_alleles, key=lambda allele: allele.size)
    rtimes = [allele.rtime for allele in ordered]
    sizes = [allele.size for allele in ordered]
    # NOTE: UnivariateSpline needs strictly increasing x; sorting by size
    # presumes rtime grows monotonically with size -- TODO confirm
    return UnivariateSpline(rtimes, sizes, s=3)
def estimate_allele_size(allele, method):
    """Size-call one allele with the given rtime->size mapping.

    Mutates the allele in place: sets its fractional size, the truncated
    integer value, and marks it called without bin information.
    """
    size = float(method(allele.rtime))
    allele.size = size
    allele.value = int(size)
    allele.type = 'peak-called'
    allele.method = 'binning-unavailable'
def align_ladder_peaks_xxx(ladders, peaks, indels_penalty=(0, 0)):
    """Abandoned dynamic-programming ladder/peak aligner (stub).

    Only builds the normalised series and the (all-zero) score/traceback
    matrices; the DP fill and traceback were never written, and
    `indels_penalty` is never used.  Kept for reference.

    Returns:
        None.
    """
    # sort ladders & peaks in descending order.
    # BUG FIX: reversed() returns an iterator, which the normalisation below
    # indexed (ladders[0]) -- that raised TypeError.  Materialise as lists,
    # matching estimate_slope() / align_ladder_peaks().
    ladders = list(reversed(sorted(ladders)))
    peaks = list(reversed(sorted(peaks, key=lambda x: x.peak)))
    # normalise each series against its largest entry
    norm_ladders = [(x / ladders[0], x) for x in ladders]
    norm_peaks = [(x.peak / peaks[0].peak, x) for x in peaks]
    # matrix (n+1) x (m+1), n ~ number of ladders, m ~ number of peaks
    n = len(norm_ladders) + 1
    m = len(norm_peaks) + 1
    S = np.zeros((n, m))    # score matrix (never filled)
    T = np.zeros((n, m))    # traceback matrix (never filled)
def estimate_slope(ladders, peaks):
    """Estimate the size-per-rtime slope from the middle of the gap lists.

    Sorts both series in descending order, computes consecutive gaps, and
    compares the mean gap over the middle (~33%-66%) of each sorted gap list
    to dodge outliers.

    Args:
        ladders: ladder sizes (numbers).
        peaks: peak objects exposing .rtime.

    Returns:
        float: estimated slope (ladder units per rtime unit).
    """
    # sort ladders & peaks in descending order
    ladders = list(reversed(sorted(ladders)))
    peaks = list(reversed(sorted(peaks, key=lambda x: x.rtime)))
    N_ladders = len(ladders)
    N_peaks = len(peaks)
    # consecutive gaps of each series (inlined from spaces())
    range_ladders = sorted(nxt - cur for cur, nxt in zip(ladders, ladders[1:]))
    range_peaks = sorted(nxt - cur for cur, nxt in
                         zip([x.rtime for x in peaks], [x.rtime for x in peaks][1:]))
    # BUG FIX: the ladder slice's upper bound used N_peaks, which empties the
    # slice (mean of [] -> nan) whenever there are far fewer peaks than
    # ladders.  Bound it by N_ladders.
    sampled_range_ladders = range_ladders[int(N_ladders * 0.33):int(N_ladders * 0.66)]
    sampled_range_peaks = range_peaks[int(N_peaks * 0.33):int(N_peaks * 0.66)]
    slope = np.mean(sampled_range_peaks) / np.mean(sampled_range_ladders)
    return 1.0 / slope
def align_ladder_peaks(ladders, peaks):
    """Align ladder sizes to detected peaks (diagnostic driver).

    Intended to return [(ladder_size, peak), ...] but currently only prints
    the fit results for a range of simulated missing-peak offsets and
    returns None.

    NOTE(review): a second, three-argument stub named align_ladder_peaks()
    later in this module shadows this definition at import time -- confirm
    which one is intended.
    """
    # sort ladders & peaks in descending order
    ladders = list(reversed(sorted(ladders)))
    peaks = list(reversed(sorted(peaks, key=lambda x: x.peak)))
    N_ladders = len(ladders)
    N_peaks = len(peaks)
    range_ladders = sorted(spaces(ladders))
    range_peaks = sorted(spaces([x.peak for x in peaks]))
    # sample the middle 25%-75% of each sorted gap list.
    # BUG FIX: the ladder slice's upper bound used N_peaks (copy-paste from
    # the peak slice), emptying the slice when there are fewer peaks than
    # ladders.  Bound it by N_ladders.
    sampled_range_ladders = range_ladders[int(N_ladders * 0.25):int(N_ladders * 0.75)]
    sampled_range_peaks = range_peaks[int(N_peaks * 0.25):int(N_peaks * 0.75)]
    slope = np.mean(sampled_range_peaks) / np.mean(sampled_range_ladders)
    # linear curve fit with the approximate slope; y -> peaks, x -> size
    initial_y = make_linear_func(slope)
    # iterate to simulate missing leading/trailing peak(s)
    for i in range(-3, 3):
        z, rss, peak_assignment = fit_ladder_peaks(ladders, peaks, initial_y, i)
        print('RESULTS:')
        print(z, rss)
        pprint(peak_assignment)
def fit_ladder_peaks(ladders, peaks, initial_y, index):
    """Two-stage fit: linear seed, then polynomial refit on reassigned pairs.

    Returns (z, rss, peak_assignment) where z are the polynomial
    coefficients from estimate_z() and peak_assignment the sorted
    (ladder_size, peak) pairs used for the refit.
    """
    # stage 1: seed with a linear curve fit at the given alignment shift
    param, _covar, _linear_rss = linear_fit_ladder_peaks(ladders, peaks, initial_y, index)
    # stage 2: pair every ladder with its closest predicted peak...
    seeded = lambda x: initial_y(x, param)
    peak_assignment = assign_ladder_peak(ladders, peaks, seeded)
    # ...and refit a polynomial over that assignment
    z, rss = estimate_z(peak_assignment)
    return (z, rss, peak_assignment)
def assign_ladder_peak(ladders, peaks, func_y):
    """Greedily pair each ladder size with its closest predicted peak.

    When two ladder sizes compete for the same peak, the one with the
    smaller distance wins and the loser is dropped from the assignment.
    Returns a sorted list of (ladder_size, peak) pairs.
    """
    best = {}
    for ladder_size in ladders:
        predicted = func_y(ladder_size)
        peak, delta = find_closest_peak(peaks, predicted)
        current = best.get(peak)
        # keep the closer claimant (strict comparison, as before)
        if current is None or current[1] > delta:
            best[peak] = (ladder_size, delta)
    pairs = [(size, peak) for peak, (size, _delta) in best.items()]
    pairs.sort()
    return pairs
def find_closest_peak(peaks, y):
    """Return (peak, distance) for the peak whose .peak is nearest to y.

    Ties keep the earliest peak in the list, matching the original's
    strict-less-than scan.
    """
    best = min(peaks, key=lambda p: abs(p.peak - y))
    return best, abs(best.peak - y)
def linear_fit_ladder_peaks(ladders, peaks, initial_y, idx):
    """Fit initial_y(x, B) mapping ladder sizes (x) to peak positions (y).

    Args:
        ladders: sorted ladder sizes.
        peaks: peak objects exposing .peak.
        initial_y: callable (x, B) -> y whose free parameter B is fitted.
        idx: alignment shift; >= 0 skips the first idx ladders, < 0 skips
            the first |idx| peaks.

    Returns:
        (param, covar, rss) from scipy.optimize.curve_fit plus the residual
        sum of squares of the fitted curve.
    """
    if idx >= 0:
        ladders = ladders[idx:]
    else:
        peaks = peaks[abs(idx):]
    data_length = min(len(ladders), len(peaks))
    p0 = 1
    x = np.array(ladders[:data_length])
    y = np.array([p.peak for p in peaks][:data_length])
    param, covar = curve_fit(initial_y, x, y, p0)
    # residual sum of squares of the fitted curve (debug prints kept)
    y_p = initial_y(x, param)
    print('y_p:', y_p)
    print('x:', x)
    rss = ((y_p - y) ** 2).sum()
    # BUG FIX: the plotting/debug code that used to follow this return was
    # unreachable (dead code after an unconditional return) and was removed.
    return (param, covar, rss)
def make_linear_func(slope=1):
    """Return f(x, B) = slope * x + B with `slope` captured in the closure."""
    def _linear(x, intercept):
        return x * slope + intercept
    return _linear
def make_residual_func(linear_func):
    """Return r(B, y, x) = y - linear_func(x, B), the leastsq residual form."""
    def _residual(B, y, x):
        return y - linear_func(x, B)
    return _residual
def linear_fit(ladders, peaks):
    """Fit a linear model at every alignment shift.

    Estimates the slope from the gap statistics, then tries shifts in
    [-10, 10) and returns [(z, rss), ...] with z = [slope, intercept],
    sorted by residual (best fit first).
    """
    # ladders and peaks are assumed sorted ascending; derive the slope first
    slope = estimate_slope(ladders, peaks)
    model = make_linear_func(slope)
    print('=> estimated linear slope: %3.3f with peaks: %d' % (slope, len(peaks)))
    # fit the sloped model at every candidate shift
    fits = []
    for shift in range(-10, 10):
        param, rss = align_and_fit(ladders, peaks, model, shift)
        fits.append((np.array([slope, param[0]]), rss))
    return sorted(fits, key=itemgetter(1))
def align_and_fit(ladders, peaks, func, idx):
    """Shift-align ladders onto peaks and fit `func`, returning (param, rss).

    func maps rtime to size: size[bp] = func(rtime[sec], B).
    x: peak rtime; y: ladder size.  idx >= 0 skips leading ladders,
    idx < 0 skips leading peaks.
    """
    shifted_ladders = ladders[idx:] if idx >= 0 else ladders
    shifted_peaks = peaks if idx >= 0 else peaks[-idx:]
    n = min(len(shifted_ladders), len(shifted_peaks))
    y = np.array(shifted_ladders[:n])
    x = np.array([p.rtime for p in shifted_peaks][:n])
    # single-parameter curve fit seeded at 1
    param, _covar = curve_fit(func, x, y, 1)
    # residual sum of squares of the fitted curve
    predicted = func(x, param)
    rss = ((predicted - y) ** 2).sum()
    return (param, rss)
def simple_fit(ladders, peaks):
    """Zip-align ladders to peaks at every shift in [-5, 5) and fit each.

    Returns [(z, rss, pair_peaks), ...] in shift order (the sort by rss is
    deliberately disabled, as in the original).
    """
    fits = []
    for shift in range(-5, 5):
        if shift >= 0:
            pairs = list(zip(ladders[shift:], peaks))
        else:
            pairs = list(zip(ladders, peaks[-shift:]))
        z, rss = estimate_z(pairs)
        fits.append((z, rss, pairs))
    # fits.sort(key=itemgetter(1))  # disabled in the original
    return fits
# ---------------------------------------------------------------------------------------
# new strategy for peak alignment
#
def find_ladder_peak(channel, N, parameter=None):
    """Scan a channel and return candidate ladder peak sets.

    Args:
        channel: project channel object passed through to scan_peaks().
        N: expected number of ladder peaks.
        parameter: optional LadderScanningParameter; a default is created
            when omitted.

    Returns:
        [(used_peak_height, peaks), ...]
    """
    if parameter is None:
        parameter = LadderScanningParameter()
    parameter.min_height = min(parameter.ladder_height_win)
    initial_peaks = scan_peaks(channel, parameter=parameter)
    # gather all possible peak lists
    if parameter.height > 0:
        # a fixed threshold was supplied: one candidate set above that height
        return [(parameter.height,
                 list(p for p in initial_peaks if p.height > parameter.height))]
    else:
        # BUG FIX: this line read "return = filter_peak_number(...)", which is
        # a syntax error; the stray assignment glyph has been removed.
        return filter_peak_number(initial_peaks,
                                  N + parameter.additional_peaks,
                                  N)
def scan_ladder_peak_2(channel, ladder, parameter=None, find_peaks=True):
    """Scan for ladder peaks and align each candidate set to the ladder.

    NOTE(review): work in progress -- the similarity/scoring steps below are
    only sketched in comments and nothing is returned yet.
    """
    # BUG FIX: the original guard read "if parameter = None:" (assignment
    # inside a condition), which is a syntax error.
    if parameter is None:
        parameter = LadderScanningParameter()
    if find_peaks:
        peak_sets = find_ladder_peak(channel, len(ladder), parameter)
    else:
        # use the old peaks, set all into peak-unassigned, and redo the
        # peak alignment.
        # NOTE(review): branch never implemented; without this default
        # `peak_sets` was unbound below (NameError).
        peak_sets = []
    results = []
    for peak_set in peak_sets:
        results.append(align_ladder_peaks(peak_set, ladder, parameter))
    # find similarity using PCA-based similarity
    # assign the first peak set for the first iteration of DP
    # do the DP alignment, get the result
    # use eigenvalues of cosine product to evaluate peak similarity
    # for each results, find the least RSS and the highest DP score and quality score
    # set the peaks for peak-ladders
def align_ladder_peaks( peak_set, ladder, parameter ):
    # NOTE(review): unimplemented stub called by scan_ladder_peak_2().  It
    # shares a name with the earlier two-argument align_ladder_peaks() above
    # and shadows it at import time -- confirm which definition is intended.
    pass
| lgpl-3.0 |
pepcio03/python | pdftosplitandpdf.py | 1 | 2753 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pdfsplitandpdf.py
#
# Copyright 2014 pepcio <piotrk0303@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from scipy import misc as spm
from scipy import ndimage
import scipy as sp
from wand.image import Image
from PIL import Image as pimage
from wand.display import display
from pyPdf import PdfFileWriter,PdfFileReader
#~ import matplotlib.pyplot as plt
#~ from skimage import data, filter
#~ from pytesseract import image_to_string
def proguj(ob, prog):
    """Threshold (Polish "proguj") an array: 1.0 where value > prog, else 0.0.

    Args:
        ob: numpy array of pixel/intensity values.
        prog: threshold value.

    Returns:
        float64 array of the same shape as `ob`.
    """
    # Rewritten from sp.zeros/sp.where: scipy's top-level numpy aliases were
    # deprecated and removed in modern scipy; a boolean mask cast to float
    # produces the identical 0/1 float array.
    ob_wyj = (ob > prog).astype(float)
    return ob_wyj
def main():
    """Split every page of org.pdf vertically in half and accumulate the
    halves into outputa.pdf.

    Python 2 script (print statement, file()); relies on wand for rendering
    and the legacy pyPdf package for PDF assembly.

    NOTE(review): range(0, getNumPages()-1) skips the last page, and the
    zero-padded page label `w` is computed but never used (and left stale for
    i >= 1000) -- confirm both are intentional.
    """
    output = PdfFileWriter()
    temp = PdfFileReader(file('org.pdf','rb'))
    #~ # page numbering (orig.: numeracja)
    for i in range(0,(temp.getNumPages()-1)):
        # zero-padded page label (currently unused)
        if i<10:
            w = '00'+str(i)
        elif i>=10 and i<100:
            w = '0'+str(i)
        elif i>=100 and i<1000:
            w = str(i)
        # render one pdf page as an image (orig.: wczytanie strony pdf)
        #~ i = 10
        img = Image(filename="org.pdf[" + str(i) + "]", resolution=300)
        img.type = 'bilevel';
        # left and right halves of the rendered page
        imga = img[:(int(img.width/2)),:]
        #~ print imga.make_blob()
        imgb = img[(int(img.width/2)):,:]
        #~ display(imga)
        #~ img1 = imga[500:750,500:750]
        #~ imga.save(filename="temp/temp1.png")
        #~ img1 = ndimage.imread('temp/temp1.png')
        #~ print image_to_string(pimage.open('temp/temp1.png'),lang='pol')
        #~ img3 = ndimage.binary_erosion(img1, structure = el).astype(img1.dtype)
        #~ img2 = ndimage.median_filter(img1, 20)
        #~ print dir(img2)
        #~ spm.imsave("temp/temp2.png",img2)
        ####### Histogram
        #~ plt.figure('histogram szarosci')
        #~ plt.hist(img1.ravel(),255) # narysujmy histogram odcieni
        #~ plt.show()
        ########
        #~ spm.imsave("temp/temp3.png",img3)
        #~ # build the pdf from the split halves (orig.: tworzenie pdf z podzielonych stron)
        for j in 'a','b':
            if j == 'a':
                imga.save(filename="temp/temp.pdf")
            if j == 'b':
                imgb.save(filename="temp/temp.pdf")
            # re-read the freshly written half and append it to the output
            temp = PdfFileReader(file('temp/temp.pdf','rb'))
            output.addPage(temp.getPage(0))
        # rewrite the accumulated document after every page
        # (presumably intentional progress saving -- TODO confirm)
        outputStream = file("outputa.pdf", "wb")
        output.write(outputStream)
        outputStream.close()
        print i
    return 0
if __name__ == '__main__':
    # script entry point; main()'s return value (0) is ignored here
    main()
| gpl-2.0 |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/mpl_examples/user_interfaces/wxcursor_demo.py | 4 | 2166 | """
Example to draw a cursor and report the data coords in wx
"""
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
from numpy import arange, sin, pi
import wx
class CanvasFrame(wx.Frame):
    """wx frame embedding a matplotlib figure: the mouse position over the
    axes is reported in a status bar, and the cursor changes to a bullseye
    while it is over the canvas."""

    def __init__(self, ):
        wx.Frame.__init__(self,None,-1,
                          'CanvasFrame',size=(550,350))

        # NOTE(review): wx.NamedColor / wx.StockCursor below are classic
        # wxPython APIs; Phoenix renamed them -- confirm the targeted version.
        self.SetBackgroundColour(wx.NamedColor("WHITE"))

        # build the demo figure: one axes showing sin(2*pi*t)
        self.figure = Figure()
        self.axes = self.figure.add_subplot(111)
        t = arange(0.0,3.0,0.01)
        s = sin(2*pi*t)
        self.axes.plot(t,s)
        self.axes.set_xlabel('t')
        self.axes.set_ylabel('sin(t)')
        self.figure_canvas = FigureCanvas(self, -1, self.figure)

        # Note that event is a MplEvent
        self.figure_canvas.mpl_connect('motion_notify_event', self.UpdateStatusBar)
        self.figure_canvas.Bind(wx.EVT_ENTER_WINDOW, self.ChangeCursor)

        # lay out the canvas so it grows with the frame
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(self.figure_canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
        self.SetSizer(self.sizer)
        self.Fit()

        # single-field status bar used for the coordinate readout
        self.statusBar = wx.StatusBar(self, -1)
        self.statusBar.SetFieldsCount(1)
        self.SetStatusBar(self.statusBar)

        # standard matplotlib navigation toolbar under the canvas
        self.toolbar = NavigationToolbar2Wx(self.figure_canvas)
        self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
        self.toolbar.Show()

    def ChangeCursor(self, event):
        # EVT_ENTER_WINDOW handler: switch to a bullseye cursor on the canvas
        self.figure_canvas.SetCursor(wx.StockCursor(wx.CURSOR_BULLSEYE))

    def UpdateStatusBar(self, event):
        # motion_notify handler: show data coordinates while inside the axes
        if event.inaxes:
            x, y = event.xdata, event.ydata
            self.statusBar.SetStatusText(( "x= " + str(x) +
                                           "  y=" +str(y) ),
                                         0)
class App(wx.App):
    # minimal wx application wrapper around CanvasFrame

    def OnInit(self):
        'Create the main window and insert the custom frame'
        frame = CanvasFrame()
        self.SetTopWindow(frame)
        frame.Show(True)
        # True tells wx that initialisation succeeded and the loop may start
        return True
if __name__=='__main__':
    # create the app (0 = don't redirect stdout/stderr) and run the GUI loop
    app = App(0)
    app.MainLoop()
| gpl-2.0 |
TinghuiWang/pyActLearn | examples/CASAS_Single_Test/b1_randomforest.py | 1 | 7254 | import os
import pickle
import logging
import argparse
from sklearn.ensemble import RandomForestClassifier
from datetime import datetime
from pyActLearn.CASAS.data import CASASData
from pyActLearn.CASAS.fuel import CASASFuel
from pyActLearn.performance.record import LearningResult
from pyActLearn.performance import get_confusion_matrix
logger = logging.getLogger(__file__)
def training_and_test(token, train_data, test_data, num_classes, result):
    """Train a random forest and evaluate it on the test split.

    Args:
        token (:obj:`str`): token representing this run
        train_data (:obj:`tuple` of :obj:`numpy.array`): Tuple of training feature and label
        test_data (:obj:`tuple` of :obj:`numpy.array`): Tuple of testing feature and label
        num_classes (:obj:`int`): Number of classes
        result (:obj:`pyActLearn.performance.record.LearningResult`): LearningResult object to hold learning result

    Returns:
        tuple: (predicted_y, predicted_proba) for the test split.
    """
    # BUG FIX: np was referenced below but never imported in this module
    import numpy as np

    model = RandomForestClassifier(n_estimators=20, criterion="entropy")
    model.fit(train_data[0], train_data[1].flatten())
    # Test
    predicted_y = model.predict(test_data[0])
    predicted_proba = model.predict_proba(test_data[0])
    # Evaluate the test results and store them under `token`
    confusion_matrix = get_confusion_matrix(num_classes=num_classes,
                                            label=test_data[1].flatten(),
                                            predicted=predicted_y)
    result.add_record(model.get_params(), key=token, confusion_matrix=confusion_matrix)
    # If any class label is absent from the training data, predict_proba has
    # fewer columns than num_classes; expand it to the full width, mapping
    # each column back to its class index.
    if predicted_proba.shape[1] != num_classes:
        temp_array = np.zeros((predicted_proba.shape[0], num_classes), np.float32)
        for i in range(len(model.classes_)):
            temp_array[:, model.classes_[i]] = predicted_proba[:, i]
        predicted_proba = temp_array
    return predicted_y, predicted_proba
def load_and_test(token, test_data, num_classes, result):
    """Re-create the model recorded under `token` and run the test split.

    Args:
        token (:obj:`str`): token representing the stored run
        test_data (:obj:`tuple` of :obj:`numpy.array`): Tuple of testing feature and label
        num_classes (:obj:`int`): Number of classes (unused here; kept for
            signature symmetry with training_and_test)
        result (:obj:`pyActLearn.performance.record.LearningResult`): record
            store holding the saved model parameters

    Returns:
        tuple: (predicted_y, predicted_proba).

    NOTE(review): set_params() restores hyper-parameters only -- the forest
    is never re-fitted here, so predict() will raise NotFittedError unless
    the estimator is trained elsewhere first.  Confirm intended usage.
    """
    model = RandomForestClassifier(n_estimators=20, criterion="entropy")
    # BUG FIX: set_params() takes keyword arguments; passing the stored dict
    # positionally raised TypeError.  Unpack it instead.
    model.set_params(**result.get_record_by_key(token)['model'])
    # Test
    predicted_y = model.predict(test_data[0])
    predicted_proba = model.predict_proba(test_data[0])
    return predicted_y, predicted_proba
if __name__ == '__main__':
    # NOTE(review): args_ok is never used; the description and the
    # LearningResult name say "Decision Tree" although the script trains a
    # RandomForestClassifier -- confirm the intended naming.
    args_ok = False
    parser = argparse.ArgumentParser(description='Run Decision Tree on single resident CASAS datasets.')
    parser.add_argument('-d', '--dataset', help='Directory to original datasets')
    parser.add_argument('-o', '--output', help='Output folder')
    parser.add_argument('--h5py', help='HDF5 dataset folder')
    args = parser.parse_args()
    # Default parameters: timestamped log file named after this script
    log_filename = os.path.basename(__file__).split('.')[0] + \
                   '-%s.log' % datetime.now().strftime('%y%m%d_%H:%M:%S')
    # Setup output directory
    output_dir = args.output
    if output_dir is not None:
        output_dir = os.path.abspath(os.path.expanduser(output_dir))
        if os.path.exists(output_dir):
            # Found output_dir, check if it is a directory
            if not os.path.isdir(output_dir):
                exit('Output directory %s is found, but not a directory. Abort.' % output_dir)
        else:
            # Create directory
            os.makedirs(output_dir)
    else:
        output_dir = '.'
    # If dataset is specified, update h5py
    casas_data_dir = args.dataset
    if casas_data_dir is not None:
        casas_data_dir = os.path.abspath(os.path.expanduser(casas_data_dir))
        if not os.path.isdir(casas_data_dir):
            exit('CASAS dataset at %s does not exist. Abort.' % casas_data_dir)
    # Find h5py dataset first
    h5py_dir = args.h5py
    if h5py_dir is not None:
        h5py_dir = os.path.abspath(os.path.expanduser(h5py_dir))
    else:
        # Default location
        h5py_dir = os.path.join(output_dir, 'h5py')
    if os.path.exists(h5py_dir):
        if not os.path.isdir(h5py_dir):
            exit('h5py dataset location %s is not a directory. Abort.' % h5py_dir)
    log_filename = os.path.join(output_dir, log_filename)
    # Setup Logging as early as possible (file + console)
    logging.basicConfig(level=logging.DEBUG,
                        format='[%(asctime)s] %(name)s:%(levelname)s:%(message)s',
                        handlers=[logging.FileHandler(log_filename),
                                  logging.StreamHandler()])
    if not CASASFuel.files_exist(h5py_dir):
        # Finish check and creating all directory needed - now load datasets
        if casas_data_dir is not None:
            casas_data = CASASData(path=casas_data_dir)
            casas_data.summary()
            # SVM needs to use statistical feature with per-sensor and normalization
            casas_data.populate_feature(method='stat', normalized=False, per_sensor=False)
            casas_data.export_hdf5(h5py_dir)
    casas_fuel = CASASFuel(dir_name=h5py_dir)
    # Prepare learning result: resume from pickle when present
    result_pkl_file = os.path.join(output_dir, 'result.pkl')
    result = None
    if os.path.isfile(result_pkl_file):
        f = open(result_pkl_file, 'rb')
        result = pickle.load(f)
        f.close()
        # refuse to mix results from a different dataset
        if result.data != h5py_dir:
            logger.error('Result pickle file found for different dataset %s' % result.data)
            exit('Cannot save learning result at %s' % result_pkl_file)
    else:
        result = LearningResult(name='DecisionTree', data=h5py_dir, mode='by_week')
    num_classes = casas_fuel.get_output_dims()
    # Open Fuel and get all splits
    # NOTE(review): split_list is never used afterwards -- confirm.
    split_list = casas_fuel.get_set_list()
    train_names = ('week 24', 'week 23', 'week 22', 'week 21')
    test_names = ('week 25', 'week 26', 'week 27', 'week 28')
    test_name = 'single_test'
    train_set = casas_fuel.get_dataset(train_names, load_in_memory=True)
    (train_set_data) = train_set.data_sources
    test_set = casas_fuel.get_dataset(test_names, load_in_memory=True)
    (test_set_data) = test_set.data_sources
    # Prepare Back Annotation
    # NOTE(review): these file handles are never closed -- confirm.
    fp_back_annotated = open(os.path.join(output_dir, 'back_annotated.txt'), 'w')
    fp_back_probability = open(os.path.join(output_dir, 'back_annotated_proba.txt'), 'w')
    # run the classifier (comment says svm; the model is a random forest)
    logger.info('Training on %s, Testing on %s' % (str(train_names), str(test_names)))
    # train fresh unless a record for this token already exists
    if result.get_record_by_key(test_name) is None:
        prediction, prediction_proba = training_and_test(test_name, train_set_data, test_set_data, num_classes, result)
    else:
        prediction, prediction_proba = load_and_test(test_name, test_set_data, num_classes, result)
    casas_fuel.back_annotate(fp_back_annotated, prediction=prediction, split_name=test_names)
    casas_fuel.back_annotate_with_proba(fp_back_probability, prediction_proba=prediction_proba, split_name=test_names)
    # NOTE(review): train_name / train_set_data are reassigned but unused
    # beyond this point -- presumably left over from a multi-week loop.
    train_name = test_name
    train_set_data = test_set_data
    # persist the learning result and export a spreadsheet summary
    f = open(result_pkl_file, 'wb')
    pickle.dump(obj=result, file=f, protocol=pickle.HIGHEST_PROTOCOL)
    f.close()
    result.export_to_xlsx(os.path.join(output_dir, 'result.xlsx'))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.