repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
fyffyt/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initializations strategies to make
the algorithm convergence robust as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrate one single run of the ``MiniBatchKMeans``
estimator using a ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of run (with randomly generated dataset) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
    """Generate a shuffled 2D dataset of isotropic Gaussian blobs centered
    on a grid_size x grid_size integer grid, with noise std `scale`.

    Returns (X, y): X of shape (n_samples_per_center * grid_size**2, 2)
    and y the ground-truth cluster index of each sample.
    """
    random_state = check_random_state(random_state)
    centers = np.array([[i, j]
                        for i in range(grid_size)
                        for j in range(grid_size)])
    n_clusters_true, n_features = centers.shape
    # NOTE(review): a single noise realization is drawn and reused for every
    # center, so all clusters share the same noise pattern -- presumably
    # acceptable for this illustration; confirm if independent noise per
    # cluster is intended.
    noise = random_state.normal(
        scale=scale, size=(n_samples_per_center, centers.shape[1]))
    X = np.concatenate([c + noise for c in centers])
    y = np.concatenate([[i] * n_samples_per_center
                        for i in range(n_clusters_true)])
    return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
tomevans/gps | gps/example.py | 1 | 4325 | import gp_class, gp_routines, kernels
import time
import numpy as np
import matplotlib.pyplot as plt
# A simple script to illustrate the basic
# features of the gps package.
# Toy dataset:
n = 25
x = np.r_[ -5:5:1j*n ]
y = np.sin( x ) + 0.2*x
whitenoise = 0.2
e = whitenoise*np.random.randn( n )
data = y + e
# Create GP object:
gp_obj = gp_class.gp( which_type='full' ) # full rather than sparse GP
gp_obj.mfunc = None # zero mean function; otherwise point to user-defined mean function
gp_obj.mpars = {} # empty dict for mean function parameters seeing as we're using a zero mean function
gp_obj.cfunc = kernels.sqexp # squared exponential covariance kernel
gp_obj.cpars = { 'amp':1, 'scale':2 } # covariance parameters
# Note: Users are encouraged to write their own covariance
# kernels to suit their specific task; the format for doing
# this is very straightforward - see the kernels.py module
# for examples and notes.
# Training inputs must be NxM array where M is
# the dimensionality of input space; here M=1:
gp_obj.xtrain = np.reshape( x, [ n, 1 ] )
# The training data must be Nx1:
gp_obj.dtrain = np.reshape( data, [ n, 1 ] )
# White noise error term:
gp_obj.etrain = whitenoise
# Note that this can alternatively be set as an
# Nx1 array if different error terms are associated
# with different data points.
# Generate some random draws from the GP prior,
# assuming we haven't seen the data yet:
xmesh = np.reshape( np.r_[ -5:5:1j*400 ], [ 400, 1 ] )
emesh = None # i.e. set white noise to zero on draws
draws_unconditioned = gp_obj.random_draw( xmesh=xmesh, emesh=emesh, conditioned=False, \
ndraws=4, plot_draws=True )
# Now do the same thing, but with the random draws
# taken from the GP conditioned on the data:
draws_conditioned = gp_obj.random_draw( xmesh=xmesh, emesh=emesh, conditioned=True, \
ndraws=4, plot_draws=True )
# In practice, we would probably like to optimise
# the covariance parameters using the training data.
# We can do this by wrapping the GP log likelihood
# inside an optimiser, MCMC etc. Here's how to
# evaluate the log likelihood:
t1 = time.time()
logp = gp_obj.logp_builtin()
t2 = time.time()
print( '\nlogp_builtin = {0}'.format( logp ) )
print( 'time taken = {0:.5f} sec'.format( t2-t1 ) )
# Sometimes we might want to fix the covariance
# parameters, and optimise for the mean function
# parameters. In this case, the expensive matrix
# inversion needed to evaluate the GP likelihood
# only needs to be performed once, so subsequent
# evaluations of the likelihood are very quick.
# Here's how to do this:
cov_kwpars = gp_obj.prep_fixedcov() # does precomputations before running optimiser
t1 = time.time()
logp = gp_obj.logp_fixedcov( resids=gp_obj.dtrain, kwpars=cov_kwpars ) # wrap this in optimiser
t2 = time.time()
print( '\nlogp_fixedcov = {0}'.format( logp ) )
print( 'time taken = {0:.5f} sec'.format( t2-t1 ) )
# Note: In a real problem, the 'resids' input is
# usually the difference between our training data
# and our model for the mean function. Here, seeing
# as we're using a zero mean function, we can assume
# this step has already been performed, i.e. the
# training data are already our model residuals.
# It's possible to evaluate the mean and covariance
# matrix of the GP:
mu, cov = gp_obj.meancov( xnew=xmesh, enew=gp_obj.etrain, conditioned=True )
# Or if you only want to evaluate the diagonal terms
# of the covariance matrix to save time, use:
mu, sig = gp_obj.predictive( xnew=xmesh, enew=gp_obj.etrain, conditioned=True )
# The 'sig' output is the square root of the covariance
# matrix diagonal, so it can be thought of as the 1-sigma
# predictive uncertainty.
plt.figure()
plt.errorbar( x, data, yerr=whitenoise, fmt='ok' )
plt.plot( xmesh.flatten(), mu.flatten(), '-b', lw=2 )
plt.fill_between( xmesh.flatten(), \
mu.flatten()-2*sig.flatten(), \
mu.flatten()+2*sig.flatten(), \
color=[ 0.9, 0.9, 0.9 ] )
plt.fill_between( xmesh.flatten(), \
mu.flatten()-1*sig.flatten(), \
mu.flatten()+1*sig.flatten(), \
color=[ 0.7, 0.7, 0.7 ] )
plt.title( 'Predictive distribution with 1-sigma and 2-sigma uncertainties shaded' )
| gpl-2.0 |
selective-inference/selective-inference | doc/learning_examples/BH/logit_targets_BH_single.py | 3 | 5676 | import functools
import numpy as np
from scipy.stats import norm as ndist
import regreg.api as rr
from selection.tests.instance import gaussian_instance
from selection.learning.core import (infer_full_target,
split_sampler,
normal_sampler,
logit_fit,
gbm_fit,
repeat_selection,
probit_fit)
from selection.learning.utils import pivot_plot
from selection.learning.learners import mixture_learner
mixture_learner.scales = [1]*10 + [1.5,2,3,4,5,10]
def BHfilter(pval, q=0.2):
    """Benjamini-Hochberg step-up selection.

    Returns the indices of p-values declared significant at nominal FDR
    level `q`; returns an empty list when nothing passes.
    """
    pvalues = np.asarray(pval)
    m = pvalues.shape[0]
    # BH critical values q*k/m for ranks k = 1..m (float arange).
    critical = q * np.arange(1, m + 1.) / m
    below = np.sort(pvalues) < critical
    if not below.any():
        return []
    # Largest rank whose sorted p-value lies under its critical value
    # fixes the rejection threshold.
    cutoff = critical[np.flatnonzero(below).max()]
    return np.flatnonzero(pvalues <= cutoff)
def simulate(n=200, p=100, s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=1000):
    """Run one replication: BH selection on a Gaussian instance, then
    selective inference for one randomly chosen selected variable.

    Returns a pandas DataFrame (one row per inferred target) holding the
    selective pivot/p-value/interval plus naive analogues, or None when
    the selection step picks no variables.
    """
    # BUG FIX: pandas was only imported under __main__ in this module, so
    # calling simulate() from an import raised NameError on `pd`.
    import pandas as pd

    # description of statistical problem
    X, y, truth = gaussian_instance(n=n,
                                    p=p,
                                    s=s,
                                    equicorrelated=False,
                                    rho=0.5,
                                    sigma=sigma,
                                    signal=signal,
                                    random_signs=True,
                                    scale=False)[:3]

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    # OLS residual-based dispersion (noise variance) estimate
    dispersion = np.linalg.norm(resid)**2 / (n - p)

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    smooth_sampler = normal_sampler(S, covS)
    splitting_sampler = split_sampler(X * y[:, None], covS)

    def meta_algorithm(XTX, XTXi, dispersion, sampler):
        # One scale=0 (noiseless) draw of the sufficient statistic, turned
        # into two-sided z-tests and filtered at BH level q=0.2.
        noisy_S = sampler(scale=0.)
        soln = XTXi.dot(noisy_S)
        solnZ = soln / (np.sqrt(np.diag(XTXi)) * np.sqrt(dispersion))
        pval = ndist.cdf(solnZ)
        pval = 2 * np.minimum(pval, 1 - pval)
        return set(BHfilter(pval, q=0.2))

    selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi, dispersion)

    # run selection algorithm
    success_params = (1, 1)
    observed_set = repeat_selection(selection_algorithm, smooth_sampler, *success_params)

    # find the target, based on the observed outcome:
    # pick one selected variable uniformly at random
    idx = sorted(observed_set)
    np.random.shuffle(idx)
    idx = idx[:1]

    if len(idx) > 0:
        print("variable: ", idx, "total selected: ", len(observed_set))
        true_target = truth[idx]

        results = infer_full_target(selection_algorithm,
                                    observed_set,
                                    idx,
                                    splitting_sampler,
                                    dispersion,
                                    hypothesis=true_target,
                                    fit_probability=logit_fit,
                                    fit_args={'df':20},
                                    success_params=success_params,
                                    alpha=alpha,
                                    B=B,
                                    single=True)

        pvalues = [r[2] for r in results]
        covered = [(r[1][0] < t) * (r[1][1] > t) for r, t in zip(results, true_target)]
        pivots = [r[0] for r in results]

        # naive (non-selective) z-inference for comparison
        target_sd = np.sqrt(np.diag(dispersion * XTXi)[idx])
        observed_target = XTXi[idx].dot(X.T.dot(y))
        quantile = ndist.ppf(1 - 0.5 * alpha)
        naive_interval = np.vstack([observed_target - quantile * target_sd,
                                    observed_target + quantile * target_sd])
        naive_pivots = (1 - ndist.cdf((observed_target - true_target) / target_sd))
        naive_pivots = 2 * np.minimum(naive_pivots, 1 - naive_pivots)
        # NOTE(review): naive_pvalues is computed but (as in the original)
        # never stored in the returned frame -- confirm if it is wanted.
        naive_pvalues = (1 - ndist.cdf(observed_target / target_sd))
        naive_pvalues = 2 * np.minimum(naive_pvalues, 1 - naive_pvalues)
        naive_covered = (naive_interval[0] < true_target) * (naive_interval[1] > true_target)
        naive_lengths = naive_interval[1] - naive_interval[0]

        lower = [r[1][0] for r in results]
        upper = [r[1][1] for r in results]
        lengths = np.array(upper) - np.array(lower)

        return pd.DataFrame({'pivot':pivots,
                             'pvalue':pvalues,
                             'coverage':covered,
                             'length':lengths,
                             'naive_pivot':naive_pivots,
                             'naive_coverage':naive_covered,
                             'naive_length':naive_lengths,
                             'upper':upper,
                             'lower':lower,
                             'targets':true_target,
                             # plain `int` instead of the removed np.int alias
                             'batch_size':B * np.ones(len(idx), int)})
    # No variable selected: fall through and return None (as before).
    return None
if __name__ == "__main__":
    # NOTE(review): statsmodels is imported but unused here -- presumably a
    # leftover; confirm before removing.
    import statsmodels.api as sm
    import matplotlib.pyplot as plt
    import pandas as pd
    # Accumulate simulation batches into one CSV, replotting after each.
    for i in range(2000):
        df = simulate(B=5000)
        csvfile = 'logit_targets_BH_single.csv'
        outbase = csvfile[:-4]
        # NOTE(review): `i > 0` discards the very first batch's results --
        # looks intentional (warm-up?) but confirm.
        if df is not None and i > 0:
            try: # concatenate to disk
                df = pd.concat([df, pd.read_csv(csvfile)])
            except FileNotFoundError:
                pass
            df.to_csv(csvfile, index=False)
            if len(df['pivot']) > 0:
                pivot_ax, length_ax = pivot_plot(df, outbase)
| bsd-3-clause |
imito/odin | examples/cifar10_ivec.py | 1 | 1669 | from __future__ import print_function, division, absolute_import
import os
os.environ['ODIN'] = 'gpu,float32'  # must be set before odin is imported
import shutil
import numpy as np
import tensorflow as tf
from odin import backend as K, nnet as N, visual as V, fuel as F
from odin.utils import batching, Progbar, get_exppath, crypto
from odin import ml
from sklearn.svm import SVC
from sklearn.metrics import classification_report
# Experiment output directory for the i-vector model artifacts.
EXP_PATH = get_exppath('cifar10_ivec')
# ===========================================================================
# Load the dataset
# ===========================================================================
ds = F.CIFAR10.load()
print(ds)
# Flatten each 3x32x32 image into a 3072-dim feature vector.
X_train, y_train = ds['X_train'][:].reshape(-1, 3 * 32 * 32), ds['y_train'][:]
X_test, y_test = ds['X_test'][:].reshape(-1, 3 * 32 * 32), ds['y_test'][:]
# ====== normalize the data ====== #
# Scale raw pixel values from [0, 255] to [0, 1].
X_train = X_train / 255.
X_test = X_test / 255.
print("Input:", X_train.shape, X_test.shape)
# ===========================================================================
# Training the GMM
# ===========================================================================
# i-vector extractor: 32-mixture GMM, 16-dim total-variability space,
# 8 EM iterations each for the GMM and the T-matrix (per the kwargs).
ivec = ml.Ivector(path=EXP_PATH, nmix=32, tv_dim=16,
                  niter_gmm=8, niter_tmat=8)
ivec.fit(X_train)
# save_ivecs=True caches the extracted i-vectors; [:] materializes them.
I_train = ivec.transform(X_train, save_ivecs=True, name='train')[:]
I_test = ivec.transform(X_test, save_ivecs=True, name='test')[:]
print(ivec)
# ===========================================================================
# Classifier
# ===========================================================================
# SVM on the i-vector features; report per-class precision/recall/F1.
svm = SVC()
svm.fit(I_train, y_train)
print(classification_report(y_true=y_test,
                            y_pred=svm.predict(I_test)))
| mit |
ZENGXH/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
vshtanko/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
    """Time exact scipy SVD against sklearn's randomized_svd.

    For every (n_samples, n_features) pair a low-rank matrix is generated
    and three decompositions are timed: scipy's exact svd, randomized_svd
    with 0 power iterations, and randomized_svd with `n_iter` iterations.
    Returns a dict mapping method label -> list of elapsed seconds
    (one entry per matrix, in iteration order).
    """
    it = 0
    results = defaultdict(lambda: [])
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            X = make_low_rank_matrix(n_samples, n_features,
                                     effective_rank=rank,
                                     tail_strength=0.2)
            # Collect garbage before each timing so GC pauses don't skew it.
            gc.collect()
            print("benchmarking scipy svd: ")
            tstart = time()
            svd(X, full_matrices=False)
            results['scipy svd'].append(time() - tstart)
            gc.collect()
            print("benchmarking scikit-learn randomized_svd: n_iter=0")
            tstart = time()
            randomized_svd(X, rank, n_iter=0)
            results['scikit-learn randomized_svd (n_iter=0)'].append(
                time() - tstart)
            gc.collect()
            print("benchmarking scikit-learn randomized_svd: n_iter=%d "
                  % n_iter)
            tstart = time()
            randomized_svd(X, rank, n_iter=n_iter)
            results['scikit-learn randomized_svd (n_iter=%d)'
                    % n_iter].append(time() - tstart)
    return results
if __name__ == '__main__':
    # Importing mpl_toolkits.mplot3d registers the '3d' projection.
    from mpl_toolkits.mplot3d import axes3d  # noqa: F401
    import matplotlib.pyplot as plt

    # `int` instead of the np.int alias removed in NumPy 1.24.
    samples_range = np.linspace(2, 1000, 4).astype(int)
    features_range = np.linspace(2, 1000, 4).astype(int)
    results = compute_bench(samples_range, features_range)

    label = 'scikit-learn singular value decomposition benchmark results'
    fig = plt.figure(label)
    # BUG FIX: fig.gca(projection=...) was removed in modern matplotlib;
    # add_subplot with a projection works on both old and new versions.
    ax = fig.add_subplot(111, projection='3d')
    # BUG FIX: dict.iteritems() is Python-2-only; items() works everywhere.
    for c, (label, timings) in zip('rbg', sorted(results.items())):
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        # plot the actual surface
        ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
                        color=c)
        # dummy point plot to stick the legend to since surface plots do not
        # support legends (yet?)
        ax.plot([1], [1], [1], color=c, label=label)
    ax.set_xlabel('n_samples')
    ax.set_ylabel('n_features')
    ax.set_zlabel('Time (s)')
    ax.legend()
    plt.show()
| bsd-3-clause |
rkeisler/sehgal | sehgal.py | 1 | 3371 | import numpy as np
import ipdb
import pickle
import matplotlib.pylab as pl
pl.ion()
datadir = 'data/'
nside = 8192
def make_ksz_cutouts_for_sptsz_like_catalog():
    """Cut out kSZ map stamps at SPT-SZ-like cluster positions and save
    them, with redshift and m500c per cluster, to a multi-HDU FITS file.
    """
    d = load_sptsz_like_catalog()
    ksz = load_ksz_uk_cmb()
    cutouts = make_cutouts_from_catalog(d, ksz)
    # write to fits
    from astropy.io import fits
    hdus = [fits.PrimaryHDU(cutouts), fits.ImageHDU(d['z']), fits.ImageHDU(d['m500c'])]
    hdulist = fits.HDUList(hdus)
    # NOTE(review): writeto raises if the output file already exists --
    # presumably run-once behavior; confirm whether overwrite is wanted.
    hdulist.writeto('sehgal_ksz_cutouts_for_sptsz_like_catalog.fits')
def make_cutouts_from_catalog(catalog, hpix_map, reso_arcmin=0.5, nside=61):
    """Extract gnomonic-projection cutouts of `hpix_map` at every catalog
    position.

    Parameters: `catalog` is a dict with 'ra'/'dec' in degrees (HEALPix
    indices are added in place); `hpix_map` a HEALPix map; `reso_arcmin`
    the cutout pixel size; `nside` the cutout width in pixels.
    NOTE(review): this `nside` is a stamp pixel count, unrelated to the
    module-level HEALPix `nside` it shadows -- confusing but kept for
    interface compatibility.

    Returns an array of shape (n_clusters, nside, nside).
    """
    add_healpix_coordinates(catalog)
    import healpy as hp
    lon_deg = catalog['ra']
    lat_deg = catalog['dec']
    ncl = len(catalog['ra'])
    cutouts = np.zeros((ncl, nside, nside))
    for i, lon, lat in zip(range(ncl), lon_deg, lat_deg):
        # BUG FIX: was a Python-2 print statement (syntax error on py3);
        # the parenthesized single-argument form works on both.
        print('%i/%i' % (i, ncl))
        pl.clf()
        cutout = hp.gnomview(hpix_map, rot=(lon, lat, 0), fig=1,
                             reso=reso_arcmin, xsize=nside, ysize=nside,
                             return_projected_map=True)
        cutouts[i, :, :] = cutout
    return cutouts
def measure_ksz_rms_at_sptsz_like_clusters():
    """Print the kSZ RMS at the positions of an SPT-SZ-like cluster sample."""
    catalog = load_sptsz_like_catalog()
    measure_ksz_rms_for_catalog(catalog)
def measure_ksz_rms_at_ssdf_like_clusters():
    """Print the kSZ RMS at the positions of an SSDF-like cluster sample."""
    catalog = load_ssdf_like_catalog()
    measure_ksz_rms_for_catalog(catalog)
def measure_ksz_rms_for_catalog(d):
    """Print the RMS (in uK-CMB) of the kSZ map sampled at the HEALPix
    pixels of the catalog dict `d` (modified in place with pixel indices).
    """
    add_healpix_coordinates(d)
    # BUG FIX: was `laod_ksz_uk_cmb()` -- a typo that raised NameError;
    # the loader defined in this module is load_ksz_uk_cmb.
    ksz_uk = load_ksz_uk_cmb()
    rms = ksz_uk[d['ind_hpix']].std()
    # BUG FIX: was a Python-2 print statement; parenthesized form is
    # valid on both Python 2 and 3.
    print('RMS is %0.1f uK-CMB' % (rms))
def load_ksz_uk_cmb():
    """Load the 219 GHz kSZ HEALPix map and convert it from Jy/sr to uK-CMB."""
    from astropy.io import fits
    tmp = fits.open(datadir+'219_ksz_healpix.fits')[1].data['signal']
    # Jy/sr -> K_CMB at 219 GHz, then K -> uK.
    ksz_uk = tmp*jy_per_steradian_to_k_cmb(219)*1e6
    return ksz_uk
def add_healpix_coordinates(d):
    """Augment catalog dict `d`, in place, with spherical angles (radians)
    and ring-ordered HEALPix pixel indices at the module-level nside."""
    import healpy as hp
    # RA/dec in degrees -> longitude phi / colatitude theta in radians.
    azimuth = d['ra'] * np.pi / 180.
    colatitude = (90. - d['dec']) * np.pi / 180.
    d['ind_hpix'] = hp.ang2pix(nside, colatitude, azimuth, nest=False)
    d['phi'] = azimuth
    d['theta'] = colatitude
def load_sptsz_like_catalog():
    """Halo catalog mimicking SPT-SZ selection: M500c > 2e14, z > 0.1."""
    return load_halo_catalog(z_min=0.1, m500c_min=2e14)
def load_ssdf_like_catalog():
    """Halo catalog mimicking SSDF selection: M500c > 1.8e14, z > 1.3."""
    return load_halo_catalog(z_min=1.3, m500c_min=1.8e14)
def load_halo_catalog(m500c_min=-1, m500c_max=9e99, z_min=-1, z_max=100):
    """Load the simulated halo catalog as a dict of per-halo arrays,
    keeping only halos with m500c in (m500c_min, m500c_max) and redshift
    in (z_min, z_max) -- all bounds exclusive.
    """
    # see http://lambda.gsfc.nasa.gov/toolbox/tb_sim_readme_halo.cfm for definitions
    # and note that all masses are w.r.t critical densities and have no h's in them.
    import os.path
    # Build the cached .npy version of the ascii table on first use.
    if not(os.path.isfile(datadir+'halo_nbody.npy')):
        make_halo_nbody_npy()
    tmp = np.load(datadir+'halo_nbody.npy')
    # Column 0 is redshift, column 14 is m500c (see the `keys` list below).
    wh_keep = np.where( (tmp[:,14]>m500c_min) & (tmp[:,14]<m500c_max) & (tmp[:,0]>z_min) & (tmp[:,0]<z_max) )[0]
    tmp = tmp[wh_keep, :]
    # Column names, in file order; each becomes a key of the output dict.
    keys = ['z','ra','dec',
            'xpos','ypos','zpos',
            'xvel','yvel','zvel',
            'mfof','mvir','rvir',
            'm200c','r200c',
            'm500c','r500c',
            'm1500c','r1500c',
            'm2500c','r2500c']
    d = {}
    for i in range(len(keys)):
        d[keys[i]] = tmp[:, i]
    return d
def make_halo_nbody_npy():
    """One-time cache step: convert the ascii halo table to .npy for fast loads."""
    tmp = np.loadtxt(datadir+'halo_nbody.ascii')
    np.save(datadir+'halo_nbody', tmp)
def jy_per_steradian_to_k_cmb(nu):
    """Conversion factor from Jy/sr to K_CMB at band-center frequency `nu`.

    Only the tabulated bands are supported; any other `nu` raises
    KeyError, matching the original lookup behavior.
    """
    tcmb = 2.72548  # CMB temperature [K]
    # Jy/sr per K_CMB, keyed by frequency.
    jy_per_sr_per_k = {
        30: 7.364967e7,
        90: 5.526540e8,
        148: 1.072480e9,
        219: 1.318837e9,
        277: 1.182877e9,
        350: 8.247628e8,
    }
    return tcmb / jy_per_sr_per_k[nu]
| bsd-3-clause |
NewKnowledge/punk | punk/aggregator/aggregateByDateTime.py | 1 | 2577 | import pandas as pd
import numpy as np
from typing import List, NamedTuple
from .timeseries import agg_by_date
from primitive_interfaces.base import PrimitiveBase
Inputs = pd.DataFrame
Outputs = np.ndarray
Params = dict
CallMetadata = dict
class AggregateByDateTime(PrimitiveBase[Inputs, Outputs, Params]):
    """Primitive performing arbitrary groupby aggregations of a DataFrame
    over datetime intervals (delegates to timeseries.agg_by_date)."""

    __author__ = 'distil'
    # D3M-style primitive registration metadata.
    __metadata__ = {
        "id": "0a67edef-d032-37b7-b88c-792f567177e9",
        "name": "punk.aggregator.aggregateByDateTime.AggregateByDateTime",
        "common_name": "DatetimeAggregation",
        "description": "Arbitrary groupby aggregations over intervals of time",
        "languages": [
            "python3.6"
        ],
        "library": "punk",
        "version": "1.1.1",
        "source_code": "https://github.com/NewKnowledge/punk/blob/dev/punk/aggregator/aggregateByDateTime.py",
        "is_class": True,
        "algorithm_type": [
            "aggregation"
        ],
        "task_type": [
            "data cleaning"
        ],
        "output_type": [
            "features"
        ],
        "team": "distil",
        "schema_version": 1.0,
        "build": [
            {
                "type": "pip",
                "package": "punk"
            }
        ],
        "compute_resources": {
            "sample_size": [
                1000.0,
                10.0
            ],
            "sample_unit": [
                "MB"
            ],
            "num_nodes": [
                1
            ],
            "cores_per_node": [
                1
            ],
            "gpus_per_node": [
                0
            ],
            "mem_per_node": [
                1.0
            ],
            "disk_per_node": [
                1.0
            ],
            "mem_per_gpu": [
                0.0
            ],
            "expected_running_time": [
                5.0
            ]
        }
    }

    def __init__(self):
        pass

    def get_params(self) -> Params:
        """Return fitted parameters; this primitive is stateless."""
        return {}

    def set_params(self, params: Params) -> None:
        """Store externally supplied parameters (not used by produce)."""
        self.params = params

    def get_call_metadata(self) -> CallMetadata:
        """Return metadata about the last call (none is recorded)."""
        return {}

    def fit(self) -> None:
        """No-op: this primitive requires no fitting."""
        pass

    def produce(self, inputs: Inputs, datetime: List[str] = None, values: List[str] = None,
                interval: str = None, aggregation: str = 'mean') -> Outputs:
        """Aggregate the `values` columns of `inputs`, grouped into
        `interval`-sized bins of the `datetime` column(s), using the
        `aggregation` function (default 'mean').

        BUG FIX: `values` previously defaulted to a mutable `[]`, a single
        list object shared across every call. `None` is now the sentinel
        and is replaced with a fresh empty list per call, which preserves
        the original observable behavior.
        """
        if values is None:
            values = []
        return agg_by_date(inputs, datetime, values, interval=interval, agg=aggregation)
Myasuka/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 213 | 11911 | import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
                      strategy, statistics, missing_values):
    """Utility function for testing imputation for a given strategy.

    Runs Imputer along both axes, on dense and sparse input, and checks:
    - the learned statistics (mean, median, mode) equal `statistics`
    - transforming X reproduces `X_true`
    A NaN inside `statistics` marks a feature whose statistic is undefined
    (e.g. entirely missing); transforming along axis=1 must then raise
    ValueError instead of imputing.
    """
    err_msg = "Parameters: strategy = %s, missing_values = %s, " \
              "axis = {0}, sparse = {1}" % (strategy, missing_values)
    # Normal matrix, axis = 0
    imputer = Imputer(missing_values, strategy=strategy, axis=0)
    X_trans = imputer.fit(X).transform(X.copy())
    assert_array_equal(imputer.statistics_, statistics,
                       err_msg.format(0, False))
    assert_array_equal(X_trans, X_true, err_msg.format(0, False))
    # Normal matrix, axis = 1 -- fit on the transpose so the same expected
    # arrays can be reused.
    imputer = Imputer(missing_values, strategy=strategy, axis=1)
    imputer.fit(X.transpose())
    if np.isnan(statistics).any():
        # Undefined statistics cannot be imputed row-wise.
        assert_raises(ValueError, imputer.transform, X.copy().transpose())
    else:
        X_trans = imputer.transform(X.copy().transpose())
        assert_array_equal(X_trans, X_true.transpose(),
                           err_msg.format(1, False))
    # Sparse matrix, axis = 0
    imputer = Imputer(missing_values, strategy=strategy, axis=0)
    imputer.fit(sparse.csc_matrix(X))
    X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
    if sparse.issparse(X_trans):
        X_trans = X_trans.toarray()
    assert_array_equal(imputer.statistics_, statistics,
                       err_msg.format(0, True))
    assert_array_equal(X_trans, X_true, err_msg.format(0, True))
    # Sparse matrix, axis = 1
    imputer = Imputer(missing_values, strategy=strategy, axis=1)
    imputer.fit(sparse.csc_matrix(X.transpose()))
    if np.isnan(statistics).any():
        assert_raises(ValueError, imputer.transform,
                      sparse.csc_matrix(X.copy().transpose()))
    else:
        X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
        if sparse.issparse(X_trans):
            X_trans = X_trans.toarray()
        assert_array_equal(X_trans, X_true.transpose(),
                           err_msg.format(1, True))
def test_imputation_shape():
    """The imputed output keeps the input's shape for every strategy,
    for dense as well as sparse input."""
    X = np.random.randn(10, 2)
    X[::2] = np.nan
    for strategy in ['mean', 'median', 'most_frequent']:
        imputer = Imputer(strategy=strategy)
        dense_out = imputer.fit_transform(X)
        assert_equal(dense_out.shape, (10, 2))
        sparse_out = imputer.fit_transform(sparse.csr_matrix(X))
        assert_equal(sparse_out.shape, (10, 2))
def test_imputation_mean_median_only_zero():
    """Test imputation using the mean and median strategies, when
    missing_values == 0."""
    X = np.array([
        [np.nan, 0, 0, 0, 5],
        [np.nan, 1, 0, np.nan, 3],
        [np.nan, 2, 0, 0, 0],
        [np.nan, 6, 0, 5, 13],
    ])
    # Expected output: columns whose statistic is undefined (all-NaN or
    # all-"missing" zeros) are dropped, leaving two columns.
    X_imputed_mean = np.array([
        [3, 5],
        [1, 3],
        [2, 7],
        [6, 13],
    ])
    statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
    # Behaviour of median with NaN is undefined, e.g. different results in
    # np.median and np.ma.median
    X_for_median = X[:, [0, 1, 2, 4]]
    X_imputed_median = np.array([
        [2, 5],
        [1, 3],
        [2, 5],
        [6, 13],
    ])
    statistics_median = [np.nan, 2, np.nan, 5]
    _check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
    _check_statistics(X_for_median, X_imputed_median, "median",
                      statistics_median, 0)
def test_imputation_mean_median():
    """Test mean/median imputation on randomly constructed matrices, for
    missing_values of both NaN and 0."""
    rng = np.random.RandomState(0)
    dim = 10
    dec = 10
    shape = (dim * dim, dim + dec)
    zeros = np.zeros(shape[0])
    values = np.arange(1, shape[0]+1)
    values[4::2] = - values[4::2]  # mix signs so mean/median are non-trivial
    # (strategy, missing marker, expected statistic as a function of the
    #  zeros z, the real values v and the missing placeholders p)
    tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
             ("mean", 0, lambda z, v, p: np.mean(v)),
             ("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
             ("median", 0, lambda z, v, p: np.median(v))]
    for strategy, test_missing_values, true_value_fun in tests:
        X = np.empty(shape)
        X_true = np.empty(shape)
        true_statistics = np.empty(shape[1])
        # Create a matrix X with columns
        # - with only zeros,
        # - with only missing values
        # - with zeros, missing values and values
        # And a matrix X_true containing all true values
        for j in range(shape[1]):
            nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
            nb_missing_values = max(shape[0] + dec * dec
                                    - (j + dec) * (j + dec), 0)
            nb_values = shape[0] - nb_zeros - nb_missing_values
            z = zeros[:nb_zeros]
            p = np.repeat(test_missing_values, nb_missing_values)
            v = values[rng.permutation(len(values))[:nb_values]]
            true_statistics[j] = true_value_fun(z, v, p)
            # Create the columns
            X[:, j] = np.hstack((v, z, p))
            if 0 == test_missing_values:
                # zeros count as missing too, so they are imputed as well
                X_true[:, j] = np.hstack((v,
                                          np.repeat(
                                              true_statistics[j],
                                              nb_missing_values + nb_zeros)))
            else:
                X_true[:, j] = np.hstack((v,
                                          z,
                                          np.repeat(true_statistics[j],
                                                    nb_missing_values)))
            # Shuffle them the same way
            np.random.RandomState(j).shuffle(X[:, j])
            np.random.RandomState(j).shuffle(X_true[:, j])
        # Mean doesn't support columns containing NaNs, median does
        if strategy == "median":
            cols_to_keep = ~np.isnan(X_true).any(axis=0)
        else:
            cols_to_keep = ~np.isnan(X_true).all(axis=0)
        X_true = X_true[:, cols_to_keep]
        _check_statistics(X, X_true, strategy,
                          true_statistics, test_missing_values)
def test_imputation_median_special_cases():
    """Test median imputation with sparse boundary cases."""
    # Rows below become columns after the transpose; the trailing np.nan
    # in each is the entry to be imputed with that column's median.
    X = np.array([
        [0, np.nan, np.nan],  # odd: implicit zero
        [5, np.nan, np.nan],  # odd: explicit nonzero
        [0, 0, np.nan],  # even: average two zeros
        [-5, 0, np.nan],  # even: avg zero and neg
        [0, 5, np.nan],  # even: avg zero and pos
        [4, 5, np.nan],  # even: avg nonzeros
        [-4, -5, np.nan],  # even: avg negatives
        [-1, 2, np.nan],  # even: crossing neg and pos
    ]).transpose()
    X_imputed_median = np.array([
        [0, 0, 0],
        [5, 5, 5],
        [0, 0, 0],
        [-5, 0, -2.5],
        [0, 5, 2.5],
        [4, 5, 4.5],
        [-4, -5, -4.5],
        [-1, 2, .5],
    ]).transpose()
    statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
    _check_statistics(X, X_imputed_median, "median",
                      statistics_median, 'NaN')
def test_imputation_most_frequent():
    """Test imputation using the most-frequent strategy (missing marker -1)."""
    X = np.array([
        [-1, -1, 0, 5],
        [-1, 2, -1, 3],
        [-1, 1, 3, -1],
        [-1, 2, 3, 7],
    ])
    # The first column is entirely missing, so it is dropped from the output.
    X_true = np.array([
        [2, 0, 5],
        [2, 3, 3],
        [1, 3, 3],
        [2, 3, 7],
    ])
    # scipy.stats.mode, used in Imputer, doesn't return the first most
    # frequent as promised in the doc but the lowest most frequent. When this
    # test will fail after an update of scipy, Imputer will need to be updated
    # to be consistent with the new (correct) behaviour
    _check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
    """Imputer must be usable inside a Pipeline under GridSearchCV."""
    pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
                         ('tree', tree.DecisionTreeRegressor(random_state=0))])
    parameters = {
        'imputer__strategy': ["mean", "median", "most_frequent"],
        'imputer__axis': [0, 1]
    }
    n = 100
    X = sparse_random_matrix(n, n, density=0.10)
    Y = sparse_random_matrix(n, 1, density=0.10).toarray()
    searcher = grid_search.GridSearchCV(pipeline, parameters)
    searcher.fit(X, Y)
def test_imputation_pickle():
    """A fitted Imputer must survive a pickle round-trip: the restored
    estimator has to transform identically to the original one."""
    import pickle
    n = 100
    X = sparse_random_matrix(n, n, density=0.10)
    for strategy in ["mean", "median", "most_frequent"]:
        original = Imputer(missing_values=0, strategy=strategy)
        original.fit(X)
        restored = pickle.loads(pickle.dumps(original))
        assert_array_equal(original.transform(X.copy()),
                           restored.transform(X.copy()),
                           "Fail to transform the data after pickling "
                           "(strategy = %s)" % (strategy))
def test_imputation_copy():
    """Test the `copy` parameter of Imputer for dense and sparse inputs.

    For each combination of `copy`, sparse format and `axis`, mutate the
    transformed output and check whether the mutation is (not) reflected
    in the input, i.e. whether a copy was actually made.
    """
    X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
    # copy=True, dense => copy
    X = X_orig.copy().toarray()
    imputer = Imputer(missing_values=0, strategy="mean", copy=True)
    Xt = imputer.fit(X).transform(X)
    Xt[0, 0] = -1
    assert_false(np.all(X == Xt))
    # copy=True, sparse csr => copy
    X = X_orig.copy()
    imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_false(np.all(X.data == Xt.data))
    # copy=False, dense => no copy
    X = X_orig.copy().toarray()
    imputer = Imputer(missing_values=0, strategy="mean", copy=False)
    Xt = imputer.fit(X).transform(X)
    Xt[0, 0] = -1
    assert_true(np.all(X == Xt))
    # copy=False, sparse csr, axis=1 => no copy
    X = X_orig.copy()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=1)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_true(np.all(X.data == Xt.data))
    # copy=False, sparse csc, axis=0 => no copy
    X = X_orig.copy().tocsc()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=0)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_true(np.all(X.data == Xt.data))
    # copy=False, sparse csr, axis=0 => copy (format conversion forces one)
    X = X_orig.copy()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=0)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_false(np.all(X.data == Xt.data))
    # copy=False, sparse csc, axis=1 => copy (format conversion forces one)
    X = X_orig.copy().tocsc()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=1)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_false(np.all(X.data == Xt.data))
    # copy=False, sparse csr, axis=1, missing_values=0 => copy
    X = X_orig.copy()
    imputer = Imputer(missing_values=0, strategy="mean",
                      copy=False, axis=1)
    Xt = imputer.fit(X).transform(X)
    assert_false(sparse.issparse(Xt))
    # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
    # made, even if copy=False.
| bsd-3-clause |
kernc/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 99 | 4163 |
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
    'alt.atheism',
    'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
# Downloaded once and cached by scikit-learn for subsequent runs.
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
    ('vect', CountVectorizer()),
    ('tfidf', TfidfTransformer()),
    ('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
    'vect__max_df': (0.5, 0.75, 1.0),
    #'vect__max_features': (None, 5000, 10000, 50000),
    'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
    #'tfidf__use_idf': (True, False),
    #'tfidf__norm': ('l1', 'l2'),
    'clf__alpha': (0.00001, 0.000001),
    'clf__penalty': ('l2', 'elasticnet'),
    #'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
    # multiprocessing requires the fork to happen in a __main__ protected
    # block
    # find the best parameters for both the feature extraction and the
    # classifier
    # n_jobs=-1 uses all available CPU cores for the grid search.
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
    print("Performing grid search...")
    print("pipeline:", [name for name, _ in pipeline.steps])
    print("parameters:")
    pprint(parameters)
    t0 = time()
    grid_search.fit(data.data, data.target)
    print("done in %0.3fs" % (time() - t0))
    print()
    print("Best score: %0.3f" % grid_search.best_score_)
    print("Best parameters set:")
    best_parameters = grid_search.best_estimator_.get_params()
    for param_name in sorted(parameters.keys()):
        print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
simon-pepin/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants bicluster with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
# Generate the un-shuffled checkerboard data with 5 implanted biclusters.
data, rows, columns = make_biclusters(
    shape=(300, 300), n_clusters=5, noise=5,
    shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
# Shuffle rows/columns, keeping the permutation indices so the ground-truth
# bicluster indicators can be permuted the same way below.
# NOTE(review): sg._shuffle is a private scikit-learn helper and may change
# without notice — confirm it is still available in the pinned version.
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
# A consensus score of 1.0 means the biclusters were recovered perfectly.
score = consensus_score(model.biclusters_,
                        (rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
# Reorder rows and columns by cluster label to make the biclusters contiguous.
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
tedlaz/pyted | elee/elee/parsers.py | 1 | 6604 | """
Παρσάρισμα ημερολογίου singular
"""
from . import utils as ul
from . import arthro
def check_line(line, stxt):
    """
    Check that a line of text carries specific characters at selected
    positions.  Return True when every expected character is present;
    return False otherwise, including when the line is too short.
    stxt format: '0:t|2:r|5:s' -> position 0 is 't', 2 is 'r', 5 is 's'.
    """
    # Turn the spec string into {position: expected_character}.
    expected = {}
    for pair in stxt.split('|'):
        fields = pair.split(':')
        expected[int(fields[0])] = fields[1]
    line_length = len(line)
    for pos, char in expected.items():
        # The line must be long enough AND carry the expected character.
        if line_length <= pos or line[pos] != char:
            return False
    return True
def parse_el(elfile, encoding='WINDOWS-1253'):
    """Parse a Singular journal ("ημερολόγιο") fixed-width text export.

    Returns a tuple ``(lmoi, arthra)`` where ``lmoi`` maps account codes to
    account descriptions and ``arthra`` is the list of parsed Arthro
    (journal entry) objects.

    NOTE(review): the magic column offsets below encode the fixed-width
    report layout — verify against an actual export before changing them.
    """
    dat = par = per = lmo = lmp = xre = pis = ''
    lmoi = {}
    arthra = []
    lineper = 0
    arthro_number = line_number = 1
    with open(elfile, encoding=encoding) as afile:
        for i, lin in enumerate(afile):
            # Here we have first line for article (date / document number).
            if check_line(lin, '4:/|7:/|50:.|53:.|56:.|134:,|149:,'):
                dat = ul.iso_date_from_greek(lin[2:12])
                par = ul.remove_simple_quotes(lin[22:48])
                arth = arthro.Arthro(dat, par, per, arthro_number)
                arthro_number += 1
                arthra.append(arth)
                # The description line is expected right after the header.
                lineper = i + 1
            # Detail line: account code, description, debit, credit.
            if check_line(lin, '50:.|53:.|56:.|134:,|149:,|152: '):
                lmo = lin[48:60].strip()
                lmp = ul.remove_simple_quotes(lin[77:122])
                xre = ul.dec(ul.iso_number_from_greek(lin[124:137]))
                pis = ul.dec(ul.iso_number_from_greek(lin[139:152]))
                arth.add_line(lmo, xre, pis, line_number)
                line_number += 1
                # First occurrence wins for the account description map.
                if lmo not in lmoi:
                    lmoi[lmo] = lmp
            elif i == lineper and i > 0:
                # Candidate description line following a header; reject lines
                # of implausible length or known non-description markers.
                if len(lin) < 49 or len(lin) > 130:
                    lineper += 1
                    continue
                if lin[47] != ' ' or lin[22:27] == 'Σχετ.':
                    lineper += 1
                    continue
                arth.pe2 = lin[23:48].strip()
                arth.per = lin[48:].strip()
                lineper = 0
    return lmoi, arthra
def parse_el_pandas(elfile, encoding='WINDOWS-1253'):
    """Like :func:`parse_el` but also collects flat rows for a DataFrame.

    Returns ``(lmoi, arthra, lins)`` where ``lins`` is a list of
    ``[arthro_number, line_number, date, parastatiko, account, debit, credit]``
    rows suitable for building a pandas DataFrame.

    NOTE(review): ``arthro_number`` is incremented for the *next* entry
    before detail lines are appended, so rows in ``lins`` carry number+1
    relative to the Arthro they belong to — confirm this offset is intended.
    """
    dat = par = per = lmo = lmp = xre = pis = ''
    lmoi = {}
    arthra = []
    lineper = 0
    lins = []
    arthro_number = line_number = 1
    with open(elfile, encoding=encoding) as afile:
        for i, lin in enumerate(afile):
            # Here we have first line for article (date / document number).
            if check_line(lin, '4:/|7:/|50:.|53:.|56:.|134:,|149:,'):
                dat = ul.iso_date_from_greek(lin[2:12])
                par = ul.remove_simple_quotes(lin[22:48])
                arth = arthro.Arthro(dat, par, per, arthro_number)
                arthro_number += 1
                arthra.append(arth)
                lineper = i + 1
            # Detail line: account code, description, debit, credit.
            if check_line(lin, '50:.|53:.|56:.|134:,|149:,|152: '):
                lmo = lin[48:60].strip()
                lmp = ul.remove_simple_quotes(lin[77:122])
                xre = ul.dec(ul.iso_number_from_greek(lin[124:137]))
                pis = ul.dec(ul.iso_number_from_greek(lin[139:152]))
                arth.add_line(lmo, xre, pis, line_number)
                lins.append([arthro_number, line_number, dat,
                             par, lmo, xre, pis])
                line_number += 1
                if lmo not in lmoi:
                    lmoi[lmo] = lmp
            elif i == lineper and i > 0:
                # Candidate description line right after a header.
                if len(lin) < 49 or len(lin) > 130:
                    lineper += 1
                    continue
                if lin[47] != ' ' or lin[22:27] == 'Σχετ.':
                    lineper += 1
                    continue
                arth.pe2 = lin[23:48].strip()
                arth.per = lin[48:].strip()
                lineper = 0
    return lmoi, arthra, lins
def parse_ee_old(eefile, encoding='WINDOWS-1253'):
    """Extract (name -> AFM) mappings from a fixed-width Singular report.

    Returns ``(name_afm, dublicates)``: ``name_afm`` maps each customer
    name to its AFM (Greek tax id); ``dublicates`` records names that were
    seen again with a *different* AFM, keyed as 'name -> line_number'.
    """
    name_afm = {} # {'tedlaz': 04678}
    dublicates = {}
    with open(eefile, encoding=encoding) as afile:
        for i, lin in enumerate(afile):
            # Data lines are long; short lines are headers/footers.
            if len(lin) < 100:
                continue
            vals = lin[67:91].split()
            if len(vals) == 0:
                continue
            if ul.is_afm(vals[0]):
                afm = vals[0]
                name = lin[77:91].split('-')[0].strip()
                if name in name_afm.keys():
                    if afm == name_afm[name]:
                        continue
                    else:  # Same name but a different AFM
                        print("Ίδιο όνομα με άλλο ΑΦΜ (γραμμή %s)" % (i + 1))
                        name = '%s -> %s' % (name, i + 1)
                        dublicates[name] = afm
                else:
                    name_afm[name] = afm
    return name_afm, dublicates
def parse_ee(eefile, encoding='WINDOWS-1253'):
    """Parse a Singular report, grouping (name -> AFM) mappings by date.

    The date is taken from the most recent 'Κινήσεις της ...' header line
    and applied to all following data lines.  Returns a nested dict:
    ``{date_string: {name: afm}}``.
    """
    adi = {}
    dat = ''
    with open(eefile, encoding=encoding) as afile:
        for lin in afile:
            # Date header line introducing a new day's movements.
            if lin[2:14] == 'Κινήσεις της':
                dat = lin[32:42]
                # print(dat)
            if len(lin) < 100:
                continue
            vals = lin[67:91].split()
            if len(vals) == 0:
                continue
            if ul.is_afm(vals[0]):
                afm = vals[0]
                name = lin[77:91].split('-')[0].strip()
                adi[dat] = adi.get(dat, {})
                adi[dat][name] = afm
    return adi
def parse_ee_flat(eefile, encoding='WINDOWS-1253'):
    """
    Return a flat list of tuples [(date1, name1, afm1), (date2, name2, afm2),
    ...] parsed from a Singular report export.  Dates are converted to ISO
    format; the current date applies to all data lines until the next
    'Κινήσεις της ...' header line.
    """
    date_name_afm = []
    dat = ''
    with open(eefile, encoding=encoding) as afile:
        for lin in afile:
            # Date header line introducing a new day's movements.
            if lin[2:14] == 'Κινήσεις της':
                dat = ul.iso_date_from_greek(lin[32:42])
                # print(dat)
            if len(lin) < 100:
                continue
            vals = lin[67:91].split()
            if len(vals) == 0:
                continue
            if ul.is_afm(vals[0]):
                afm = vals[0]
                name = lin[77:91].split('-')[0].strip()
                date_name_afm.append((dat, name, afm))
    return date_name_afm
| gpl-3.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/tests/series/test_internals.py | 7 | 12848 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime
from numpy import nan
import numpy as np
from pandas import Series
from pandas.tseries.index import Timestamp
import pandas.lib as lib
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
class TestSeriesInternals(tm.TestCase):
    """Tests for Series conversion internals (convert_objects / _convert).

    NOTE(review): pins deprecated behaviour of the legacy (pre-0.17)
    ``convert_objects`` API as well as the private ``_convert`` — expected
    values here are version-specific.
    """
    _multiprocess_can_split_ = True
    def test_convert_objects(self):
        """Deprecated convert_objects: warns, converts numerics and dates."""
        s = Series([1., 2, 3], index=['a', 'b', 'c'])
        with tm.assert_produces_warning(FutureWarning):
            result = s.convert_objects(convert_dates=False,
                                       convert_numeric=True)
        assert_series_equal(result, s)
        # force numeric conversion
        r = s.copy().astype('O')
        r['a'] = '1'
        with tm.assert_produces_warning(FutureWarning):
            result = r.convert_objects(convert_dates=False,
                                       convert_numeric=True)
        assert_series_equal(result, s)
        r = s.copy().astype('O')
        r['a'] = '1.'
        with tm.assert_produces_warning(FutureWarning):
            result = r.convert_objects(convert_dates=False,
                                       convert_numeric=True)
        assert_series_equal(result, s)
        # non-numeric strings become NaN under convert_numeric
        r = s.copy().astype('O')
        r['a'] = 'garbled'
        expected = s.copy()
        expected['a'] = np.nan
        with tm.assert_produces_warning(FutureWarning):
            result = r.convert_objects(convert_dates=False,
                                       convert_numeric=True)
        assert_series_equal(result, expected)
        # GH 4119, not converting a mixed type (e.g.floats and object)
        s = Series([1, 'na', 3, 4])
        with tm.assert_produces_warning(FutureWarning):
            result = s.convert_objects(convert_numeric=True)
        expected = Series([1, np.nan, 3, 4])
        assert_series_equal(result, expected)
        s = Series([1, '', 3, 4])
        with tm.assert_produces_warning(FutureWarning):
            result = s.convert_objects(convert_numeric=True)
        expected = Series([1, np.nan, 3, 4])
        assert_series_equal(result, expected)
        # dates
        s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
                    datetime(2001, 1, 3, 0, 0)])
        s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
                     datetime(2001, 1, 3, 0, 0), 'foo', 1.0, 1,
                     Timestamp('20010104'), '20010105'],
                    dtype='O')
        with tm.assert_produces_warning(FutureWarning):
            result = s.convert_objects(convert_dates=True,
                                       convert_numeric=False)
        expected = Series([Timestamp('20010101'), Timestamp('20010102'),
                           Timestamp('20010103')], dtype='M8[ns]')
        assert_series_equal(result, expected)
        with tm.assert_produces_warning(FutureWarning):
            result = s.convert_objects(convert_dates='coerce',
                                       convert_numeric=False)
        with tm.assert_produces_warning(FutureWarning):
            result = s.convert_objects(convert_dates='coerce',
                                       convert_numeric=True)
        assert_series_equal(result, expected)
        # unconvertible entries coerce to NaT
        expected = Series([Timestamp('20010101'), Timestamp('20010102'),
                           Timestamp('20010103'),
                           lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'),
                           Timestamp('20010105')], dtype='M8[ns]')
        with tm.assert_produces_warning(FutureWarning):
            result = s2.convert_objects(convert_dates='coerce',
                                        convert_numeric=False)
        assert_series_equal(result, expected)
        with tm.assert_produces_warning(FutureWarning):
            result = s2.convert_objects(convert_dates='coerce',
                                        convert_numeric=True)
        assert_series_equal(result, expected)
        # preserver all-nans (if convert_dates='coerce')
        s = Series(['foo', 'bar', 1, 1.0], dtype='O')
        with tm.assert_produces_warning(FutureWarning):
            result = s.convert_objects(convert_dates='coerce',
                                       convert_numeric=False)
        expected = Series([lib.NaT] * 2 + [Timestamp(1)] * 2)
        assert_series_equal(result, expected)
        # preserver if non-object
        s = Series([1], dtype='float32')
        with tm.assert_produces_warning(FutureWarning):
            result = s.convert_objects(convert_dates='coerce',
                                       convert_numeric=False)
        assert_series_equal(result, s)
        # r = s.copy()
        # r[0] = np.nan
        # result = r.convert_objects(convert_dates=True,convert_numeric=False)
        # self.assertEqual(result.dtype, 'M8[ns]')
        # dateutil parses some single letters into today's value as a date
        for x in 'abcdefghijklmnopqrstuvwxyz':
            s = Series([x])
            with tm.assert_produces_warning(FutureWarning):
                result = s.convert_objects(convert_dates='coerce')
            assert_series_equal(result, s)
            s = Series([x.upper()])
            with tm.assert_produces_warning(FutureWarning):
                result = s.convert_objects(convert_dates='coerce')
            assert_series_equal(result, s)
    def test_convert_objects_preserve_bool(self):
        """A lone bool among ints becomes an int under convert_numeric."""
        s = Series([1, True, 3, 5], dtype=object)
        with tm.assert_produces_warning(FutureWarning):
            r = s.convert_objects(convert_numeric=True)
        e = Series([1, 1, 3, 5], dtype='i8')
        tm.assert_series_equal(r, e)
    def test_convert_objects_preserve_all_bool(self):
        """An all-bool object Series keeps bool dtype under convert_numeric."""
        s = Series([False, True, False, False], dtype=object)
        with tm.assert_produces_warning(FutureWarning):
            r = s.convert_objects(convert_numeric=True)
        e = Series([False, True, False, False], dtype=bool)
        tm.assert_series_equal(r, e)
    # GH 10265
    def test_convert(self):
        """Private _convert: datetime/numeric/timedelta paths and coercion."""
        # Tests: All to nans, coerce, true
        # Test coercion returns correct type
        s = Series(['a', 'b', 'c'])
        results = s._convert(datetime=True, coerce=True)
        expected = Series([lib.NaT] * 3)
        assert_series_equal(results, expected)
        results = s._convert(numeric=True, coerce=True)
        expected = Series([np.nan] * 3)
        assert_series_equal(results, expected)
        expected = Series([lib.NaT] * 3, dtype=np.dtype('m8[ns]'))
        results = s._convert(timedelta=True, coerce=True)
        assert_series_equal(results, expected)
        dt = datetime(2001, 1, 1, 0, 0)
        td = dt - datetime(2000, 1, 1, 0, 0)
        # Test coercion with mixed types
        s = Series(['a', '3.1415', dt, td])
        results = s._convert(datetime=True, coerce=True)
        expected = Series([lib.NaT, lib.NaT, dt, lib.NaT])
        assert_series_equal(results, expected)
        results = s._convert(numeric=True, coerce=True)
        expected = Series([nan, 3.1415, nan, nan])
        assert_series_equal(results, expected)
        results = s._convert(timedelta=True, coerce=True)
        expected = Series([lib.NaT, lib.NaT, lib.NaT, td],
                          dtype=np.dtype('m8[ns]'))
        assert_series_equal(results, expected)
        # Test standard conversion returns original
        results = s._convert(datetime=True)
        assert_series_equal(results, s)
        results = s._convert(numeric=True)
        expected = Series([nan, 3.1415, nan, nan])
        assert_series_equal(results, expected)
        results = s._convert(timedelta=True)
        assert_series_equal(results, s)
        # test pass-through and non-conversion when other types selected
        s = Series(['1.0', '2.0', '3.0'])
        results = s._convert(datetime=True, numeric=True, timedelta=True)
        expected = Series([1.0, 2.0, 3.0])
        assert_series_equal(results, expected)
        results = s._convert(True, False, True)
        assert_series_equal(results, s)
        s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 1, 0, 0)],
                   dtype='O')
        results = s._convert(datetime=True, numeric=True, timedelta=True)
        expected = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 1, 0,
                                                                0)])
        assert_series_equal(results, expected)
        results = s._convert(datetime=False, numeric=True, timedelta=True)
        assert_series_equal(results, s)
        td = datetime(2001, 1, 1, 0, 0) - datetime(2000, 1, 1, 0, 0)
        s = Series([td, td], dtype='O')
        results = s._convert(datetime=True, numeric=True, timedelta=True)
        expected = Series([td, td])
        assert_series_equal(results, expected)
        results = s._convert(True, True, False)
        assert_series_equal(results, s)
        s = Series([1., 2, 3], index=['a', 'b', 'c'])
        result = s._convert(numeric=True)
        assert_series_equal(result, s)
        # force numeric conversion
        r = s.copy().astype('O')
        r['a'] = '1'
        result = r._convert(numeric=True)
        assert_series_equal(result, s)
        r = s.copy().astype('O')
        r['a'] = '1.'
        result = r._convert(numeric=True)
        assert_series_equal(result, s)
        r = s.copy().astype('O')
        r['a'] = 'garbled'
        result = r._convert(numeric=True)
        expected = s.copy()
        expected['a'] = nan
        assert_series_equal(result, expected)
        # GH 4119, not converting a mixed type (e.g.floats and object)
        s = Series([1, 'na', 3, 4])
        result = s._convert(datetime=True, numeric=True)
        expected = Series([1, nan, 3, 4])
        assert_series_equal(result, expected)
        s = Series([1, '', 3, 4])
        result = s._convert(datetime=True, numeric=True)
        assert_series_equal(result, expected)
        # dates
        s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
                    datetime(2001, 1, 3, 0, 0)])
        s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
                     datetime(2001, 1, 3, 0, 0), 'foo', 1.0, 1,
                     Timestamp('20010104'), '20010105'], dtype='O')
        result = s._convert(datetime=True)
        expected = Series([Timestamp('20010101'), Timestamp('20010102'),
                           Timestamp('20010103')], dtype='M8[ns]')
        assert_series_equal(result, expected)
        result = s._convert(datetime=True, coerce=True)
        assert_series_equal(result, expected)
        expected = Series([Timestamp('20010101'), Timestamp('20010102'),
                           Timestamp('20010103'), lib.NaT, lib.NaT, lib.NaT,
                           Timestamp('20010104'), Timestamp('20010105')],
                          dtype='M8[ns]')
        result = s2._convert(datetime=True, numeric=False, timedelta=False,
                             coerce=True)
        assert_series_equal(result, expected)
        result = s2._convert(datetime=True, coerce=True)
        assert_series_equal(result, expected)
        s = Series(['foo', 'bar', 1, 1.0], dtype='O')
        result = s._convert(datetime=True, coerce=True)
        expected = Series([lib.NaT] * 2 + [Timestamp(1)] * 2)
        assert_series_equal(result, expected)
        # preserver if non-object
        s = Series([1], dtype='float32')
        result = s._convert(datetime=True, coerce=True)
        assert_series_equal(result, s)
        # r = s.copy()
        # r[0] = np.nan
        # result = r._convert(convert_dates=True,convert_numeric=False)
        # self.assertEqual(result.dtype, 'M8[ns]')
        # dateutil parses some single letters into today's value as a date
        expected = Series([lib.NaT])
        for x in 'abcdefghijklmnopqrstuvwxyz':
            s = Series([x])
            result = s._convert(datetime=True, coerce=True)
            assert_series_equal(result, expected)
            s = Series([x.upper()])
            result = s._convert(datetime=True, coerce=True)
            assert_series_equal(result, expected)
    def test_convert_no_arg_error(self):
        """_convert with no conversion flags at all must raise ValueError."""
        s = Series(['1.0', '2'])
        self.assertRaises(ValueError, s._convert)
    def test_convert_preserve_bool(self):
        """A lone bool among ints becomes an int under _convert."""
        s = Series([1, True, 3, 5], dtype=object)
        r = s._convert(datetime=True, numeric=True)
        e = Series([1, 1, 3, 5], dtype='i8')
        tm.assert_series_equal(r, e)
    def test_convert_preserve_all_bool(self):
        """An all-bool object Series keeps bool dtype under _convert."""
        s = Series([False, True, False, False], dtype=object)
        r = s._convert(datetime=True, numeric=True)
        e = Series([False, True, False, False], dtype=bool)
        tm.assert_series_equal(r, e)
| apache-2.0 |
trachelr/mne-python | examples/visualization/plot_clickable_image.py | 7 | 2317 | """
================================================================
Demonstration of how to use ClickableImage / generate_2d_layout.
================================================================
In this example, we open an image file, then use ClickableImage to
return 2D locations of mouse clicks (or load a file already created).
Then, we use generate_2d_layout to turn those xy positions into a layout
for use with plotting topo maps. In this way, you can take arbitrary xy
positions and turn them into a plottable layout.
"""
# Authors: Christopher Holdgraf <choldgraf@berkeley.edu>
#
# License: BSD (3-clause)
# NOTE: ``scipy.ndimage.imread`` was deprecated in SciPy 1.0 and removed in
# SciPy 1.2; matplotlib's ``plt.imread`` (backed by Pillow) reads the image
# equivalently, so scipy is no longer needed here.
import numpy as np
from matplotlib import pyplot as plt
from os import path as op
import mne
from mne.viz import ClickableImage, add_background_image  # noqa
from mne.channels import generate_2d_layout  # noqa
print(__doc__)
# Set parameters and paths
plt.rcParams['image.cmap'] = 'gray'
im_path = op.join(op.dirname(mne.__file__), 'data', 'image', 'mni_brain.gif')
# We've already clicked and exported
layout_path = op.join(op.dirname(mne.__file__), 'data', 'image')
layout_name = 'custom_layout.lout'
###############################################################################
# Load data and click
im = plt.imread(im_path)
plt.imshow(im)
"""
This code opens the image so you can click on it. Commented out
because we've stored the clicks as a layout file already.
# The click coordinates are stored as a list of tuples
click = ClickableImage(im)
click.plot_clicks()
coords = click.coords
# Generate a layout from our clicks and normalize by the image
lt = generate_2d_layout(np.vstack(coords), bg_image=im)
lt.save(layout_path + layout_name)  # To save if we want
"""
# We've already got the layout, load it
lt = mne.channels.read_layout(layout_name, path=layout_path, scale=False)
# Create some fake data
nchans = len(lt.pos)
nepochs = 50
sr = 1000
nsec = 5
events = np.arange(nepochs).reshape([-1, 1])
events = np.hstack([events, np.zeros([nepochs, 2])])
data = np.random.randn(nepochs, nchans, sr * nsec)
info = mne.create_info(nchans, sr, ch_types='eeg')
epochs = mne.EpochsArray(data, info, events)
# Using the native plot_topo function with the image plotted in the background
f = mne.viz.plot_topo(epochs.average(), layout=lt, fig_background=im)
| bsd-3-clause |
MJuddBooth/pandas | pandas/io/excel/_xlwt.py | 1 | 4557 | import pandas._libs.json as json
from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import _validate_freeze_panes
class _XlwtWriter(ExcelWriter):
    """ExcelWriter implementation producing legacy ``.xls`` files via xlwt."""
    engine = 'xlwt'
    supported_extensions = ('.xls',)
    def __init__(self, path, engine=None, encoding=None, mode='w',
                 **engine_kwargs):
        # Use the xlwt module as the Excel writer.
        import xlwt
        engine_kwargs['engine'] = engine
        # xlwt cannot open an existing workbook, so append mode is rejected.
        if mode == 'a':
            raise ValueError('Append mode is not supported with xlwt!')
        super(_XlwtWriter, self).__init__(path, mode=mode, **engine_kwargs)
        if encoding is None:
            encoding = 'ascii'
        self.book = xlwt.Workbook(encoding=encoding)
        # Pre-built cell styles for datetime and date values.
        self.fm_datetime = xlwt.easyxf(num_format_str=self.datetime_format)
        self.fm_date = xlwt.easyxf(num_format_str=self.date_format)
    def save(self):
        """
        Save workbook to disk.
        """
        return self.book.save(self.path)
    def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
                    freeze_panes=None):
        # Write the frame cells using xlwt.
        sheet_name = self._get_sheet_name(sheet_name)
        # Reuse an already created sheet of the same name, if any.
        if sheet_name in self.sheets:
            wks = self.sheets[sheet_name]
        else:
            wks = self.book.add_sheet(sheet_name)
            self.sheets[sheet_name] = wks
        if _validate_freeze_panes(freeze_panes):
            wks.set_panes_frozen(True)
            wks.set_horz_split_pos(freeze_panes[0])
            wks.set_vert_split_pos(freeze_panes[1])
        # Cache converted styles keyed by the JSON-serialized style dict
        # (plus number format) so identical styles are converted only once.
        style_dict = {}
        for cell in cells:
            val, fmt = self._value_with_fmt(cell.val)
            stylekey = json.dumps(cell.style)
            if fmt:
                stylekey += fmt
            if stylekey in style_dict:
                style = style_dict[stylekey]
            else:
                style = self._convert_to_style(cell.style, fmt)
                style_dict[stylekey] = style
            # Merged cells need a range write; plain cells a single write.
            if cell.mergestart is not None and cell.mergeend is not None:
                wks.write_merge(startrow + cell.row,
                                startrow + cell.mergestart,
                                startcol + cell.col,
                                startcol + cell.mergeend,
                                val, style)
            else:
                wks.write(startrow + cell.row,
                          startcol + cell.col,
                          val, style)
    @classmethod
    def _style_to_xlwt(cls, item, firstlevel=True, field_sep=',',
                       line_sep=';'):
        """Helper which recursively generates an xlwt easy style string.
        for example:
            hstyle = {"font": {"bold": True},
            "border": {"top": "thin",
                    "right": "thin",
                    "bottom": "thin",
                    "left": "thin"},
            "align": {"horiz": "center"}}
        will be converted to
            font: bold on; \
                    border: top thin, right thin, bottom thin, left thin; \
                    align: horiz center;
        """
        if hasattr(item, 'items'):
            # Dict-like: recurse.  Top level joins sections with line_sep,
            # nested levels join attributes with field_sep.
            if firstlevel:
                it = ["{key}: {val}"
                      .format(key=key, val=cls._style_to_xlwt(value, False))
                      for key, value in item.items()]
                out = "{sep} ".format(sep=(line_sep).join(it))
                return out
            else:
                it = ["{key} {val}"
                      .format(key=key, val=cls._style_to_xlwt(value, False))
                      for key, value in item.items()]
                out = "{sep} ".format(sep=(field_sep).join(it))
                return out
        else:
            # Leaf value: xlwt's easyxf spells booleans as "on"/"off".
            item = "{item}".format(item=item)
            item = item.replace("True", "on")
            item = item.replace("False", "off")
            return item
    @classmethod
    def _convert_to_style(cls, style_dict, num_format_str=None):
        """
        converts a style_dict to an xlwt style object
        Parameters
        ----------
        style_dict : style dictionary to convert
        num_format_str : optional number format string
        """
        import xlwt
        if style_dict:
            xlwt_stylestr = cls._style_to_xlwt(style_dict)
            style = xlwt.easyxf(xlwt_stylestr, field_sep=',', line_sep=';')
        else:
            style = xlwt.XFStyle()
        if num_format_str is not None:
            style.num_format_str = num_format_str
        return style
| bsd-3-clause |
ProjectsUCSC/NLP | User Modelling/stance.py | 1 | 27172 | from lib_stance import *
#from keras.layers.merge import Concatenate
from keras.layers import Merge
import copy
from collections import Counter
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
import math
# Lookup tables trained offline and pickled to disk, loaded at import time.
word2topic = pickle.load(open("word2topic", "r"))
# NOTE(review): this re-loads the *word2topic* pickle; the commented-out line
# below suggests the intent was a separate "embedding" file — confirm before
# relying on `embedding` being a distinct representation.
embedding = pickle.load(open("word2topic", "r"))
#embedding = pickle.load(open("embedding", "r"))
vocab = word2topic.keys()
# Sequence length / embedding depths (older values kept in trailing comments).
max_words = 25#30
depth_embed = 100#370
depth_distance = 100#368#70#100
##def getEmbedding_word2vec(sentence, model):
#
# global max_words, depth, no_features, train_length
## model = model[0]
# list = np.array([])
# for word in sentence:
# if word in model.wv.vocab:
# list = np.append(list, model.wv[word])
#
# #print list.size
# if(list.size > depth*max_words):
# list = list[0:depth*max_words]
# #print sentence
# pad = np.zeros(depth*max_words - list.size)
# list = np.append(list, pad)
# #print list.shape
# return list
def get_topic_rep(topic, word2topic, word2vec):
    """Return the summed-and-averaged vector for a (possibly multi-word) topic.

    Each word of `topic` found in the module-level `vocab` contributes the
    concatenation of its word2topic vector and its word2vec vector; words
    present in `vocab` but missing from the word2vec model contribute zeros
    for the word2vec half.

    NOTE(review): the average divides by the number of words in `topic`,
    not by the number of words actually found in `vocab` — confirm this
    weighting is intended.
    """
    global vocab
    words = str(topic).split(' ')
    # Zero template sized to the concatenated dimensionality.
    v = np.zeros(np.concatenate((word2topic['donald'],
                                 word2vec.wv['donald'])).shape)
    counter = 0
    for counter in range(len(words)):
        word = words[counter]
        if word in vocab:
            try:
                v += np.concatenate((word2topic[word], word2vec.wv[word]))
            except KeyError:
                # In the topic vocabulary but not in the word2vec model:
                # pad the word2vec half with zeros.  (Was a bare `except:`,
                # which also masked genuine shape/type errors.)
                v += np.concatenate((word2topic[word],
                                     np.zeros(word2vec.wv['donald'].shape)))
    v /= (counter + 1) * 1.0
    return v
def custom_loss(y_true, y_pred):
    """Experimental per-class cross-entropy loss over the Keras backend.

    NOTE(review): this looks unfinished — ``K.mean`` is handed three
    positional cross-entropy tensors (it expects one tensor plus axis
    arguments), and ``K.eval`` inside a loss breaks symbolic execution.
    Verify before wiring this into a model.
    """
    y = K.argmax(y_true, axis=1)
    print y[0:5]
##    y_true = np.array(y_true).astype('int64')
##
    print y_true[0:5]
##    length = y_true.get_shape()
##    l = tuple([length[i].value for i in range(0, len(length))])[0]
#    for i in range(y_pred.get_shape()[0].value):
#        y_pred[i] = y_pred[i][y[i]]
#
#    y_pred = K.log(y_pred[:, K.constant(y, dtype='int64')])
    return K.mean(K.categorical_crossentropy(y_pred[np.where(K.eval(K.equal(y, 0)))[0], :], y_true[np.where(K.eval(K.equal(y, 0)))[0], :]), K.categorical_crossentropy(y_pred[np.where(K.eval(K.equal(y, 1)))[0], :], y_true[np.where(K.eval(K.equal(y, 1)))[0], :]), K.categorical_crossentropy(y_pred[np.where(K.eval(K.equal(y, 2)))[0], :], y_true[np.where(K.eval(K.equal(y, 2)))[0], :]))
#    return K.sum(K.mean(K.dot(K.equal(y, 0), y_pred)), K.mean(K.dot(K.equal(y, 1), y_pred)), K.mean(K.dot(K.equal(y, 2), y_pred)))
def evaluate(y_test, thresholded_pred):
    """Print accuracy, label distributions, confusion matrix and macro F1.

    # assumes y_test and thresholded_pred are numpy arrays of class labels,
    # so `y_test == thresholded_pred` is elementwise — TODO confirm callers.
    """
    print "accuracy", (sum(abs(y_test == thresholded_pred))) / float(len(thresholded_pred))
    print Counter(y_test)
    print Counter(thresholded_pred)
    print confusion_matrix(y_test, thresholded_pred)
    print "f1 is", f1_score(y_test, thresholded_pred, average='macro')
def distance_embed(sentence):
    """Embed `sentence` as a flat vector of concatenated word2topic vectors.

    Words absent from the module-level `vocab` are skipped; the result is
    truncated or zero-padded to exactly max_words * depth_distance entries.
    """
    global max_words, depth_distance, word2topic
    # `vec` replaces a local previously named `list`, which shadowed the
    # builtin of the same name.
    vec = np.array([])
    for word in sentence:
        if word in vocab:
            vec = np.append(vec, word2topic[word])
    # Hoisted loop-invariant target length.
    limit = max_words * depth_distance
    if vec.size > limit:
        vec = vec[0:limit]
    pad = np.zeros(limit - vec.size)
    vec = np.append(vec, pad)
    return vec
def getEmbedding(sentence, model):
    """Embed `sentence` with vectors from `model`, flattened and padded.

    Words present in the module-level `vocab` but missing from `model`
    contribute a zero vector shaped like model['donald'].  The result is
    truncated or zero-padded to max_words * depth_embed entries.
    """
    global max_words, depth_embed, embedding
    # `vec` replaces a local previously named `list` (shadowed the builtin).
    vec = np.array([])
    for word in sentence:
        if word in vocab:
            try:
                vec = np.append(vec, model[word])
            except KeyError:
                # In vocab but not in the embedding model: pad with zeros.
                # (Was a bare `except:`, which also hid real errors.)
                vec = np.append(vec, np.zeros(model['donald'].shape))
    # Hoisted loop-invariant target length.
    limit = max_words * depth_embed
    if vec.size > limit:
        vec = vec[0:limit]
    pad = np.zeros(limit - vec.size)
    vec = np.append(vec, pad)
    return vec
#def getPOS(sentence):
#
# global max_words#, depth
# all_tags = CMUTweetTagger.runtagger_parse(sentence)
# list = np.array([])
# for i in range(len(sentence)):
# if sentence[i] in vocab:
# list = np.append(list, all_tags[i])
#
# #print list.size
# if(list.size > max_words):
# list = list[0:max_words]
# #print sentence
# pad = np.zeros(max_words - list.size)
# list = np.append(list, pad)
# #print list.shape
# return list
#def getEmbedding(sentence):
# global word2topic, vocab
# max_words = 30
# list = []#np.array([])
# for word in sentence:
# if word in vocab:
## list = np.append(list, word2topic[word])
# list.append(word2topic[word])
## list = np.array(list)
# #print list.size
# if(len(list) > max_words):
# list = list[0:max_words]
# #print sentence
# pad = [0] * 100# * (max_words - len(list))#np.zeros(max_words - list.size)
# for i in range((max_words - len(list))):
# list.append(pad)
## list.append(pad)
# #print list.shape
# return list
#getEmbedding(df['tokenized_sents'][0])
def run_model():
    """Build all model inputs from the raw tweet CSVs and train (or reload)
    the topic-aware bidirectional-LSTM sentiment model on GPU 1.

    Returns the 12-element list
    [model, X, X_w, X_test_global, X_w_test_global, y, y_test_global,
     df, df_test, d, one_hot, one_hot_test_global].

    NOTE(review): the indentation of this block was lost in extraction and
    has been reconstructed here; the extent of the two try/except stages
    (feature cache, model cache) should be confirmed against the original
    file.  If the "data_rnn" pickle load succeeds, several of the
    *_test_global names used later are never assigned.
    """
    global tech, politics, sports, music, genre, max_words, depth_embed, depth_distance, word2topic, vocab, K
    # Pin everything to the second GPU and claim all of its memory.
    with K.tf.device('/gpu:1'):
        gpu_options = K.tf.GPUOptions(per_process_gpu_memory_fraction=1.0)#0.8)#0.2)
        sess = K.tf.Session(config=K.tf.ConfigProto(gpu_options=gpu_options))
#        all_topics = np.concatenate((tech, politics, music, sports))
#        print "AAAAAAAAAAAAAAAAAAAAA"
#        print len(all_topics)
#        print all_topics
        # Stage 1: reuse cached features when the pickle exists, otherwise
        # rebuild every feature column from the raw CSV files.
        try:
            [X, y, df, d] = pickle.load(open("data_rnn", "r"))
            print d
#            df = df[df["topic"].isin(all_topics)]
        except:
            #filename = "Homework2_data.csv"
#            word2topic = pickle.load(open("word2topic", "r"))
            [df, df_test] = readData(filename1, filename2)
            #df = df[df["topic"].isin(all_topics)]
            df['sentiment'] = pd.to_numeric(df['sentiment'])
#            frame = [df0, df3]
#            df_test = pd.concat(frame)
#            df_test['sentiment'] = pd.to_numeric(df_test['sentiment'])
#            topics_array = np.array(([tech, politics, music, sports]))
#            print genre
#            for index, row in df.iterrows():
#                tweet_topic = row['topic']
#                # print "tweet_topic", tweet_topic
#                for i in range(len(topics_array)):
#                    if tweet_topic in topics_array[i]:
#                        # print "ta", topics_array[i]
#                        # df["topic"][index] = genre[i]
#                        df.ix[index, 'topic'] = genre[i]
#                        # print "df", df["topic"][index]
#                        break
            # Remove topics of no interest
            print "length of df is", len(df)
            # print "from joined data\n", Counter(list(df["user_id"])).most_common(50)
            indices = []
            # Clean raw tweet text (HTML entities, URLs), then tokenize.
            # df['tweet'] = df['tweet'].apply(cleanhtml).apply(cleanUrl).apply(removeMention).apply(removeTrailingHash);
            # df['tweet'] = df['tweet'].apply(cleanhtml).apply(cleanUrl).apply(removeTrailingHash);
            df['tweet'] = df['tweet'].apply(cleanhtml).apply(cleanUrl)#.apply(removeTrailingHash);
            df['tweet'] = tokenize_and_stopwords(df['tweet'])
            # df = df.sample(frac=1).reset_index(drop=True)
            # df = shuffle(df)
            print df.size
            df['tokenized_sents'] = df.apply(lambda row: nltk.word_tokenize(row['tweet']), axis=1)
            df_test['tweet'] = df_test['tweet'].apply(cleanhtml).apply(cleanUrl)#.apply(removeTrailingHash);
            df_test['tweet'] = tokenize_and_stopwords(df_test['tweet'])
            # df = df.sample(frac=1).reset_index(drop=True)
            # df = shuffle(df)
            print df_test.size
            df_test['tokenized_sents'] = df_test.apply(lambda row: nltk.word_tokenize(row['tweet']), axis=1)
            # Word2vec: load a previously trained model, else train and save.
            try:
                word2vec = wv.Word2Vec.load("word2vec")
                #model.similarity("this", "is")
                # model.init_sims(replace=True)
                print "loaded"
            except:
                word2vec = wv.Word2Vec(df["tokenized_sents"], size=depth_embed, window=5, min_count=5, workers=4)
                word2vec.save("word2vec")
            #X.shape[0]#7349
            # Fixed-length per-tweet tensors: topic-space embedding and
            # word2vec embedding, reshaped to (n, max_words, depth_embed).
            df['embedding'] = df['tokenized_sents'].apply(getEmbedding, args=(word2topic,))
            df['word2vec'] = df['tokenized_sents'].apply(getEmbedding, args=(word2vec.wv,))
            X = list(df['embedding'])
            X_w = list(df['word2vec'])
            X = np.reshape(np.ravel(X), (len(X), max_words, depth_embed))
            X_w = np.reshape(np.ravel(X_w), (len(X_w), max_words, depth_embed))
            # a = copy.deepcopy(X)#np.array(df['embedding'])
            df['tweet_rep'] = df['tokenized_sents'].apply(distance_embed)
            #### a = list(df['tweet_rep'])
            #### a = np.reshape(np.ravel(a), (len(a), max_words, depth_distance))
            df['topic_rep'] = df['topic'].apply(get_topic_rep, args=(word2topic, word2vec,))
            # Same feature pipeline for the held-out test frame.
            df_test['embedding'] = df_test['tokenized_sents'].apply(getEmbedding, args=(word2topic,))
            df_test['word2vec'] = df_test['tokenized_sents'].apply(getEmbedding, args=(word2vec.wv,))
            X_test_global = list(df_test['embedding'])
            X_w_test_global = list(df_test['word2vec'])
            X_test_global = np.reshape(np.ravel(X_test_global), (len(X_test_global), max_words, depth_embed))
            X_w_test_global = np.reshape(np.ravel(X_w_test_global), (len(X_w_test_global), max_words, depth_embed))
            # a = copy.deepcopy(X)#np.array(df['embedding'])
            df_test['tweet_rep'] = df_test['tokenized_sents'].apply(distance_embed)
            #### a = list(df['tweet_rep'])
            #### a = np.reshape(np.ravel(a), (len(a), max_words, depth_distance))
            df_test['topic_rep'] = df_test['topic'].apply(get_topic_rep, args=(word2topic, word2vec,))
            d = []
            # a = np.reshape(a, ())
            #### b = list(df['topic_rep'])
            #### print b[0]
            # print b
            # print b.shape
            #### b = np.reshape(np.ravel(np.ravel(b)), (X.shape[0], 1, depth_distance))
            ##### c = (a - b)**2
            ###### d = c
            ##### for i1 in range(len(c)):
            #####     for j1 in range(len(c[0])):
            #####         d.append(abs(sum(c[i1][j1])))
            ##### d = np.array(d)
            ##### d = np.reshape(d, (len(a), max_words))
            ##### d[d==0] = 0.1
            ##### d = 1.0 / d
            ##### print "d[0] is !!!", d[0]
            # df['distance'] = d#1.0 / d#sum(sum(sum(abs(np.array(df['embedding']) - np.array(df['topic_rep'])))))
            # one_hot =
            # df['pos'] = df['tweet'].apply(getPOS)
            # X = np.column_stack((np.array(df['embedding']), np.array(df['pos'])))
            # for i in range(len(X)):
            #     X[i] = X[i][0:]
            # B = np.array([])
            # np.dstack((X, B)).shape
            # One-hot sentiment targets for train and test.
            # y = np.array(df['sentiment'])
            y = np.array(pd.get_dummies(df['sentiment']))
            y_test_global = np.array(pd.get_dummies(df_test['sentiment']))
        ### No dumping
        # try:
        #     pickle.dump([X, y, df, d], open("data_rnn", "wb"))
        # except:
        #     "dumping data failed"
        print len(X[0])
        print len(X)
        # The full dataset is used for training (the index slicing that
        # split off a test partition is disabled).
        X_train = X#[0:12200]
        # X_test = X[12200:]
        # X_train = np.concatenate((X_train, X_train_w), axis=1)
        X_train_w = X_w#[0:12200]
        # X_test_w = X_w[12200:]
        y_train = y#[0:12200]
        # y_test = y[12200:]
        print " Y train!!\n", y_train[0:5]
        print list(df['sentiment'])[0:5]
        # print y_test[0:5]
        print "global", y_test_global[0:5]
        ## LOAD MODEL
        # Stage 2: reload a previously trained model when present,
        # otherwise build and fit a fresh three-branch network.
        try:
            model = load_model('modelc_rnn_new')
            one_hot = list(df['topic_rep'])#(pd.get_dummies(df['topic']))
            one_hot = np.reshape(np.ravel(np.ravel(one_hot)), (len(one_hot), 1, 2*depth_distance))
            one_hot_test_global = list(df_test['topic_rep'])#(pd.get_dummies(df['topic']))
            one_hot_test_global = np.reshape(np.ravel(np.ravel(one_hot_test_global)), (len(one_hot_test_global), 1, 2*depth_distance))
        except:
            # Word model
            print "model not found!!"
            # Branch 1: stacked BiLSTMs over the topic-space embeddings.
            model_word = Sequential()
            model_word.add(Bidirectional(LSTM(3 * max_words, activation='relu', return_sequences=True), input_shape=(max_words, depth_embed)))
            model_word.add(Dropout(0.1))
            model_word.add(Bidirectional(LSTM(2 * max_words, activation='relu', return_sequences=True)))
            model_word.add(Dropout(0.1))
            model_word.add(Bidirectional(LSTM(max_words, activation='tanh', return_sequences=True)))
            model_word.add(Dropout(0.1))
            # Branch 2: the same architecture over word2vec embeddings.
            model_word_w = Sequential()
            model_word_w.add(Bidirectional(LSTM(3 * max_words, activation='relu', return_sequences=True), input_shape=(max_words, depth_embed)))
            model_word_w.add(Dropout(0.1))
            model_word_w.add(Bidirectional(LSTM(2 * max_words, activation='relu', return_sequences=True), input_shape=(max_words, depth_embed)))
            model_word_w.add(Dropout(0.1))
            model_word_w.add(Bidirectional(LSTM(max_words, activation='tanh', return_sequences=True)))
            model_word_w.add(Dropout(0.1))
            # model_word.add(Bidirectional(LSTM(max_words, return_sequences=True)))
            # model_word.add(Dropout(0.1))
            # model_word.add(Flatten())
            # model_word.add(MaxPooling2D(pool_size=(2, 1)))
            # model_word.add(Dropout(0.1))
            # model_word.add(Dense((max_words), activation="tanh"))
            ## Reverse
            # model_word_r = Sequential()
            # model_word_r.add(LSTM(max_words, input_shape=(max_words, depth), consume_less='gpu', go_backwards=True))
            # model_word_r.add(Dropout(0.1))
            ## model_word_r.add(LSTM(max_words, input_shape=(max_words, depth), consume_less='gpu', go_backwards=True))
            # Topic model
            print len(set(df['topic']))
            print "set is", set(df['topic'])
            # print "topic rep!! \n", df['topic_rep']
            one_hot = list(df['topic_rep'])#(pd.get_dummies(df['topic']))
            # print df['topic'][0:5]
            print "init one hot", one_hot[0:2]
            # one_hot = one_hot.as_matrix()
            # one_hot = d#df['distance']
            print len(one_hot)
            # print len(one_hot[0])
            # print one_hot[0]
            ## one_hot = np.reshape(one_hot, (one_hot.shape[0], max_words, 1))
            # one_hot = np.reshape(np.ravel(np.ravel(one_hot)), (len(one_hot), depth_distance, 1))
            one_hot = np.reshape(np.ravel(np.ravel(one_hot)), (len(one_hot), 1, 2*depth_distance))
            one_hot_train = one_hot#[0:12200]
            # one_hot_test = one_hot[12200:]
            print "one hot shape", one_hot.shape
            one_hot_test_global = list(df_test['topic_rep'])#(pd.get_dummies(df['topic']))
            one_hot_test_global = np.reshape(np.ravel(np.ravel(one_hot_test_global)), (len(one_hot_test_global), 1, 2*depth_distance))
            # Branch 3: BiLSTM over the per-tweet topic representation.
            model_topic = Sequential()
            # , return_sequences=True
            model_topic.add(Bidirectional(LSTM(max_words, activation='tanh', return_sequences=True), input_shape=(1, 2*depth_distance)))
            model_topic.add(Dropout(0.1))
            # model_topic.add(Bidirectional(LSTM(max_words, return_sequences=True)))
            # model_topic.add(Flatten())
            # model_topic.add(MaxPooling2D(pool_size=(2, 1)))
            # model_topic.add(Dropout(0.1))
            # model_topic.add(Dense(4, activation="tanh"))
            # model_topic.add(Dropout(0.1))
            # Merge forward and backward
            # merged = Merge([model_word_f, model_word_r], mode='concat')#, concat_axis=1)
            # model_word = Sequential()
            # model_word.add(merged)
            # model_word.add(Dropout(0.1))
            ## model_word.add(MaxPooling2D(pool_size=(2, 1)))
            ## model_word.add(Dropout(0.1))
            # model_word.add(LSTM(max_words, input_shape=(2*max_words, 1)))
            # model_word.add(Dropout(0.1))
            # Merge merged and topic info
            merged2 = Merge([model_word, model_word_w, model_topic], mode='concat', concat_axis=1)
            # merged2 = Merge([model_word, model_topic], mode='concat', concat_axis=1)
            # merged = Concatenate([model_word, model_topic], axis=-1)
            # Combined head: further BiLSTM stacks down to a 3-way softmax.
            model = Sequential()
            model.add(merged2)
            # model.add(Dropout(0.1))
            model.add(Bidirectional(LSTM(2*max_words, activation='relu', return_sequences=True)))#)))
            model.add(Dropout(0.1))
            model.add(Bidirectional(LSTM(2*max_words, activation='relu', return_sequences=True)))
            # ## # model.add(Flatten())
            model.add(Dropout(0.1))
            # # model.add(Bidirectional(LSTM(max_words), input_shape=(4 + max_words, 1)))
            # print "added additional Dense, no flatten"
            ### model.add(Dense(max_words, activation='tanh'))
            ## model.add(Dropout(0.1))
            # #model.add(Dense(1, activation='linear', W_constraint=maxnorm(3)))
            model.add(Bidirectional(LSTM(2*max_words, activation='tanh', return_sequences=True)))#)))
            model.add(Dropout(0.1))
            # model.add(Bidirectional(LSTM(max_words, activation='tanh', return_sequences=True)))#)))
            # model.add(Dropout(0.1))
            model.add(LSTM(3, activation="softmax"))
            # model.add(LSTM(1, activation="linear"))
            # optimizer = RMSprop(lr=0.01)
            # model.compile(loss='categorical_crossentropy', optimizer=optimizer)
            adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
            model.compile(loss='categorical_crossentropy', optimizer=adam)
            print "Custom!!!"
            # model.compile(loss=custom_loss, optimizer=adam)
            print "came here saaaaar!!!!!!\n\n"
            # print X[0:5]
            # print Y_train[0:5]
            print "model changedd !!!"
            model.fit([X_train, X_train_w, one_hot_train], y_train, batch_size=64, epochs=35, validation_split=0.05, callbacks=[history])
            # Persist architecture (JSON) and weights (HDF5) separately,
            # matching what load_model() expects.
            model_json = model.to_json()
            with open("modelc_rnn_new.json", "w") as json_file:
                json_file.write(model_json)
            model.save_weights("modelc_rnn_new.h5")
            print("Saved model to disk")
            # print(history.History)
        return [model, X, X_w, X_test_global, X_w_test_global, y, y_test_global, df, df_test, d, one_hot, one_hot_test_global]
# print X.shape
# print X[0]
# print X[0]
# for i in X[0]:
# print i
def load_model(filename):
    """Reload a Keras model saved as <filename>.json + <filename>.h5.

    The JSON file holds the architecture, the HDF5 file the weights --
    the counterpart of the save logic in run_model().
    """
    # Context manager guarantees the handle is closed even if reading
    # or JSON parsing raises (the original closed it manually).
    with open(filename + '.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    # load weights into new model
    model.load_weights(filename + ".h5")
    # [X, y, df, d] = pickle.load(open("data_rnn", "r"))
    return model#, X, y, df, d]
def sentiment_classifier():
    """Driver: obtain the trained model via run_model() and report accuracy,
    confusion matrix and macro-F1 on the test set and the training set --
    first over the raw argmax classes, then collapsed to polarity buckets.

    NOTE(review): indentation reconstructed after extraction; verify the
    try/except nesting against the original file.
    """
    global max_words, depth_distance, depth_embed
    print "in senti class, changes, class\n\n"
    try:
        # 'assert False' deliberately forces the except branch, so the full
        # run_model() pipeline is always executed instead of the stale
        # load_model() path below.
        assert False
        print "in try\n\n"
        [model, X, y, df, d] = load_model('modelc_rnn_new')
        print "Data found"
        print "done"
    except Exception, e:
        print "Caught an exception\n\n"
        print "Error is", str(e), "\n\n"
        [model, X, X_w, X_test_global, X_w_test_global, y, y_test_global, df, df_test, d, one_hot, one_hot_test_global] = run_model()
    print len(X_test_global), len(X_w_test_global), len(one_hot_test_global)
    print "length of X is", len(X)
    # X_test = X#[12200:]
    # y_test = y#[12200:]
    # X_test_w = X_w#[12200:]
    X_train_w = X_w#[0:12200]
    X_train = X#[0:12200]
    y_train = y#[0:12200]
    topics = list(df['topic'])
    # ____________________________________________________________________________________________________________HERE_________________
    # one_hot = d#df['distance']
    # one_hot = pd.get_dummies(df['topic'])
    # one_hot = one_hot.as_matrix()
    # print len(set(df['topic']))
    # print "set is", set(df['topic'])
    # print len(one_hot)
    # print len(one_hot[0])
    # print one_hot[0]
    ## print len(all_topics)
    ## print all_topics
    # print set(df["topic"])
    # one_hot = np.array(df['topic_rep'])#np.array(pd.get_dummies(df['topic']))
    # one_hot = np.reshape(one_hot, (X.shape[0], 1, depth_distance))
    one_hot_train = one_hot#[0:12200]
    # one_hot_test = one_hot[12200:]
    one_hot_test = one_hot_test_global
    X_test = X_test_global
    X_test_w = X_w_test_global
    # y_test_global = np.insert(y_test_global, 0, 0, axis=1)
    # print y_test.shape, y_test_global.shape
    y_test = y_test_global
    # X_test = X_test[0:12200]
    # X_test_w = X_test_w[0:12200]
    # one_hot_test = one_hot_test[0:12200]
    print len(X_test), len(X_test_w), len(one_hot_test)
    # Held-out evaluation: per-class probabilities for every test tweet.
    pred = model.predict([X_test, X_test_w, one_hot_test], batch_size = 64)#, Y_train, batch_size=32, verbose=1, sample_weight=None)
    print pred[0:5]
    print y_test[0:5]
    # pred[:, 0] *= 1.5
    # margin = 0.06
    # indexes = pred[:, 0] + margin >= pred[:, 1]
    # print indexes
    # pred[indexes, 0] = pred[indexes, 1] + 0.01
    # print pred[0:5]
    ##### print "This is the prediction"
    ###### y[y >= 0.1] = 1
    ###### y[y < 0.1] = 0
    ##### pred.shape = (pred.shape[0],)
    ##### print pred[0:20]
    ##### print "true labels"
    ##### print y_test[0:20]
    ###### print sum(sum(y == Y_train))
    ###### print (len(X_train) * len(X_train[0]))
    ##### print (sum(abs(y_test - pred))) / float(len(pred))
    ##### thresh1 = 1.5#49#1.8#1.5
    ##### thresh2 = 3.9
    ##### thresholded_pred = copy.deepcopy(pred)
    ##### thresholded_pred[(pred > (-thresh1 + 0.0)) & (pred < thresh2)] = 0
    ##### thresholded_pred[(pred >= thresh1) & (pred < thresh2)] = 3#1
    ##### thresholded_pred[pred >= thresh2] = 5#2
    ##### thresholded_pred[(pred > -thresh2) & (pred <= (-thresh1 + 0.0))] = -3#1
    ##### thresholded_pred[pred <= -thresh2] = -5#2
    ##### thresholded_pred = thresholded_pred.astype('int8')
    ##### print "Testing"
    ##### evaluate(y_test, thresholded_pred)
    #####
    ##### y_test[y_test > 0] = 1
    ##### y_test[y_test < 0] = -1
    #####
    ##### thresholded_pred[thresholded_pred > 0] = 1
    ##### thresholded_pred[thresholded_pred < 0] = -1
    # Evaluate on the raw argmax class indices ...
    thresholded_pred = pred.argmax(axis=1)
    y_test = y_test.argmax(axis=1)
    evaluate(y_test, thresholded_pred)
    # ... then collapse class indices into polarity buckets
    # (<=1 -> -1, ==2 -> 0, >2 -> 1) and evaluate again.
    thresholded_pred[thresholded_pred<=1] = -1
    thresholded_pred[thresholded_pred==2] = 0
    thresholded_pred[thresholded_pred>2] = 1
    y_test[y_test<=1] = -1
    y_test[y_test==2] = 0
    y_test[y_test>2] = 1
    evaluate(y_test, thresholded_pred)
    # Same two-stage evaluation on the training data.
    pred = model.predict([X_train, X_train_w, one_hot_train], batch_size = 64)#, Y_train, batch_size=32, verbose=1, sample_weight=None)
    print pred[0:5]
    print y_train[0:5]
    #pred[:,0] *= 1.5
    print "This is the prediction"
    #### y[y >= 0.1] = 1
    #### y[y < 0.1] = 0
    #### pred.shape = (pred.shape[0],)
    #### print pred[0:20]
    #### print "true labels"
    #### print y_train[0:20]
    ##### print sum(sum(y == Y_train))
    ##### print (len(X_train) * len(X_train[0]))
    #### print (sum(abs(y_train - pred))) / float(len(pred))
    #### thresh1 = 1.5
    #### thresh2 = 3.9
    #### thresholded_pred = copy.deepcopy(pred)
    #### thresholded_pred[(pred > (-thresh1 + 0.0)) & (pred < thresh2)] = 0
    #### thresholded_pred[(pred >= thresh1) & (pred < thresh2)] = 3#1
    #### thresholded_pred[pred >= thresh2] = 5#2
    #### thresholded_pred[(pred > -thresh2) & (pred <= (-thresh1 + 0))] = -3#1
    #### thresholded_pred[pred <= -thresh2] = -5#2
    #### thresholded_pred = thresholded_pred.astype('int8')
    #### print "Training"
    #### evaluate(y_train, thresholded_pred)
    #### y_train[y_train > 0] = 1
    #### y_train[y_train < 0] = -1
    ####
    #### thresholded_pred[thresholded_pred > 0] = 1
    #### thresholded_pred[thresholded_pred < 0] = -1
    thresholded_pred = pred.argmax(axis=1)
    y_train = y_train.argmax(axis=1)
    evaluate(y_train, thresholded_pred)
    thresholded_pred[thresholded_pred<=1] = -1
    thresholded_pred[thresholded_pred==2] = 0
    thresholded_pred[thresholded_pred>2] = 1
    y_train[y_train<=1] = -1
    y_train[y_train==2] = 0
    y_train[y_train>2] = 1
    evaluate(y_train, thresholded_pred)
    # model_dup = duplicate_model('modelc_rnn_new')
    # layer_output = model_dup.predict([X_test, one_hot_test], batch_size = 64)
    #
    ### get_last_layer_output = K.function([model.layers[0].input],
    ###                                    [model.layers[2].output])
    ## get_last_layer_output = K.function([model.layers[0].input, K.learning_phase()],
    ##                                    [model.layers[2].output])
    ### output in train mode = 0
    ### layer_output = np.array(get_last_layer_output([X_train[0:1200], 0])[0])
    ##
    ### output in train mode = 0
    ##
    ### X = [X_test, one_hot_test]
    ## print X_test.shape
    ## print one_hot_test.shape
    ## print len(X_test)
    ## print len(one_hot_test)
    ##
    ##
    ## X_2 = np.concatenate((X_test, one_hot_test), axis=2)
    ## start = 0
    ## increment = 100
    ## flag = 1
    ## print len(X_test)
    ## print "now!!"
    ## while start+increment <= len(X_test):
    ###     X = [[X_test[start:start+increment], 1], [one_hot_test[start:start+increment], 1]]
    ##     if flag:
    ##         layer_output = get_last_layer_output([X_2[start:start+increment], 0])[0]#get_last_layer_output([[X_test[start:start+increment], 0], [one_hot_test[:, start:start+increment], 0]])[0]
    ##         flag = 0
    ##     else:
    ##         layer_output = np.concatenate((layer_output, get_last_layer_output([X_2[start:start+increment], 0])[0]))
    ##     start += increment
    ## if start != len(X_test):
    ###     X = [X_test[start:start+increment], one_hot_test[start:start+increment]]
    ##     layer_output = np.concatenate((layer_output, get_last_layer_output([X_2[start:start+increment], 0])[0]))
    # print "length of hidden", len(layer_output[0])
    # for iter in range(10):
    #     print df["tweet"][iter], layer_output[iter]
# Script entry point: runs the full train-and-evaluate pipeline on import.
sentiment_classifier()
| mit |
zaxtax/scikit-learn | sklearn/datasets/tests/test_lfw.py | 55 | 7877 | """This test for the LFW require medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
# Scratch directories: one populated with fake LFW data by setup_module(),
# and one left empty to exercise the download_if_missing=False error path.
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')

# Synthetic person names used to generate the fake dataset.
FAKE_NAMES = [
    'Abdelatif_Smith',
    'Abhati_Kepler',
    'Camara_Alvaro',
    'Chen_Dupont',
    'John_Lee',
    'Lin_Bauman',
    'Onur_Lopez',
]
def setup_module():
    """Test fixture run once and common to all tests of this module.

    Builds a fake LFW layout under LFW_HOME: random 250x250 RGB jpegs for
    each FAKE_NAMES person plus pairing metadata files in the LFW format.
    Seeded RNGs make the generated dataset deterministic across runs.
    """
    if imsave is None:
        raise SkipTest("PIL not installed.")

    if not os.path.exists(LFW_HOME):
        os.makedirs(LFW_HOME)

    random_state = random.Random(42)
    np_rng = np.random.RandomState(42)

    # generate some random jpeg files for each person
    counts = {}
    for name in FAKE_NAMES:
        folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
        if not os.path.exists(folder_name):
            os.makedirs(folder_name)
        n_faces = np_rng.randint(1, 5)
        counts[name] = n_faces
        for i in range(n_faces):
            file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
            uniface = np_rng.randint(0, 255, size=(250, 250, 3))
            try:
                imsave(file_path, uniface)
            except ImportError:
                raise SkipTest("PIL not installed")

    # add some random file pollution to test robustness
    with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
        f.write(six.b('Text file to be ignored by the dataset loader.'))

    # generate some pairing metadata files using the same format as LFW
    with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
        f.write(six.b("10\n"))
        # 5 same-person pairs (needs people with at least two pictures) ...
        more_than_two = [name for name, count in six.iteritems(counts)
                         if count >= 2]
        for i in range(5):
            name = random_state.choice(more_than_two)
            first, second = random_state.sample(range(counts[name]), 2)
            f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
        # ... and 5 different-person pairs.
        for i in range(5):
            first_name, second_name = random_state.sample(FAKE_NAMES, 2)
            first_index = random_state.choice(np.arange(counts[first_name]))
            second_index = random_state.choice(np.arange(counts[second_name]))
            f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
                                                second_name, second_index)))

    with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
        f.write(six.b("Fake place holder that won't be tested"))

    with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
        f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
    """Test fixture (clean up) run once after all tests of this module"""
    # Remove both scratch directories created at import time, skipping any
    # that no longer exist.
    for scratch_dir in (SCIKIT_LEARN_DATA, SCIKIT_LEARN_EMPTY_DATA):
        if os.path.isdir(scratch_dir):
            shutil.rmtree(scratch_dir)
@raises(IOError)
def test_load_empty_lfw_people():
    # An empty data home with downloading disabled must raise IOError.
    fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
    # load_lfw_people is a deprecated alias; calling it must emit the
    # exact DeprecationWarning message below.
    msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
           "removed in 0.19."
           "Use fetch_lfw_people(download_if_missing=False) instead.")
    assert_warns_message(DeprecationWarning, msg, load_lfw_people,
                         data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
    lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
                                  min_faces_per_person=3, download_if_missing=False)

    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
    assert_equal(lfw_people.images.shape, (10, 62, 47))
    assert_equal(lfw_people.data.shape, (10, 2914))

    # the target is an array of person integer ids
    assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])

    # names of the persons can be found using the target_names array
    expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
    assert_array_equal(lfw_people.target_names, expected_classes)

    # It is possible to ask for the original data without any cropping or
    # color conversion and with no limit on the number of pictures per person
    lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
                                  resize=None, slice_=None, color=True, download_if_missing=False)
    assert_equal(lfw_people.images.shape, (17, 250, 250, 3))

    # the ids and class names are the same as previously
    assert_array_equal(lfw_people.target,
                       [0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
    assert_array_equal(lfw_people.target_names,
                       ['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
                        'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
    # No fake person has 100 pictures, so the filter must reject everything.
    fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
    # An empty data home with downloading disabled must raise IOError.
    fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
    # load_lfw_pairs is a deprecated alias; calling it must emit the
    # exact DeprecationWarning message below.
    msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
           "removed in 0.19."
           "Use fetch_lfw_pairs(download_if_missing=False) instead.")
    assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
                         data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
    lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)

    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
    assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))

    # the target is whether the person is the same or not
    assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])

    # names of the persons can be found using the target_names array
    expected_classes = ['Different persons', 'Same person']
    assert_array_equal(lfw_pairs_train.target_names, expected_classes)

    # It is possible to ask for the original data without any cropping or
    # color conversion
    lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
                                      resize=None, slice_=None, color=True, download_if_missing=False)
    assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))

    # the ids and class names are the same as previously
    assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
davidgbe/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 128 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
#
# Two shared latent variables drive both X and Y, so the first two
# components of each dataset covary strongly across datasets.

n = 500
# 2 latents vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)

latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))

# BUG FIX: ``n / 2`` is true (float) division on Python 3 and float slice
# indices raise TypeError -- use integer floor division for the 50/50 split.
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]

print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS

# Transform data
# ~~~~~~~~~~~~~~
# Fit on the training half, project both halves into the latent space.
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)

# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On the diagonal, plot X vs Y scores on each component: points should
#    lie close to the first diagonal when components are well correlated.
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
          np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")

plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
          np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")

# 2) Off diagonal, plot components 1 vs 2 for X and Y: cross-component
#    correlation should be weak (spherical point cloud).
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
          % np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())

plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
          % np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2

n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5

pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B: the estimate should recover the [1, 2, 0, ...]
# pattern in the first two rows
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1

n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)

cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
# BUG FIX: the transforms previously called the earlier ``plsca`` estimator,
# so the fitted CCA model was never actually applied to the data.
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
Cophy08/ggplot | ggplot/tests/__init__.py | 8 | 10135 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib as mpl
import matplotlib.pyplot as plt
from nose.tools import with_setup, make_decorator, assert_true
import warnings
# Figure size active at import time; restored by teardown_package().
figsize_orig = mpl.rcParams["figure.figsize"]
def setup_package():
    # Widen the default figure size so baseline-image comparisons render
    # on a consistent 11x8 inch canvas for every test in the package.
    mpl.rcParams["figure.figsize"] = (11.0, 8.0)
def teardown_package():
    # Restore the figure size captured at import time (figsize_orig).
    mpl.rcParams["figure.figsize"] = figsize_orig
import os
# Testing framework shamelessly stolen from matplotlib...
# Tests which should be run with 'python tests.py' or via 'must be
# included here.
# Modules run by the default 'python tests.py' invocation.
# BUG FIX: 'ggplot.tests.test_theme_mpl' was listed twice, causing that
# module's tests to be collected and run twice; the duplicate is removed.
default_test_modules = [
    'ggplot.tests.test_basic',
    'ggplot.tests.test_readme_examples',
    'ggplot.tests.test_ggplot_internals',
    'ggplot.tests.test_geom',
    'ggplot.tests.test_stat',
    'ggplot.tests.test_stat_calculate_methods',
    'ggplot.tests.test_stat_summary',
    'ggplot.tests.test_geom_rect',
    'ggplot.tests.test_geom_dotplot',
    'ggplot.tests.test_geom_bar',
    'ggplot.tests.test_qplot',
    'ggplot.tests.test_geom_lines',
    'ggplot.tests.test_geom_linerange',
    'ggplot.tests.test_geom_pointrange',
    'ggplot.tests.test_faceting',
    'ggplot.tests.test_stat_function',
    'ggplot.tests.test_scale_facet_wrap',
    'ggplot.tests.test_scale_log',
    'ggplot.tests.test_reverse',
    'ggplot.tests.test_ggsave',
    'ggplot.tests.test_theme_mpl',
    'ggplot.tests.test_colors',
    'ggplot.tests.test_chart_components',
    'ggplot.tests.test_legend',
    'ggplot.tests.test_element_target',
    'ggplot.tests.test_element_text',
    'ggplot.tests.test_theme',
    'ggplot.tests.test_theme_bw',
    'ggplot.tests.test_theme_gray',
    'ggplot.tests.test_theme_seaborn'
    ]

# Allow nose to parallelize this package's tests across processes.
_multiprocess_can_split_ = True

# Check that the test directories exist
if not os.path.exists(os.path.join(
        os.path.dirname(__file__), 'baseline_images')):
    raise IOError(
        'The baseline image directory does not exist. '
        'This is most likely because the test data is not installed. '
        'You may need to install ggplot from source to get the '
        'test data.')
def _assert_same_ggplot_image(gg, name, test_file, tol=17):
    """Asserts that the ggplot object produces the right image.

    Renders *gg* to a matplotlib figure and delegates the actual
    pixel comparison (against the stored baseline named *name*) to
    ``_assert_same_figure_images``.
    """
    fig = gg.draw()  # render the ggplot object to a matplotlib figure
    return _assert_same_figure_images(fig, name, test_file, tol=tol)
class ImagesComparisonFailure(Exception):
    """Raised when a rendered image differs from its baseline beyond tol."""
    pass
def _assert_same_figure_images(fig, name, test_file, tol=17):
    """Asserts that the figure object produces the right image.

    Parameters
    ----------
    fig : matplotlib figure to render and compare.
    name : baseline image name (``.png`` is appended when missing).
    test_file : path of the calling test module; the baseline is looked
        up in ``baseline_images/<test module name>/`` next to it.
    tol : RMS difference above which the comparison fails.

    Raises
    ------
    ImagesComparisonFailure
        If the rendered image differs from the baseline by more than *tol*.
    Exception
        If the baseline image is missing entirely.
    """
    import os
    import shutil
    from matplotlib import cbook
    from matplotlib.testing.compare import compare_images

    if ".png" not in name:
        name = name + ".png"
    basedir = os.path.abspath(os.path.dirname(test_file))
    basename = os.path.basename(test_file)
    subdir = os.path.splitext(basename)[0]
    baseline_dir = os.path.join(basedir, 'baseline_images', subdir)
    result_dir = os.path.abspath(os.path.join('result_images', subdir))

    if not os.path.exists(result_dir):
        cbook.mkdirs(result_dir)

    orig_expected_fname = os.path.join(baseline_dir, name)
    actual_fname = os.path.join(result_dir, name)

    def make_test_fn(fname, purpose):
        # Tag a filename with its purpose, e.g. foo.png -> foo-expected.png
        base, ext = os.path.splitext(fname)
        return '%s-%s%s' % (base, purpose, ext)
    expected_fname = make_test_fn(actual_fname, 'expected')

    # Save the figure before testing whether the original image
    # actually exists. This makes creating new tests much easier,
    # as the result image can afterwards just be copied.
    fig.savefig(actual_fname)
    if os.path.exists(orig_expected_fname):
        # Copy the baseline next to the result so failures are easy to diff.
        shutil.copyfile(orig_expected_fname, expected_fname)
    else:
        raise Exception("Baseline image %s is missing" % orig_expected_fname)
    err = compare_images(expected_fname, actual_fname,
                         tol, in_decorator=True)
    if err:
        msg = 'images not close: {actual:s} vs. {expected:s} (RMS {rms:.2f})'.format(**err)
        raise ImagesComparisonFailure(msg)
    return err
def get_assert_same_ggplot(test_file):
    """Build an ``assert_same_ggplot`` helper bound to *test_file*.

    Usage: ``assert_same_ggplot = get_assert_same_ggplot(__file__)``
    """
    def bound(*args, **kwargs):
        # Inject the test file so callers never have to pass it themselves.
        kwargs["test_file"] = test_file
        return _assert_same_ggplot_image(*args, **kwargs)
    bound.__doc__ = _assert_same_ggplot_image.__doc__
    return bound
def assert_same_elements(first, second, msg=None):
    """Assert that two sequences have the same length and pairwise-equal items."""
    assert_true(len(first) == len(second), "different length")
    pairwise_equal = all([a == b for a, b in zip(first, second)])
    assert_true(pairwise_equal, "Unequal: %s vs %s" % (first, second))
def image_comparison(baseline_images=None, tol=17, extensions=None):
    """
    call signature::

      image_comparison(baseline_images=['my_figure'], tol=17)

    Compare images generated by the test with those specified in
    *baseline_images*, which must correspond, else an
    ImagesComparisonFailure exception will be raised.

    Keyword arguments:

      *baseline_images*: list
        A list of strings specifying the names of the images generated
        by calls to :meth:`matplotlib.figure.savefig`.

      *tol*: (default 17)
        The RMS threshold above which the test is considered failed.
    """
    if baseline_images is None:
        raise ValueError('baseline_images must be specified')
    if extensions:
        # ignored, only for compatibility with matplotlib's decorator!
        pass

    def compare_images_decorator(func):
        import inspect
        _file = inspect.getfile(func)

        def decorated():
            # make sure we don't carry over bad images from former tests.
            assert len(plt.get_fignums()) == 0, "no of open figs: %s -> find the last test with ' " \
                                                "python tests.py -v' and add a '@cleanup' decorator." % \
                                                str(plt.get_fignums())
            func()
            # One figure per declared baseline, in creation order.
            assert len(plt.get_fignums()) == len(baseline_images), "different number of " \
                                                                   "baseline_images and actual " \
                                                                   "plots."
            for fignum, baseline in zip(plt.get_fignums(), baseline_images):
                figure = plt.figure(fignum)
                _assert_same_figure_images(figure, baseline, _file, tol=tol)
        # also use the cleanup decorator to close any open figures!
        return make_decorator(cleanup(func))(decorated)
    return compare_images_decorator
def cleanup(func):
    """Decorator to add cleanup to the testing function

      @cleanup
      def test_something():
          " ... "

    The teardown closes all open matplotlib figures and resets any
    warning filters changed by the test; the setup (``_setup``) forces
    the Agg backend and a known rc/locale state.

    Note that `@cleanup` is useful *only* for test functions, not for test
    methods or inside of TestCase subclasses.
    """
    def _teardown():
        plt.close('all')
        warnings.resetwarnings()  # reset any warning filters set in tests
    return with_setup(setup=_setup, teardown=_teardown)(func)
# This is called from the cleanup decorator
def _setup():
    """Put matplotlib into the known state the image baselines expect.

    Forces an English locale, the Agg backend, default rcParams with
    hinting/antialiasing enabled, clears the font caches, and verifies
    that no figures leaked from a previous test.
    """
    # The baseline images are created in this locale, so we should use
    # it during all of the tests.
    import locale
    import warnings
    from matplotlib.backends import backend_agg, backend_pdf, backend_svg

    try:
        locale.setlocale(locale.LC_ALL, str('en_US.UTF-8'))
    except locale.Error:
        try:
            # Windows spelling of the same locale.
            locale.setlocale(locale.LC_ALL, str('English_United States.1252'))
        except locale.Error:
            warnings.warn(
                "Could not set locale to English/United States. "
                "Some date-related tests may fail")

    mpl.use('Agg', warn=False)  # use Agg backend for these tests
    # mpl.use() above is a no-op if a backend was already selected, so
    # double-check we really got a headless-capable backend.
    if mpl.get_backend().lower() != "agg" and mpl.get_backend().lower() != "qt4agg":
        raise Exception(("Using a wrong matplotlib backend ({0}), which will not produce proper "
                         "images").format(mpl.get_backend()))

    # These settings *must* be hardcoded for running the comparison
    # tests
    mpl.rcdefaults()  # Start with all defaults
    mpl.rcParams['text.hinting'] = True
    mpl.rcParams['text.antialiased'] = True
    #mpl.rcParams['text.hinting_factor'] = 8

    # Clear the font caches. Otherwise, the hinting mode can travel
    # from one test to another.
    backend_agg.RendererAgg._fontd.clear()
    backend_pdf.RendererPdf.truetype_font_cache.clear()
    backend_svg.RendererSVG.fontd.clear()

    # make sure we don't carry over bad plots from former tests
    assert len(plt.get_fignums()) == 0, "no of open figs: %s -> find the last test with ' " \
                                        "python tests.py -v' and add a '@cleanup' decorator." % \
                                        str(plt.get_fignums())
# This is here to run it like "from ggplot.tests import test; test()"
def test(verbosity=1):
    """run the ggplot test suite

    Runs nose over ``default_test_modules`` on the Agg backend and
    restores the previously selected backend afterwards.  Returns the
    boolean success flag from ``nose.run``.
    """
    old_backend = mpl.rcParams['backend']
    try:
        mpl.use('agg')
        import nose
        import nose.plugins.builtin
        from matplotlib.testing.noseclasses import KnownFailure
        from nose.plugins.manager import PluginManager
        from nose.plugins import multiprocess

        # store the old values before overriding
        plugins = []
        plugins.append( KnownFailure() )
        plugins.extend( [plugin() for plugin in nose.plugins.builtin.plugins] )

        manager = PluginManager(plugins=plugins)
        config = nose.config.Config(verbosity=verbosity, plugins=manager)

        # Nose doesn't automatically instantiate all of the plugins in the
        # child processes, so we have to provide the multiprocess plugin with
        # a list.
        multiprocess._instantiate_plugins = [KnownFailure]

        success = nose.run( defaultTest=default_test_modules,
                            config=config,
                            )
    finally:
        # Restore the original backend even if the run blew up.
        if old_backend.lower() != 'agg':
            mpl.use(old_backend)

    return success
test.__test__ = False  # nose: this function is not a test
| bsd-2-clause |
andaag/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
raghavrv/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
pyk/rojak | rojak-analyzer/rojak_ovr.py | 4 | 16244 | # Rojak OVR
# This is enhanced version of Rojak SVM
# Rojak SVM only work for pair of candidates
# in Rojak OvR, we embrace all 13 labels
import csv
import sys
import re
import pickle
import itertools
from bs4 import BeautifulSoup
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from sklearn import metrics
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
import numpy as np
import stopwords
# Compile regex to remove non-alphanum char
nonalpha = re.compile('[^a-z\-]+')
# Normalize the word
def normalize_word(w):
    """Lowercase *w*, squash runs of non ``[a-z-]`` chars to spaces, trim."""
    lowered = w.lower()
    collapsed = nonalpha.sub(' ', lowered)
    return collapsed.strip()
# Compile regex to remove author signature from the text
# Example: (nkn/dnu)
author_signature = re.compile('\([a-zA-Z]+/[a-zA-Z]+\)')
# Function to clean the raw string
def clean_string(s):
    """Clean one raw news document into a normalized token string.

    Strips HTML and the author signature, normalizes each word, removes a
    few Indonesian affixes (di-..-kan, me-..-kan, -lah, -nya) and rewrites
    negation words into a ``tidak_<next word>`` bigram.  Returns a UTF-8
    encoded byte string (Python 2 style).
    """
    result_str = []
    # Remove the noise: html tag
    clean_str = BeautifulSoup(s, 'lxml').text
    # Remove the noise: author signature
    clean_str = author_signature.sub(' ', clean_str)
    # For each word we clear out the extra format
    words = clean_str.split(' ')
    word_len = len(words)
    skipword = False
    for i in xrange(word_len):  # xrange: this module is Python 2
        # Skip negation bigram
        # Example: 'tidak_bisa' we skip the 'bisa'
        if skipword:
            skipword = False
            continue
        # Current word
        w = words[i]
        word = normalize_word(w)
        # TODO: handle negation & synonym
        # Remove -kan in di..kan form
        # Example: disebutkan => disebut
        if (len(word) > 5 and word[:2] == 'di' and word[-3:] == 'kan'):
            word = word[:len(word)-3]
        # Remove -kan in me..kan form
        # Example: menyetorkan => menyetor
        if (len(word) > 5 and word[:2] == 'me' and word[-3:] == 'kan'):
            word = word[:len(word)-3]
        # Remove -lah form
        # Example: bukanlah
        if (len(word) > 5 and word[-3:] == 'lah'):
            word = word[:len(word)-3]
        # Remove -nya form
        # Example: bukannya => bukan, disayanginya => disayang,
        # komunikasinya => komunikasi
        if (len(word) > 5 and word[-3:] == 'nya'):
            word = word[:len(word)-3]
        # Normalize the negation
        if word in ['tidak', 'enggak', 'bukan', 'tdk', 'bkn', 'tak']:
            # NOTE(review): `i < word_len-2` means a negation word in the
            # last *two* positions is never merged with its successor even
            # when one exists -- looks like an off-by-one (i < word_len-1
            # would be expected); confirm intent before changing.
            if i < (word_len-2):
                word = '{}_{}'.format('tidak', words[i+1])
                skipword = True
            else:
                word = 'tidak'
        # Drop empty tokens and lone hyphens left over by normalization.
        if word != '' and word != '-':
            result_str.append(word)
    return ' '.join(result_str).encode('utf-8', 'ignore')
# Given list of news texts, this function will return a sparse matrix
# feature X
def extract_features(news_texts, vocabulary=None, method='tf'):
    """Vectorize *news_texts* into a sparse term matrix.

    Returns ``(X, feature_names)``.  ``method='tf'`` yields raw n-gram
    counts; ``method='tfidf'`` applies a tf-idf transform on top.
    """
    # Bug fix: only TfidfVectorizer is imported at module level, so the
    # original body raised NameError on CountVectorizer/TfidfTransformer.
    # Import them locally here.
    from sklearn.feature_extraction.text import (CountVectorizer,
        TfidfTransformer)
    # We use {uni,bi,tri}gram as feature here
    # The feature should appear in at least 3 docs
    vectorizer = CountVectorizer(ngram_range=(1,3),
        vocabulary=vocabulary, decode_error='ignore',
        min_df=3).fit(news_texts)
    X = vectorizer.transform(news_texts)
    if method == 'tfidf':
        X = TfidfTransformer().fit_transform(X)
    return X, vectorizer.get_feature_names()
# Plot confusion matrix
def plot_confusion_matrix(cm, classes, normalize=False, title='',
        cmap=pyplot.cm.Blues, classifier_name=''):
    """Render a confusion-matrix heatmap and save it as a png.

    cm: 2-d confusion matrix; classes: tick labels; normalize: row-
    normalize counts to rates before plotting; title/classifier_name are
    combined into the plot title and the snake_cased output file name.
    """
    pyplot.close('all')
    # Bug fix: normalize *before* drawing, so the heatmap colors, the
    # per-cell text and the text-color threshold all come from the same
    # matrix (the original normalized only after imshow()).
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    pyplot.imshow(cm, interpolation='nearest', cmap=cmap)
    pyplot.title(title)
    pyplot.colorbar()
    tick_marks = np.arange(len(classes))
    pyplot.xticks(tick_marks, classes, rotation=45)
    pyplot.yticks(tick_marks, classes)

    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        # White text on dark cells, black on light ones.
        pyplot.text(j, i, cm[i, j],
            horizontalalignment="center",
            color="white" if cm[i, j] > thresh else "black")

    pyplot.ylabel('True label')
    pyplot.xlabel('Predicted label' + '\n\n' + classifier_name)
    pyplot.tight_layout()
    full_title = title + ' ' + classifier_name
    file_name = '_'.join(full_title.lower().split(' '))
    pyplot.savefig(file_name + '.png')
class RojakOvR():
# Storing classifier
classifiers = {}
# Map of label name and the corresponding classifier ID
# We create 6 classifiers, one for each candidates to infer
# the sentiment of the news => positive, negative or oot.
# Each news will run through this 6 classifiers.
classifier_label = {
'pos_agus': 'classifier_agus',
'neg_agus': 'classifier_agus',
'pos_sylvi': 'classifier_sylvi',
'neg_sylvi': 'classifier_sylvi',
'pos_ahok': 'classifier_ahok',
'neg_ahok': 'classifier_ahok',
'pos_djarot': 'classifier_djarot',
'neg_djarot': 'classifier_djarot',
'pos_anies': 'classifier_anies',
'neg_anies': 'classifier_anies',
'pos_sandi': 'classifier_sandi',
'neg_sandi': 'classifier_sandi',
'oot': 'all_classifiers'
}
# Map classifier ID and the training and test data
training_data_text = {}
training_data_class = {}
test_data_text = {}
test_data_class = {}
# Collect the data from csv file
def _collect_data_from_csv(self, input_file, container_text,
container_class):
# Read the input_file
csv_file = open(input_file)
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
# Get the data
try:
title = row['title']
raw_content = row['raw_content']
sentiment_1 = row['sentiment_1']
sentiment_2 = row['sentiment_2']
sentiment_3 = row['sentiment_3']
except KeyError as err:
print 'Cannot load csv:', err
sys.exit()
# Clean the string
clean_title = clean_string(title)
clean_content = clean_string(raw_content)
clean_text = '{} {}'.format(clean_title, clean_content)
# Collect the labels
labels = [sentiment_1, sentiment_2, sentiment_3]
for label in labels:
# Skip unknown label
if not label in self.classifier_label: continue
classifier_id = self.classifier_label[label]
if classifier_id == 'all_classifiers':
for key in self.classifier_label:
if key == 'oot': continue
classifier_id = self.classifier_label[key]
if (classifier_id in container_text
and classifier_id in container_class):
container_text[classifier_id].append(clean_text)
container_class[classifier_id].append(label)
else:
container_text[classifier_id] = [clean_text]
container_class[classifier_id] = [label]
else:
if (classifier_id in container_text
and classifier_id in container_class):
container_text[classifier_id].append(clean_text)
container_class[classifier_id].append(label)
else:
container_text[classifier_id] = [clean_text]
container_class[classifier_id] = [label]
csv_file.close()
# input_file is a path to csv with the following headers:
# 'title', 'raw_content', 'sentiment_1', 'sentiment_2' and 'sentiment_3'
# output_file is a path where the model written into
def train(self, input_file, output_file):
# Collect the training data
self._collect_data_from_csv(input_file, self.training_data_text,
self.training_data_class)
# For each classifier, we extract the features and train the
# classifier
for key in self.training_data_text:
news_texts = self.training_data_text[key]
news_labels = self.training_data_class[key]
# Create feature extractor
feature_extractor = TfidfVectorizer(ngram_range=(1,3),
decode_error='ignore', min_df=3,
stop_words=stopwords.stopwords)
feature_extractor.fit(news_texts)
# For debugging purpose
print '=========='
print key
print '----------'
for word in feature_extractor.get_feature_names():
print word
print '=========='
# Extract the features
X = feature_extractor.transform(news_texts)
y = news_labels
# Train the classifier
classifier = OneVsRestClassifier(LinearSVC(random_state=0))
classifier.fit(X, y)
# Save the classifier
self.classifiers[key] = {
'classifier': classifier,
'feature_extractor': feature_extractor
}
# Save the model as binary file
pickle.dump(self.classifiers, open(output_file, 'w'),
protocol=pickle.HIGHEST_PROTOCOL)
def load_model(self, model):
self.classifiers = pickle.load(open(model))
def eval(self, model, test_data):
# Load the model
self.load_model(model)
# Collect the test data
self._collect_data_from_csv(test_data, self.test_data_text,
self.test_data_class)
# We do the evaluation
for key in self.test_data_text:
news_texts = self.test_data_text[key]
news_labels = self.test_data_class[key]
classifier = self.classifiers[key]['classifier']
feature_extractor = self.classifiers[key]['feature_extractor']
# Extract the features
X = feature_extractor.transform(news_texts)
y_true = news_labels
# Predict
y_pred = classifier.predict(X)
# Evaluate the score
precision = metrics.precision_score(y_true, y_pred,
average='micro')
recall = metrics.recall_score(y_true, y_pred,
average='micro')
f1_score = 2*((precision*recall)/(precision+recall))
print 'classifier:', key
print 'precision:', precision
print 'recall:', recall
print 'f1:', f1_score
# Create the confusion matrix visualization
conf_matrix = metrics.confusion_matrix(y_true, y_pred)
plot_confusion_matrix(conf_matrix,
classes=classifier.classes_,
title='Confusion matrix without normalization',
classifier_name=key)
def predict_proba(self, news_texts, threshold=-1):
result = []
for key in self.classifiers:
classifier = self.classifiers[key]['classifier']
feature_extractor = self.classifiers[key]['feature_extractor']
X = feature_extractor.transform(news_texts)
res = classifier.decision_function(X)
result = result + zip(classifier.classes_, res[0])
return result
if __name__ == '__main__':
rojak = RojakOvR()
rojak.train('data_detikcom_labelled_740.csv', 'rojak_ovr_stopwords_5_model.bin')
rojak.eval('rojak_ovr_stopwords_5_model.bin', 'data_detikcom_labelled_740.csv')
print '== Test'
test_news_texts = ['''
Ogah Ikut 'Perang' Statement di Pilgub DKI, Agus: Menghabiskan Energi
<strong>Jakarta </strong> - Pasangan incumbent DKI Basuki T Purnama (
Ahok) dan Djarot Saiful Hidayat beberapa kali tampak adu statement
dengan pasangan bakal calon Anies Baswedan dan Sandiaga Uno. Kandidat
bakal Cagub DKI Agus Harimurti mengaku tak mau ikut-ikutan terlebih
dahulu. <br> <br> ""Pertama masa kampanye baru dimulai 28 Oktober.
Artinya itu berdasarkan UU itulah yang akan saya gunakan langsung
official untuk menyebarluaskan menyampaikan gagasan visi misi program
kerja dan sebagainya,"" ungkap Agus. <br> <br> Hal tersebut
disampaikannya saat berbincang di redaksi detikcom, Jalan Warung Jati
Barat Raya, Jakarta Selatan, Kamis (6/10/2016). Agus mengaku saat ini
lebih ingin memanfaatkan waktu untuk mensosialisasikan diri sesuai
tahapan KPUD. <br> <br> ""Pada akhirnya tentu saya akan lakukan itu.
Saya menghindari konflik karena hati saya mengatakan buat apa saya
mencari dari kesalahan orang atau terlibat dalam konflik karena
menghabiskan energi,"" ucapnya. <br> <br> Apalagi menurut Agus, ia
berhubungan baik dengan para pasangan calon tersebut. Mantan Danyon 203/
Arya Kemuning itu mengaku ingin fokus menyapa masyarakat bersama dengan
pasangan cawagubnya, Sylvia Murni. <br> <br> ""Saatnya nanti kita akan
langsung ke masyarakat. (Untuk mensosialisasikan) yang saya miliki,
mengapa anda harus memahami dan mengapa ada kepentingan Anda untuk
memilih saya,"" kata Agus. <br> <br> Kehadiran putra sulung Presiden
ke-6 RI Susilo Bambang Yudhoyono (SBY) itu seperti antitesa seorang Ahok
yang dikenal keras. Agus dinilai sebagai sosok yang santun dan membumi. <
br> <br> ""Insya Allah yang saya tampilkan sehari-hari itu apa adanya
saya. Karena saya tidak setuju kalau mengubah karakter yang sudah
dibentuk selama puluhan tahun kemudian dibentuk hanya untuk memenuhi
permintaan pasar atau permintaan media,"" terang dia. <br> <br>
""Artinya saya menjadi sesuatu yang artificial, saya di CFD berlari
menyapa masyarakat itu juga yang sebetulnya saya biasa lakukan dulu
ataupun sebelum saya punya kesibukan di kota lain,"" imbuh Agus. <br> <
br> Mantan perwira berpangkat Mayor itu memastikan penampilan atau sikap
sehari-harinya bukan sebagai sesuatu yang palsu. Agus juga menyatakan
ada banyak aspirasi masyarakat yang ia dapati ketika turun menyapa ke
lapangan. <br> <br> ""Mereka mengekpresikan banyak hal. Yang paling (
saya) senang ya tentu mendoakan 'Pak, semoga sukses'. Tetapi saya tidak
ingin hanya disenangi tapi untuk mencari tahu apa yang menjadi keluhan
dan kebutuhan masyakarat,"" urainya. <br> <br> Lantas apa yang paling
banyak didapat Agus ketika menyapa warga? <br> <br> ""Mereka ingin
kehidupan ekonominya menjadi baik, lingkungan lebih baik, nggak terlalu
macet, bisa memiliki akses kesehatan yang lebih baik. Tapi banyak juga
yang mereka (mengatakan) 'Pak kami ingin dihargai, ingin diayomi,'. As
simpel as that,"" jawab Agus. <br> <br> Pernyataan itu tampaknya seperti
menyindir Ahok yang beberapa kali beradu mulut dengan warga. Ini terkait
dengan kebijakan Ahok yang tidak diterima warga. Tak jarang petahana itu
mengeluarkan kata-kata makian. <br> <br> ""(Warga juga bilang) 'kami
punya harga diri pak. Kami nggak butuh itu, tidak perlu yang
berlebihan-lebihan asalkan kami dihargai sebagai warga masyarakat'.
Sebagai human being yang memiliki hak dan kewajiban yang juga untuk
memajukan daerahnya. Jadi kadang ada yang begitu juga,"" cerita Agus. <
br> <br> Seperti diketahui, Ahok beberapa kali memberi pernyataan
'serangan' kepada pasangan Anies-Sandiaga. Ahok sempat terlibat argumen
lewat media tentang kebersihan sungai di Jakarta. Kemudian Ahok dan
Sandiaga juga 'perang' pernyataan tentang pembuktian harta terbalik.
Terakhir Ahok menyerang dengan mengatakan Sandiaga adalah pengemplang
pajak karena ikut program Tax Amnesty. <br> <br> <iframe src=""http://
tv.detik.com/20detik/embed/161007018/"" frameborder=""0""
scrolling=""no"" width=""420"" height=""236""
allowfullscreen=""allowfullscreen""></iframe> <br> <br> <strong>(ear/
imk)</strong>"
''']
test_news_label = 'pos_agus'
prediction = rojak.predict_proba(test_news_texts)
print 'Text news:'
print test_news_texts
print 'True label:', test_news_label
print 'Prediction:', prediction
| bsd-3-clause |
massmutual/scikit-learn | examples/mixture/plot_gmm_classifier.py | 250 | 3918 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <ronweiss@gmail.com>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
    """Draw one covariance ellipse per mixture component onto *ax*.

    Uses the top-left 2x2 block of each component's covariance matrix
    (the two plotted feature dimensions) and the component mean.
    """
    for n, color in enumerate('rgb'):
        # Eigen-decomposition of the 2x2 covariance gives the ellipse
        # axis lengths (v) and orientation (w).
        v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
        u = w[0] / np.linalg.norm(w[0])
        angle = np.arctan2(u[1], u[0])
        angle = 180 * angle / np.pi  # convert to degrees
        # NOTE(review): the factor 9 presumably just scales the ellipse
        # for visual coverage of the cluster -- confirm before changing.
        v *= 9
        ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
                                  180 + angle, color=color)
        ell.set_clip_box(ax.bbox)
        ell.set_alpha(0.5)
        ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers / 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
    """Minimal reference perceptron used to cross-check sklearn's version.

    Classic update rule: on a misprediction for sample (x, t), add t * x
    to the weights and t to the bias.
    """

    def __init__(self, n_iter=1):
        self.n_iter = n_iter

    def fit(self, X, y):
        _, n_features = X.shape
        self.w = np.zeros(n_features, dtype=np.float64)
        self.b = 0.0

        for _ in range(self.n_iter):
            for sample, target in zip(X, y):
                # Update only when the sample is misclassified.
                if self.predict(sample)[0] != target:
                    self.w += target * sample
                    self.b += target

    def project(self, X):
        # Signed distance (up to scaling) from the decision hyperplane.
        return np.dot(X, self.w) + self.b

    def predict(self, X):
        # Accept a single sample or a batch; return the sign of the margin.
        return np.sign(self.project(np.atleast_2d(X)))
def test_perceptron_accuracy():
    """Dense and sparse inputs must both reach at least 70% train accuracy."""
    for data in (X, X_csr):
        model = Perceptron(n_iter=30, shuffle=False)
        model.fit(data, y)
        assert_true(model.score(data, y) >= 0.7)
def test_perceptron_correctness():
    """The reference implementation and sklearn's must learn the same weights."""
    y_bin = y.copy()
    y_bin[y != 1] = -1

    reference = MyPerceptron(n_iter=2)
    reference.fit(X, y_bin)

    sk_clf = Perceptron(n_iter=2, shuffle=False)
    sk_clf.fit(X, y_bin)

    assert_array_almost_equal(reference.w, sk_clf.coef_.ravel())
def test_undefined_methods():
    """Perceptron must not expose probability-estimate attributes."""
    model = Perceptron()
    for attr in ("predict_proba", "predict_log_proba"):
        assert_raises(AttributeError, lambda name: getattr(model, name), attr)
| bsd-3-clause |
ssaeger/scikit-learn | examples/neural_networks/plot_mlp_training_curves.py | 56 | 3596 | """
========================================================
Compare Stochastic learning strategies for MLPClassifier
========================================================
This example visualizes some training loss curves for different stochastic
learning strategies, including SGD and Adam. Because of time-constraints, we
use several small datasets, for which L-BFGS might be more suitable. The
general trend shown in these examples seems to carry over to larger datasets,
however.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
# different learning rate schedules and momentum parameters
params = [{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': 0,
'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0,
'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'algorithm': 'adam'}]
labels = ["constant learning-rate", "constant with momentum",
"constant with Nesterov's momentum",
"inv-scaling learning-rate", "inv-scaling with momentum",
"inv-scaling with Nesterov's momentum", "adam"]
plot_args = [{'c': 'red', 'linestyle': '-'},
{'c': 'green', 'linestyle': '-'},
{'c': 'blue', 'linestyle': '-'},
{'c': 'red', 'linestyle': '--'},
{'c': 'green', 'linestyle': '--'},
{'c': 'blue', 'linestyle': '--'},
{'c': 'black', 'linestyle': '-'}]
def plot_on_dataset(X, y, ax, name):
    """Fit one MLPClassifier per entry in ``params`` on (X, y) and draw the
    resulting training-loss curves on the given axis.

    ``labels`` and ``plot_args`` (module globals) supply, positionally, the
    legend text and matplotlib style for each parameterisation.
    """
    print("\nlearning on dataset %s" % name)
    ax.set_title(name)
    X = MinMaxScaler().fit_transform(X)
    # digits is larger but converges fairly quickly
    max_iter = 15 if name == "digits" else 400
    fitted = []
    for label, param in zip(labels, params):
        print("training: %s" % label)
        clf = MLPClassifier(verbose=0, random_state=0,
                            max_iter=max_iter, **param)
        clf.fit(X, y)
        fitted.append(clf)
        print("Training set score: %f" % clf.score(X, y))
        print("Training set loss: %f" % clf.loss_)
    for clf, label, style in zip(fitted, labels, plot_args):
        ax.plot(clf.loss_curve_, label=label, **style)
# One subplot per toy dataset, all sharing a single figure and legend.
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
# load / generate some toy datasets
iris = datasets.load_iris()
digits = datasets.load_digits()
data_sets = [(iris.data, iris.target),
             (digits.data, digits.target),
             datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
             datasets.make_moons(noise=0.3, random_state=0)]
# Names pair positionally with data_sets above.
for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits',
                                                    'circles', 'moons']):
    plot_on_dataset(*data, ax=ax, name=name)
# Shared legend: the line handles of the last-plotted axis are reused, since
# every axis draws the same set of curves (one per entry in `params`).
fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center")
plt.show()
| bsd-3-clause |
ahnitz/mpld3 | visualize_tests.py | 15 | 8001 | """
Visualize Test Plots
This script will go through all the plots in the ``mpld3/test_plots``
directory, and save them as D3js to a single HTML file for inspection.
"""
import os
import glob
import sys
import gc
import traceback
import itertools
import json
import contextlib
import matplotlib
matplotlib.use('Agg') # don't display plots
import matplotlib.pyplot as plt
import mpld3
from mpld3 import urls
from mpld3.mpld3renderer import MPLD3Renderer
from mpld3.mplexporter import Exporter
plt.rcParams['figure.figsize'] = (6, 4.5)
plt.rcParams['savefig.dpi'] = 80
TEMPLATE = """
<html>
<head>
<script type="text/javascript" src={d3_url}></script>
<script type="text/javascript" src={mpld3_url}></script>
<style type="text/css">
.left_col {{
float: left;
width: 50%;
}}
.right_col {{
margin-left: 50%;
width: 50%;
}}
.fig {{
height: 500px;
}}
{extra_css}
</style>
</head>
<body>
<div id="wrap">
<div class="left_col">
{left_col}
</div>
<div class="right_col">
{right_col}
</div>
</div>
<script>
{js_commands}
</script>
</body>
</html>
"""
MPLD3_TEMPLATE = """
<div class="fig" id="fig{figid:03d}"></div>
"""
JS_TEMPLATE = """
!function(mpld3){{
{extra_js}
mpld3.draw_figure("fig{figid:03d}", {figure_json});
}}(mpld3);
"""
@contextlib.contextmanager
def mpld3_noshow():
    """Context manager to use mpld3 with show() disabled.

    Yields the mpld3 module with ``show`` replaced by a no-op.  The original
    function is restored on exit — including when the body raises, which the
    previous version failed to do (it left ``show`` monkey-patched forever).
    """
    import mpld3
    _show = mpld3.show
    mpld3.show = lambda *args, **kwargs: None
    try:
        yield mpld3
    finally:
        mpld3.show = _show
@contextlib.contextmanager
def use_dir(dirname=None):
    """Context manager to temporarily change the working directory.

    :param dirname: directory to chdir into; ``None`` keeps the current one.

    The previous working directory is restored on exit, even when the body
    raises — the original version skipped restoration on error, leaving the
    process stranded in ``dirname``.
    """
    cwd = os.getcwd()
    if dirname is None:
        dirname = cwd
    os.chdir(dirname)
    try:
        yield
    finally:
        os.chdir(cwd)
class ExecFile(object):
    """
    Execute a plotting script and collect the matplotlib figures it creates.

    Collected figures can then be exported as PNG snapshots (:meth:`iter_png`)
    or as mpld3 JSON (:meth:`iter_json`).
    """

    def __init__(self, filename, execute=True, pngdir='_pngs'):
        self.filename = filename
        if execute:
            self.execute_file()
        if not os.path.exists(pngdir):
            os.makedirs(pngdir)
        basename = os.path.splitext(os.path.basename(filename))[0]
        # BUG FIX: "{0:2d}" space-padded the figure number, yielding file
        # names with embedded spaces like "plot_ 1.png"; zero-pad instead.
        self.pngfmt = os.path.join(pngdir, basename + "_{0:02d}.png")

    def execute_file(self):
        """
        Execute the file, catching matplotlib figures
        """
        dirname, fname = os.path.split(self.filename)
        print('plotting {0}'.format(fname))

        # close any currently open figures (the original did this twice)
        plt.close('all')

        with mpld3_noshow() as mpld3:
            with use_dir(dirname):
                try:
                    # execute file, forcing __name__ == '__main__'
                    exec(open(os.path.basename(self.filename)).read(),
                         {'plt': plt, 'mpld3': mpld3, '__name__': '__main__'})
                    gcf = matplotlib._pylab_helpers.Gcf
                    fig_mgr_list = gcf.get_all_fig_managers()
                    self.figlist = sorted([manager.canvas.figure
                                           for manager in fig_mgr_list],
                                          key=lambda fig: fig.number)
                # best-effort: report a broken script but keep processing the
                # rest (narrowed from a bare except, which also swallowed
                # KeyboardInterrupt/SystemExit)
                except Exception:
                    print(80 * '_')
                    print('{0} is not compiling:'.format(fname))
                    traceback.print_exc()
                    print(80 * '_')
                finally:
                    gc.collect()

    def iter_png(self):
        """Save each collected figure as a PNG, yielding the file paths."""
        for fig in self.figlist:
            fig_png = self.pngfmt.format(fig.number)
            fig.savefig(fig_png)
            yield fig_png

    def iter_json(self):
        """Yield (figure JSON string, extra JS, extra CSS) per figure."""
        for fig in self.figlist:
            renderer = MPLD3Renderer()
            Exporter(renderer, close_mpl=False).run(fig)
            fig, fig_json, extra_css, extra_js = renderer.finished_figures[0]
            yield (json.dumps(fig_json), extra_js, extra_css)
def combine_testplots(wildcard='mpld3/test_plots/*.py',
                      outfile='_test_plots.html',
                      pngdir='_pngs',
                      d3_url=None, mpld3_url=None):
    """Generate figures from the plots and save to an HTML file.

    Parameters
    ----------
    wildcard : string or list
        a regexp or list of regexps matching files to test
    outfile : string
        the path at which the output HTML will be saved
    pngdir : string
        directory into which static PNG snapshots are written
    d3_url : string
        the URL of the d3 library to use. If not specified, a standard web
        address will be used.
    mpld3_url : string
        the URL of the mpld3 library to use. If not specified, a standard web
        address will be used.
    """
    if isinstance(wildcard, str):
        filenames = glob.glob(wildcard)
    else:
        filenames = itertools.chain(*(glob.glob(w) for w in wildcard))

    png_files = []
    json_figs = []
    for fname in filenames:
        executed = ExecFile(fname, pngdir=pngdir)
        png_files.extend(executed.iter_png())
        json_figs.extend(executed.iter_json())

    # Left column holds the live mpld3 renderings, right column the PNGs.
    left_col = [MPLD3_TEMPLATE.format(figid=idx)
                for idx in range(len(json_figs))]
    js_commands = [JS_TEMPLATE.format(figid=idx,
                                      figure_json=fig_json,
                                      extra_js=fig_js)
                   for idx, (fig_json, fig_js, _) in enumerate(json_figs)]
    right_col = ['<div class="fig"><img src="{0}"></div>\n'.format(png)
                 for png in png_files]
    extra_css = [entry[2] for entry in json_figs]

    print("writing results to {0}".format(outfile))
    with open(outfile, 'w') as f:
        f.write(TEMPLATE.format(left_col="".join(left_col),
                                right_col="".join(right_col),
                                d3_url=json.dumps(d3_url),
                                mpld3_url=json.dumps(mpld3_url),
                                js_commands="".join(js_commands),
                                extra_css="".join(extra_css)))
def run_main():
    """Parse CLI options, build the comparison HTML, and return
    ``(output filename, nolaunch flag)``."""
    import argparse
    parser = argparse.ArgumentParser(description=("Run files and convert "
                                                  "output to D3"))
    parser.add_argument("files", nargs='*', type=str)
    parser.add_argument("-d", "--d3-url",
                        help="location of d3 library",
                        type=str, default=None)
    parser.add_argument("-m", "--mpld3-url",
                        help="location of the mpld3 library",
                        type=str, default=None)
    parser.add_argument("-o", "--output",
                        help="output filename",
                        type=str, default='_test_plots.html')
    parser.add_argument("-j", "--minjs", action="store_true")
    parser.add_argument("-l", "--local", action="store_true")
    parser.add_argument("-n", "--nolaunch", action="store_true")
    args = parser.parse_args()

    # No positional files -> scan the default test and example directories.
    wildcard = args.files if args.files else ['mpld3/test_plots/*.py',
                                              'examples/*.py']

    # Resolve library URLs: package defaults first, then the
    # --local / --minjs variants override.
    if args.d3_url is None:
        args.d3_url = urls.D3_URL
    if args.mpld3_url is None:
        args.mpld3_url = urls.MPLD3_URL
    if args.local:
        args.d3_url = urls.D3_LOCAL
        args.mpld3_url = (urls.MPLD3MIN_LOCAL if args.minjs
                          else urls.MPLD3_LOCAL)
    elif args.minjs:
        args.mpld3_url = urls.MPLD3MIN_URL

    print("d3 url: {0}".format(args.d3_url))
    print("mpld3 url: {0}".format(args.mpld3_url))
    combine_testplots(wildcard=wildcard,
                      outfile=args.output,
                      d3_url=args.d3_url,
                      mpld3_url=args.mpld3_url)
    return args.output, args.nolaunch
# Build the comparison page, then open it in a browser unless -n was given.
if __name__ == '__main__':
    outfile, nolaunch = run_main()
    if not nolaunch:
        # Open local file (works on OSX; maybe not on other systems)
        import webbrowser
        webbrowser.open_new('file://localhost' + os.path.abspath(outfile))
| bsd-3-clause |
mikelane/FaceRecognition | Sklearn_Face_Recognition/GradientBoosting.py | 1 | 4048 |
# coding: utf-8
# In[2]:
from datetime import datetime
import logging
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import fetch_lfw_people
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.decomposition import PCA
from sklearn.ensemble import GradientBoostingClassifier
import PIL
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
# In[3]:
# Download (or load from cache) the Labeled Faces in the Wild dataset,
# keeping only people with at least 70 images.
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the 2 data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print('n_samples: {ns}'.format(ns=n_samples))
print('n_features: {nf}'.format(nf=n_features))
print('n_classes: {}'.format(n_classes))
# In[4]:
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=np.random.RandomState())
# In[5]:
# Reduce dimensionality with PCA ("eigenfaces") before classification.
n_components = 150
print("Extracting the top {nc} eigenfaces from {nf} faces".format(nc=n_components, nf=X_train.shape[0]))
start = datetime.now()
pca = PCA(n_components=n_components, svd_solver='randomized', whiten=True).fit(X_train)
print("done in {dur:.3f}s".format(dur=(datetime.now() - start).total_seconds()))
eigenfaces = pca.components_.reshape((n_components, h, w))
print('Projecting the input data on the eigenfaces orthonormal basis')
start = datetime.now()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in {dur:.3f}s".format(dur=(datetime.now() - start).total_seconds()))
# In[6]:
print('Fitting the classifier to the training set')
start = datetime.now()
# NOTE(review): the grid is empty, so GridSearchCV only cross-validates the
# default GradientBoostingClassifier — no hyper-parameters are searched.
param_grid = {}
clf = GridSearchCV(GradientBoostingClassifier(), param_grid)
clf = clf.fit(X_train_pca, y_train)
print('done in {dur:.3f}s'.format(dur=(datetime.now() - start).total_seconds()))
print('Best estimator found by grid search:')
print(clf.best_estimator_)
# In[10]:
# Evaluate the fitted grid-search model on the held-out test split.
print("Predicting people's names on the test set")
start = datetime.now()
y_pred = clf.predict(X_test_pca)
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin int is the documented drop-in replacement here.
y_pred = y_pred.astype(int)
print("done in {dur:.3f}s".format(dur=(datetime.now() - start).total_seconds()))
print('\nAccuracy: {:.2f}'.format(accuracy_score(y_test, y_pred)))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
# In[8]:
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
    """Render an n_row x n_col grid of h-by-w portraits with titles."""
    plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
    plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
    plt.style.use('seaborn-dark')
    n_cells = n_row * n_col
    for idx in range(n_cells):
        plt.subplot(n_row, n_col, idx + 1)
        plt.imshow(images[idx].reshape((h, w)), cmap=plt.cm.gray)
        plt.title(titles[idx], size=12)
        # Hide axis tick marks — they carry no meaning for portraits.
        plt.xticks(())
        plt.yticks(())
def title(y_pred, y_test, target_names, i):
    """Build the 'predicted vs. true' caption for gallery cell *i*.

    Only the last word of each person's name (their surname) is shown.
    """
    predicted = target_names[y_pred[i]].rsplit(' ', 1)[-1]
    actual = target_names[y_test[i]].rsplit(' ', 1)[-1]
    return 'predicted: {p}\ntrue: {t}'.format(p=predicted, t=actual)
# In[9]:
prediction_titles = [title(y_pred, y_test, target_names, i) for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface {}".format(i) for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
# In[ ]:
| mit |
OpringaoDoTurno/airflow | airflow/contrib/hooks/bigquery_hook.py | 3 | 47635 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a BigQuery Hook, as well as a very basic PEP 249
implementation for BigQuery.
"""
import time
from builtins import range
from past.builtins import basestring
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.hooks.dbapi_hook import DbApiHook
from airflow.utils.log.logging_mixin import LoggingMixin
from apiclient.discovery import HttpError, build
from googleapiclient import errors
from pandas.tools.merge import concat
from pandas_gbq.gbq import \
_check_google_client_version as gbq_check_google_client_version
from pandas_gbq.gbq import _parse_data as gbq_parse_data
from pandas_gbq.gbq import \
_test_google_api_imports as gbq_test_google_api_imports
from pandas_gbq.gbq import GbqConnector
class BigQueryHook(GoogleCloudBaseHook, DbApiHook, LoggingMixin):
    """
    Interact with BigQuery. This hook uses the Google Cloud Platform
    connection.
    """
    # Name of the attribute holding the Airflow connection id
    # (DbApiHook convention).
    conn_name_attr = 'bigquery_conn_id'

    def __init__(self, bigquery_conn_id='bigquery_default', delegate_to=None):
        super(BigQueryHook, self).__init__(
            conn_id=bigquery_conn_id, delegate_to=delegate_to)

    def get_conn(self):
        """
        Returns a BigQuery PEP 249 connection object.
        """
        service = self.get_service()
        # 'project' is read from the extras of the configured GCP connection.
        project = self._get_field('project')
        return BigQueryConnection(service=service, project_id=project)

    def get_service(self):
        """
        Returns a BigQuery service object.
        """
        http_authorized = self._authorize()
        return build('bigquery', 'v2', http=http_authorized)

    def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
        """
        Insertion is currently unsupported. Theoretically, you could use
        BigQuery's streaming API to insert rows into a table, but this hasn't
        been implemented.
        """
        raise NotImplementedError()

    def get_pandas_df(self, bql, parameters=None, dialect='legacy'):
        """
        Returns a Pandas DataFrame for the results produced by a BigQuery
        query. The DbApiHook method must be overridden because Pandas
        doesn't support PEP 249 connections, except for SQLite. See:

        https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
        https://github.com/pydata/pandas/issues/6900

        :param bql: The BigQuery SQL to execute.
        :type bql: string
        :param parameters: The parameters to render the SQL query with (not
            used, leave to override superclass method)
        :type parameters: mapping or iterable
        :param dialect: Dialect of BigQuery SQL – legacy SQL or standard SQL
        :type dialect: string in {'legacy', 'standard'}, default 'legacy'
        """
        service = self.get_service()
        project = self._get_field('project')
        connector = BigQueryPandasConnector(project, service, dialect=dialect)
        schema, pages = connector.run_query(bql)
        dataframe_list = []

        # Each page of results becomes its own DataFrame; they are
        # concatenated at the end with a continuous index.
        while len(pages) > 0:
            page = pages.pop()
            dataframe_list.append(gbq_parse_data(schema, page))

        if len(dataframe_list) > 0:
            return concat(dataframe_list, ignore_index=True)
        else:
            # No rows returned: still produce an (empty) frame from schema.
            return gbq_parse_data(schema, [])

    def table_exists(self, project_id, dataset_id, table_id):
        """
        Checks for the existence of a table in Google BigQuery.

        :param project_id: The Google cloud project in which to look for the
            table. The connection supplied to the hook must provide access to
            the specified project.
        :type project_id: string
        :param dataset_id: The name of the dataset in which to look for the
            table.
        :type dataset_id: string
        :param table_id: The name of the table to check the existence of.
        :type table_id: string
        """
        service = self.get_service()
        try:
            service.tables().get(
                projectId=project_id, datasetId=dataset_id,
                tableId=table_id).execute()
            return True
        except errors.HttpError as e:
            # 404 means "table not found"; any other status is a real
            # API error and is re-raised.
            if e.resp['status'] == '404':
                return False
            raise
class BigQueryPandasConnector(GbqConnector):
    """
    This connector behaves identically to GbqConnector (from Pandas), except
    that it allows the service to be injected, and disables a call to
    self.get_credentials(). This allows Airflow to use BigQuery with Pandas
    without forcing a three legged OAuth connection. Instead, we can inject
    service account credentials into the binding.
    """

    def __init__(self,
                 project_id,
                 service,
                 reauth=False,
                 verbose=False,
                 dialect='legacy'):
        # NOTE: GbqConnector.__init__ is deliberately NOT called — it would
        # trigger the interactive credential flow. Only the version/import
        # sanity checks from pandas-gbq are kept.
        gbq_check_google_client_version()
        gbq_test_google_api_imports()
        self.project_id = project_id
        self.reauth = reauth
        # Pre-authorized googleapiclient service injected by the hook.
        self.service = service
        self.verbose = verbose
        self.dialect = dialect
class BigQueryConnection(object):
    """
    BigQuery has no notion of a persistent connection, so this object is a
    small stateless factory for cursors, which do all the real work.
    """

    def __init__(self, *args, **kwargs):
        # Stored untouched and forwarded verbatim to every cursor.
        self._args = args
        self._kwargs = kwargs

    def close(self):
        """No-op: there is nothing to close."""
        pass

    def commit(self):
        """No-op: BigQueryConnection does not support transactions."""
        pass

    def cursor(self):
        """ Return a new :py:class:`Cursor` object using the connection. """
        return BigQueryCursor(*self._args, **self._kwargs)

    def rollback(self):
        raise NotImplementedError(
            "BigQueryConnection does not have transactions")
class BigQueryBaseCursor(LoggingMixin):
    """
    The BigQuery base cursor contains helper methods to execute queries against
    BigQuery. The methods can be used directly by operators, in cases where a
    PEP 249 cursor isn't needed.
    """

    def __init__(self, service, project_id):
        # Authorized googleapiclient BigQuery service resource.
        self.service = service
        # Default GCP project used for job submission and polling.
        self.project_id = project_id
        # Job id of the most recently submitted job; used by cancel_query().
        self.running_job_id = None
def run_query(self,
              bql,
              destination_dataset_table=False,
              write_disposition='WRITE_EMPTY',
              allow_large_results=False,
              udf_config=False,
              use_legacy_sql=True,
              maximum_billing_tier=None,
              create_disposition='CREATE_IF_NEEDED',
              query_params=None,
              schema_update_options=()):
    """
    Executes a BigQuery SQL query. Optionally persists results in a BigQuery
    table. See here:

    https://cloud.google.com/bigquery/docs/reference/v2/jobs

    For more details about these parameters.

    :param bql: The BigQuery SQL to execute.
    :type bql: string
    :param destination_dataset_table: The dotted <dataset>.<table>
        BigQuery table to save the query results. Any falsy value (the
        default is ``False``) means the results are not persisted.
    :param write_disposition: What to do if the table already exists in
        BigQuery.
    :type write_disposition: string
    :param allow_large_results: Whether to allow large results.
    :type allow_large_results: boolean
    :param udf_config: The User Defined Function configuration for the query.
        See https://cloud.google.com/bigquery/user-defined-functions for details.
    :type udf_config: list
    :param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
    :type use_legacy_sql: boolean
    :param maximum_billing_tier: Positive integer that serves as a
        multiplier of the basic price.
    :type maximum_billing_tier: integer
    :param create_disposition: Specifies whether the job is allowed to
        create new tables.
    :type create_disposition: string
    :param query_params: a dictionary containing query parameter types and
        values, passed to BigQuery
    :type query_params: dict
    :param schema_update_options: Allows the schema of the destination
        table to be updated as a side effect of the query job.
    :type schema_update_options: tuple
    """
    # BigQuery also allows you to define how you want a table's schema to change
    # as a side effect of a query job
    # for more details:
    # https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.schemaUpdateOptions
    allowed_schema_update_options = [
        'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"
    ]
    if not set(allowed_schema_update_options).issuperset(
            set(schema_update_options)):
        raise ValueError(
            "{0} contains invalid schema update options. "
            "Please only use one or more of the following options: {1}"
            .format(schema_update_options, allowed_schema_update_options))

    configuration = {
        'query': {
            'query': bql,
            'useLegacySql': use_legacy_sql,
            'maximumBillingTier': maximum_billing_tier
        }
    }

    if destination_dataset_table:
        assert '.' in destination_dataset_table, (
            'Expected destination_dataset_table in the format of '
            '<dataset>.<table>. Got: {}').format(destination_dataset_table)
        # Falls back to self.project_id when no project prefix is given.
        destination_project, destination_dataset, destination_table = \
            _split_tablename(table_input=destination_dataset_table,
                             default_project_id=self.project_id)
        configuration['query'].update({
            'allowLargeResults': allow_large_results,
            'writeDisposition': write_disposition,
            'createDisposition': create_disposition,
            'destinationTable': {
                'projectId': destination_project,
                'datasetId': destination_dataset,
                'tableId': destination_table,
            }
        })
    if udf_config:
        assert isinstance(udf_config, list)
        configuration['query'].update({
            'userDefinedFunctionResources': udf_config
        })

    if query_params:
        # Parameterized queries are a standard-SQL-only feature.
        if use_legacy_sql:
            raise ValueError("Query paramaters are not allowed when using "
                             "legacy SQL")
        else:
            configuration['query']['queryParameters'] = query_params

    if schema_update_options:
        if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
            raise ValueError("schema_update_options is only "
                             "allowed if write_disposition is "
                             "'WRITE_APPEND' or 'WRITE_TRUNCATE'.")
        else:
            self.log.info(
                "Adding experimental "
                "'schemaUpdateOptions': {0}".format(schema_update_options))
            configuration['query'][
                'schemaUpdateOptions'] = schema_update_options

    return self.run_with_configuration(configuration)
def run_extract(  # noqa
        self,
        source_project_dataset_table,
        destination_cloud_storage_uris,
        compression='NONE',
        export_format='CSV',
        field_delimiter=',',
        print_header=True):
    """
    Executes a BigQuery extract command to copy data from BigQuery to
    Google Cloud Storage. See here:

    https://cloud.google.com/bigquery/docs/reference/v2/jobs

    For more details about these parameters.

    :param source_project_dataset_table: The dotted <dataset>.<table>
        BigQuery table to use as the source data.
    :type source_project_dataset_table: string
    :param destination_cloud_storage_uris: The destination Google Cloud
        Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
        convention defined here:
        https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
    :type destination_cloud_storage_uris: list
    :param compression: Type of compression to use.
    :type compression: string
    :param export_format: File format to export.
    :type export_format: string
    :param field_delimiter: The delimiter to use when extracting to a CSV.
    :type field_delimiter: string
    :param print_header: Whether to print a header for a CSV file extract.
    :type print_header: boolean
    """
    src_project, src_dataset, src_table = _split_tablename(
        table_input=source_project_dataset_table,
        default_project_id=self.project_id,
        var_name='source_project_dataset_table')

    extract_config = {
        'sourceTable': {
            'projectId': src_project,
            'datasetId': src_dataset,
            'tableId': src_table,
        },
        'compression': compression,
        'destinationUris': destination_cloud_storage_uris,
        'destinationFormat': export_format,
    }

    # Only set fieldDelimiter and printHeader fields if using CSV.
    # Google does not like it if you set these fields for other export
    # formats.
    if export_format == 'CSV':
        extract_config['fieldDelimiter'] = field_delimiter
        extract_config['printHeader'] = print_header

    return self.run_with_configuration({'extract': extract_config})
def run_copy(self,
             source_project_dataset_tables,
             destination_project_dataset_table,
             write_disposition='WRITE_EMPTY',
             create_disposition='CREATE_IF_NEEDED'):
    """
    Executes a BigQuery copy command to copy data from one BigQuery table
    to another. See here:

    https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy

    For more details about these parameters.

    :param source_project_dataset_tables: One or more dotted
        (project:|project.)<dataset>.<table>
        BigQuery tables to use as the source data. Use a list if there are
        multiple source tables.
        If <project> is not included, project will be the project defined
        in the connection json.
    :type source_project_dataset_tables: list|string
    :param destination_project_dataset_table: The destination BigQuery
        table. Format is: (project:|project.)<dataset>.<table>
    :type destination_project_dataset_table: string
    :param write_disposition: The write disposition if the table already exists.
    :type write_disposition: string
    :param create_disposition: The create disposition if the table doesn't exist.
    :type create_disposition: string
    """
    # Accept a single dotted name or a list of them.
    if not isinstance(source_project_dataset_tables, list):
        source_project_dataset_tables = [source_project_dataset_tables]

    source_refs = []
    for table_input in source_project_dataset_tables:
        src_project, src_dataset, src_table = _split_tablename(
            table_input=table_input,
            default_project_id=self.project_id,
            var_name='source_project_dataset_table')
        source_refs.append({
            'projectId': src_project,
            'datasetId': src_dataset,
            'tableId': src_table
        })

    dest_project, dest_dataset, dest_table = _split_tablename(
        table_input=destination_project_dataset_table,
        default_project_id=self.project_id)

    configuration = {
        'copy': {
            'createDisposition': create_disposition,
            'writeDisposition': write_disposition,
            'sourceTables': source_refs,
            'destinationTable': {
                'projectId': dest_project,
                'datasetId': dest_dataset,
                'tableId': dest_table
            }
        }
    }

    return self.run_with_configuration(configuration)
def run_load(self,
             destination_project_dataset_table,
             schema_fields,
             source_uris,
             source_format='CSV',
             create_disposition='CREATE_IF_NEEDED',
             skip_leading_rows=0,
             write_disposition='WRITE_EMPTY',
             field_delimiter=',',
             max_bad_records=0,
             quote_character=None,
             allow_quoted_newlines=False,
             allow_jagged_rows=False,
             schema_update_options=(),
             src_fmt_configs=None):
    """
    Executes a BigQuery load command to load data from Google Cloud Storage
    to BigQuery. See here:

    https://cloud.google.com/bigquery/docs/reference/v2/jobs

    For more details about these parameters.

    :param destination_project_dataset_table:
        The dotted (<project>.|<project>:)<dataset>.<table> BigQuery table to load
        data into. If <project> is not included, project will be the project defined
        in the connection json.
    :type destination_project_dataset_table: string
    :param schema_fields: The schema field list as defined here:
        https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
    :type schema_fields: list
    :param source_uris: The source Google Cloud
        Storage URI (e.g. gs://some-bucket/some-file.txt). A single wild
        per-object name can be used.
    :type source_uris: list
    :param source_format: File format to export.
    :type source_format: string
    :param create_disposition: The create disposition if the table doesn't exist.
    :type create_disposition: string
    :param skip_leading_rows: Number of rows to skip when loading from a CSV.
    :type skip_leading_rows: int
    :param write_disposition: The write disposition if the table already exists.
    :type write_disposition: string
    :param field_delimiter: The delimiter to use when loading from a CSV.
    :type field_delimiter: string
    :param max_bad_records: The maximum number of bad records that BigQuery can
        ignore when running the job.
    :type max_bad_records: int
    :param quote_character: The value that is used to quote data sections in a CSV file.
    :type quote_character: string
    :param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false).
    :type allow_quoted_newlines: boolean
    :param allow_jagged_rows: Accept rows that are missing trailing optional columns.
        The missing values are treated as nulls. If false, records with missing trailing columns
        are treated as bad records, and if there are too many bad records, an invalid error is
        returned in the job result. Only applicable when soure_format is CSV.
    :type allow_jagged_rows: bool
    :param schema_update_options: Allows the schema of the destination
        table to be updated as a side effect of the load job.
    :type schema_update_options: tuple
    :param src_fmt_configs: configure optional fields specific to the source
        format. ``None`` (the default) means an empty configuration.
    :type src_fmt_configs: dict
    """
    # BUG FIX: the old signature used a mutable default (src_fmt_configs={})
    # which this method mutates below, so state leaked across calls that
    # relied on the default. A None sentinel is backward-compatible.
    if src_fmt_configs is None:
        src_fmt_configs = {}

    # bigquery only allows certain source formats
    # we check to make sure the passed source format is valid
    # if it's not, we raise a ValueError
    # Refer to this link for more details:
    # https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat
    source_format = source_format.upper()
    allowed_formats = [
        "CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS",
        "DATASTORE_BACKUP"
    ]
    if source_format not in allowed_formats:
        raise ValueError("{0} is not a valid source format. "
                         "Please use one of the following types: {1}"
                         .format(source_format, allowed_formats))

    # bigquery also allows you to define how you want a table's schema to change
    # as a side effect of a load
    # for more details:
    # https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions
    allowed_schema_update_options = [
        'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"
    ]
    if not set(allowed_schema_update_options).issuperset(
            set(schema_update_options)):
        raise ValueError(
            "{0} contains invalid schema update options. "
            "Please only use one or more of the following options: {1}"
            .format(schema_update_options, allowed_schema_update_options))

    destination_project, destination_dataset, destination_table = \
        _split_tablename(table_input=destination_project_dataset_table,
                         default_project_id=self.project_id,
                         var_name='destination_project_dataset_table')

    configuration = {
        'load': {
            'createDisposition': create_disposition,
            'destinationTable': {
                'projectId': destination_project,
                'datasetId': destination_dataset,
                'tableId': destination_table,
            },
            'sourceFormat': source_format,
            'sourceUris': source_uris,
            'writeDisposition': write_disposition,
        }
    }

    if schema_fields:
        configuration['load']['schema'] = {'fields': schema_fields}

    if schema_update_options:
        if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
            raise ValueError("schema_update_options is only "
                             "allowed if write_disposition is "
                             "'WRITE_APPEND' or 'WRITE_TRUNCATE'.")
        else:
            self.log.info(
                "Adding experimental "
                "'schemaUpdateOptions': {0}".format(schema_update_options))
            configuration['load'][
                'schemaUpdateOptions'] = schema_update_options

    if max_bad_records:
        configuration['load']['maxBadRecords'] = max_bad_records

    # if following fields are not specified in src_fmt_configs,
    # honor the top-level params for backward-compatibility
    if 'skipLeadingRows' not in src_fmt_configs:
        src_fmt_configs['skipLeadingRows'] = skip_leading_rows
    if 'fieldDelimiter' not in src_fmt_configs:
        src_fmt_configs['fieldDelimiter'] = field_delimiter
    if quote_character:
        src_fmt_configs['quote'] = quote_character
    if allow_quoted_newlines:
        src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines

    # Per-format whitelist of configuration keys BigQuery accepts.
    src_fmt_to_configs_mapping = {
        'CSV': [
            'allowJaggedRows', 'allowQuotedNewlines', 'autodetect',
            'fieldDelimiter', 'skipLeadingRows', 'ignoreUnknownValues',
            'nullMarker', 'quote'
        ],
        'DATASTORE_BACKUP': ['projectionFields'],
        'NEWLINE_DELIMITED_JSON': ['autodetect', 'ignoreUnknownValues'],
        'AVRO': [],
    }
    valid_configs = src_fmt_to_configs_mapping[source_format]
    # Silently drop keys that do not apply to the chosen source format.
    src_fmt_configs = {
        k: v
        for k, v in src_fmt_configs.items() if k in valid_configs
    }
    configuration['load'].update(src_fmt_configs)

    if allow_jagged_rows:
        configuration['load']['allowJaggedRows'] = allow_jagged_rows

    return self.run_with_configuration(configuration)
def run_with_configuration(self, configuration):
    """
    Executes a BigQuery SQL query. See here:

    https://cloud.google.com/bigquery/docs/reference/v2/jobs

    For more details about the configuration parameter.

    :param configuration: The configuration parameter maps directly to
        BigQuery's configuration field in the job object. See
        https://cloud.google.com/bigquery/docs/reference/v2/jobs for
        details.
    :return: the id of the submitted (and completed) job
    :raises Exception: if the job finishes with an errorResult, or the
        status poll fails with a non-retryable HTTP error
    """
    jobs = self.service.jobs()
    job_data = {'configuration': configuration}

    # Send query and wait for reply.
    query_reply = jobs \
        .insert(projectId=self.project_id, body=job_data) \
        .execute()
    self.running_job_id = query_reply['jobReference']['jobId']

    # Wait for query to finish.
    keep_polling_job = True
    while keep_polling_job:
        try:
            job = jobs.get(
                projectId=self.project_id,
                jobId=self.running_job_id).execute()
            if job['status']['state'] == 'DONE':
                keep_polling_job = False
                # Check if job had errors.
                if 'errorResult' in job['status']:
                    raise Exception(
                        'BigQuery job failed. Final error was: {}. The job was: {}'.
                        format(job['status']['errorResult'], job))
            else:
                self.log.info('Waiting for job to complete : %s, %s',
                              self.project_id, self.running_job_id)
                time.sleep(5)
        except HttpError as err:
            # 500/503 are transient server errors: keep polling.
            if err.resp.status in [500, 503]:
                self.log.info(
                    '%s: Retryable error, waiting for job to complete: %s',
                    err.resp.status, self.running_job_id)
                time.sleep(5)
            else:
                # BUG FIX: the original passed ('... %s', status) as two
                # Exception args, so the placeholder was never substituted.
                raise Exception(
                    'BigQuery job status check failed. Final error was: '
                    '{}'.format(err.resp.status))

    return self.running_job_id
def poll_job_complete(self, job_id):
jobs = self.service.jobs()
try:
job = jobs.get(projectId=self.project_id, jobId=job_id).execute()
if (job['status']['state'] == 'DONE'):
return True
except HttpError as err:
if err.resp.status in [500, 503]:
self.log.info(
'%s: Retryable error while polling job with id %s',
err.resp.status, job_id)
else:
raise Exception(
'BigQuery job status check failed. Final error was: %s',
err.resp.status)
return False
    def cancel_query(self):
        """
        Cancel all started queries that have not yet completed.

        Issues a cancel request for ``self.running_job_id`` (the job started
        by the last ``run_with_configuration`` call) and then polls for up to
        ~60 seconds (12 attempts x 5 s) for the cancellation to take effect.
        Does nothing if there is no running job.
        """
        jobs = self.service.jobs()
        # Only attempt cancellation when a job was started and is still running.
        if (self.running_job_id and
                not self.poll_job_complete(self.running_job_id)):
            self.log.info('Attempting to cancel job : %s, %s', self.project_id,
                          self.running_job_id)
            jobs.cancel(
                projectId=self.project_id,
                jobId=self.running_job_id).execute()
        else:
            self.log.info('No running BigQuery jobs to cancel.')
            return

        # Wait for all the calls to cancel to finish
        max_polling_attempts = 12
        polling_attempts = 0

        job_complete = False
        while (polling_attempts < max_polling_attempts and not job_complete):
            polling_attempts = polling_attempts + 1
            job_complete = self.poll_job_complete(self.running_job_id)
            if (job_complete):
                self.log.info('Job successfully canceled: %s, %s',
                              self.project_id, self.running_job_id)
            elif (polling_attempts == max_polling_attempts):
                # Give up after the polling budget is exhausted; the job may
                # still finish (or cancel) on the server side.
                self.log.info(
                    "Stopping polling due to timeout. Job with id %s "
                    "has not completed cancel and may or may not finish.",
                    self.running_job_id)
            else:
                self.log.info('Waiting for canceled job with id %s to finish.',
                              self.running_job_id)
                time.sleep(5)
    def get_schema(self, dataset_id, table_id):
        """
        Get the schema for a given dataset.table.
        see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource

        :param dataset_id: the dataset ID of the requested table
        :param table_id: the table ID of the requested table
        :return: a table schema (the ``schema`` field of the tables resource)
        """
        # The table is looked up in self.project_id; only the 'schema' part of
        # the full tables resource is returned.
        tables_resource = self.service.tables() \
            .get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
            .execute()
        return tables_resource['schema']
def get_tabledata(self, dataset_id, table_id,
max_results=None, selected_fields=None, page_token=None,
start_index=None):
"""
Get the data of a given dataset.table and optionally with selected columns.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: map containing the requested rows.
"""
optional_params = {}
if max_results:
optional_params['maxResults'] = max_results
if selected_fields:
optional_params['selectedFields'] = selected_fields
if page_token:
optional_params['pageToken'] = page_token
if start_index:
optional_params['startIndex'] = start_index
return (self.service.tabledata().list(
projectId=self.project_id,
datasetId=dataset_id,
tableId=table_id,
**optional_params).execute())
def run_table_delete(self, deletion_dataset_table,
ignore_if_missing=False):
"""
Delete an existing table from the dataset;
If the table does not exist, return an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
(<project>.|<project>:)<dataset>.<table> that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: boolean
:return:
"""
assert '.' in deletion_dataset_table, (
'Expected deletion_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(deletion_dataset_table)
deletion_project, deletion_dataset, deletion_table = \
_split_tablename(table_input=deletion_dataset_table,
default_project_id=self.project_id)
try:
tables_resource = self.service.tables() \
.delete(projectId=deletion_project,
datasetId=deletion_dataset,
tableId=deletion_table) \
.execute()
self.log.info('Deleted table %s:%s.%s.', deletion_project,
deletion_dataset, deletion_table)
except HttpError:
if not ignore_if_missing:
raise Exception('Table deletion failed. Table does not exist.')
else:
self.log.info('Table does not exist. Skipping.')
    def run_table_upsert(self, dataset_id, table_resource, project_id=None):
        """
        creates a new, empty table in the dataset;
        If the table already exists, update the existing table.
        Since BigQuery does not natively allow table upserts, this is not an
        atomic operation.

        :param dataset_id: the dataset to upsert the table into.
        :type dataset_id: str
        :param table_resource: a table resource. see
            https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
        :type table_resource: dict
        :param project_id: the project to upsert the table into.  If None,
            project will be self.project_id.
        :return: the tables resource returned by the update or insert call.
        """
        # check to see if the table exists
        table_id = table_resource['tableReference']['tableId']
        project_id = project_id if project_id is not None else self.project_id
        tables_list_resp = self.service.tables().list(
            projectId=project_id, datasetId=dataset_id).execute()
        # Page through the table list until the table is found or the list is
        # exhausted; both branches below return, so the loop always terminates.
        while True:
            for table in tables_list_resp.get('tables', []):
                if table['tableReference']['tableId'] == table_id:
                    # found the table, do update
                    self.log.info('Table %s:%s.%s exists, updating.',
                                  project_id, dataset_id, table_id)
                    return self.service.tables().update(
                        projectId=project_id,
                        datasetId=dataset_id,
                        tableId=table_id,
                        body=table_resource).execute()
            # If there is a next page, we need to check the next page.
            if 'nextPageToken' in tables_list_resp:
                tables_list_resp = self.service.tables()\
                    .list(projectId=project_id,
                          datasetId=dataset_id,
                          pageToken=tables_list_resp['nextPageToken'])\
                    .execute()
            # If there is no next page, then the table doesn't exist.
            else:
                # do insert
                self.log.info('Table %s:%s.%s does not exist. creating.',
                              project_id, dataset_id, table_id)
                return self.service.tables().insert(
                    projectId=project_id,
                    datasetId=dataset_id,
                    body=table_resource).execute()
    def run_grant_dataset_view_access(self,
                                      source_dataset,
                                      view_dataset,
                                      view_table,
                                      source_project=None,
                                      view_project=None):
        """
        Grant authorized view access of a dataset to a view table.
        If this view has already been granted access to the dataset, do nothing.
        This method is not atomic.  Running it may clobber a simultaneous update.

        :param source_dataset: the source dataset
        :type source_dataset: str
        :param view_dataset: the dataset that the view is in
        :type view_dataset: str
        :param view_table: the table of the view
        :type view_table: str
        :param source_project: the project of the source dataset. If None,
            self.project_id will be used.
        :type source_project: str
        :param view_project: the project that the view is in. If None,
            self.project_id will be used.
        :type view_project: str
        :return: the datasets resource of the source dataset.
        """
        # Apply default values to projects
        source_project = source_project if source_project else self.project_id
        view_project = view_project if view_project else self.project_id

        # we don't want to clobber any existing accesses, so we have to get
        # info on the dataset before we can add view access
        source_dataset_resource = self.service.datasets().get(
            projectId=source_project, datasetId=source_dataset).execute()
        # An absent 'access' field means no explicit access entries yet.
        access = source_dataset_resource[
            'access'] if 'access' in source_dataset_resource else []
        view_access = {
            'view': {
                'projectId': view_project,
                'datasetId': view_dataset,
                'tableId': view_table
            }
        }
        # check to see if the view we want to add already exists.
        if view_access not in access:
            self.log.info(
                'Granting table %s:%s.%s authorized view access to %s:%s dataset.',
                view_project, view_dataset, view_table, source_project,
                source_dataset)
            # Patch (not update) so other dataset fields are left untouched;
            # this read-modify-write is still racy against concurrent edits.
            access.append(view_access)
            return self.service.datasets().patch(
                projectId=source_project,
                datasetId=source_dataset,
                body={
                    'access': access
                }).execute()
        else:
            # if view is already in access, do nothing.
            self.log.info(
                'Table %s:%s.%s already has authorized view access to %s:%s dataset.',
                view_project, view_dataset, view_table, source_project,
                source_dataset)
            return source_dataset_resource
class BigQueryCursor(BigQueryBaseCursor):
    """
    A very basic BigQuery PEP 249 cursor implementation. The PyHive PEP 249
    implementation was used as a reference:

    https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py
    https://github.com/dropbox/PyHive/blob/master/pyhive/common.py
    """

    def __init__(self, service, project_id):
        super(BigQueryCursor, self).__init__(
            service=service, project_id=project_id)
        self.buffersize = None  # rows fetched per fetchmany() call
        self.page_token = None  # pagination token of the current result page
        self.job_id = None  # ID of the last executed query job
        self.buffer = []  # rows fetched but not yet returned
        self.all_pages_loaded = False  # True once the last page was consumed

    @property
    def description(self):
        """ The schema description method is not currently implemented. """
        raise NotImplementedError

    def close(self):
        """ By default, do nothing """
        pass

    @property
    def rowcount(self):
        """ By default, return -1 to indicate that this is not supported. """
        return -1

    def execute(self, operation, parameters=None):
        """
        Executes a BigQuery query, and returns the job ID.

        :param operation: The query to execute.
        :type operation: string
        :param parameters: Parameters to substitute into the query.
        :type parameters: dict
        """
        bql = _bind_parameters(operation,
                               parameters) if parameters else operation
        self.job_id = self.run_query(bql)

    def executemany(self, operation, seq_of_parameters):
        """
        Execute a BigQuery query multiple times with different parameters.

        :param operation: The query to execute.
        :type operation: string
        :param seq_of_parameters: List of dictionary parameters to substitute
            into the query.
        :type seq_of_parameters: list
        """
        for parameters in seq_of_parameters:
            self.execute(operation, parameters)

    def fetchone(self):
        """ Fetch the next row of a query result set. """
        return self.next()

    def next(self):
        """
        Helper method for fetchone, which returns the next row from a buffer.
        If the buffer is empty, attempts to paginate through the result set for
        the next page, and load it into the buffer.
        """
        if not self.job_id:
            return None

        if len(self.buffer) == 0:
            if self.all_pages_loaded:
                return None

            query_results = (self.service.jobs().getQueryResults(
                projectId=self.project_id,
                jobId=self.job_id,
                pageToken=self.page_token).execute())

            if 'rows' in query_results and query_results['rows']:
                self.page_token = query_results.get('pageToken')
                fields = query_results['schema']['fields']
                col_types = [field['type'] for field in fields]
                rows = query_results['rows']

                # Cast every cell from BigQuery's string representation to
                # the Python type implied by the schema.
                for dict_row in rows:
                    typed_row = ([
                        _bq_cast(vs['v'], col_types[idx])
                        for idx, vs in enumerate(dict_row['f'])
                    ])
                    self.buffer.append(typed_row)

                if not self.page_token:
                    self.all_pages_loaded = True

            else:
                # Reset all state since we've exhausted the results.
                # (The original reset page_token twice; once is enough.)
                self.page_token = None
                self.job_id = None
                return None

        return self.buffer.pop(0)

    def fetchmany(self, size=None):
        """
        Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a
        list of tuples). An empty sequence is returned when no more rows are available.
        The number of rows to fetch per call is specified by the parameter. If it is not given, the
        cursor's arraysize determines the number of rows to be fetched. The method should try to
        fetch as many rows as indicated by the size parameter. If this is not possible due to the
        specified number of rows not being available, fewer rows may be returned.
        An :py:class:`~pyhive.exc.Error` (or subclass) exception is raised if the previous call to
        :py:meth:`execute` did not produce any result set or no call was issued yet.
        """
        if size is None:
            size = self.arraysize
        result = []
        for _ in range(size):
            one = self.fetchone()
            if one is None:
                break
            else:
                result.append(one)
        return result

    def fetchall(self):
        """
        Fetch all (remaining) rows of a query result, returning them as a sequence of sequences
        (e.g. a list of tuples).
        """
        result = []
        while True:
            one = self.fetchone()
            if one is None:
                break
            else:
                result.append(one)
        return result

    def get_arraysize(self):
        """ Specifies the number of rows to fetch at a time with .fetchmany() """
        # Fixed: the original read ``self._buffersize``, an attribute that is
        # never assigned, so any truthy ``buffersize`` raised AttributeError.
        return self.buffersize if self.buffersize else 1

    def set_arraysize(self, arraysize):
        """ Specifies the number of rows to fetch at a time with .fetchmany() """
        self.buffersize = arraysize

    arraysize = property(get_arraysize, set_arraysize)

    def setinputsizes(self, sizes):
        """ Does nothing by default """
        pass

    def setoutputsize(self, size, column=None):
        """ Does nothing by default """
        pass
def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in parameters.iteritems():
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER' or bq_type == 'TIMESTAMP':
return int(string_field)
elif bq_type == 'FLOAT':
return float(string_field)
elif bq_type == 'BOOLEAN':
assert string_field in set(['true', 'false'])
return string_field == 'true'
else:
return string_field
def _split_tablename(table_input, default_project_id, var_name=None):
    """
    Split a ``(<project>.|<project>:)<dataset>.<table>`` string into its
    (project_id, dataset_id, table_id) components, falling back to
    *default_project_id* when no project is given.

    :param table_input: the dotted/colon-separated table reference.
    :param default_project_id: project to use when none is embedded.
    :param var_name: optional name of the originating variable, used only to
        improve error/log messages.
    :return: tuple of (project_id, dataset_id, table_id).
    """
    # NOTE(review): asserts are stripped under ``python -O``; input validation
    # here silently disappears in optimized mode.
    assert default_project_id is not None, "INTERNAL: No default project is specified"

    def var_print(var_name):
        # Prefix for error messages, empty when no variable name was given.
        if var_name is None:
            return ""
        else:
            return "Format exception for {var}: ".format(var=var_name)

    # More than three separators cannot form a valid reference.
    if table_input.count('.') + table_input.count(':') > 3:
        raise Exception(('{var}Use either : or . to specify project '
                         'got {input}').format(
                             var=var_print(var_name), input=table_input))

    # First try to peel off a project specified with ':'.
    cmpt = table_input.rsplit(':', 1)
    project_id = None
    rest = table_input
    if len(cmpt) == 1:
        project_id = None
        rest = cmpt[0]
    elif len(cmpt) == 2 and cmpt[0].count(':') <= 1:
        # Only treat the prefix as a project when the remainder is not
        # already a full project.dataset.table reference.
        if cmpt[-1].count('.') != 2:
            project_id = cmpt[0]
            rest = cmpt[1]
    else:
        raise Exception(('{var}Expect format of (<project:)<dataset>.<table>, '
                         'got {input}').format(
                             var=var_print(var_name), input=table_input))

    # Then split the remainder on '.' into project/dataset/table.
    cmpt = rest.split('.')
    if len(cmpt) == 3:
        # A dotted project may not be combined with a colon project.
        assert project_id is None, ("{var}Use either : or . to specify project"
                                    ).format(var=var_print(var_name))
        project_id = cmpt[0]
        dataset_id = cmpt[1]
        table_id = cmpt[2]

    elif len(cmpt) == 2:
        dataset_id = cmpt[0]
        table_id = cmpt[1]
    else:
        raise Exception(
            ('{var}Expect format of (<project.|<project:)<dataset>.<table>, '
             'got {input}').format(var=var_print(var_name), input=table_input))

    if project_id is None:
        # Fall back to the default project, logging only when the caller
        # identified the originating variable.
        if var_name is not None:
            log = LoggingMixin().log
            log.info('Project not included in {var}: {input}; '
                     'using project "{project}"'.format(
                         var=var_name,
                         input=table_input,
                         project=default_project_id))
        project_id = default_project_id

    return project_id, dataset_id, table_id
| apache-2.0 |
frank-tancf/scikit-learn | sklearn/tests/test_isotonic.py | 13 | 13122 | import warnings
import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    """Fitting on shuffled (x, y, w) must match fitting in the original order."""
    # check that fit is permutation invariant.
    # regression test of missing sorting of sample-weights
    ir = IsotonicRegression()
    x = [1, 2, 3, 4, 5, 6, 7]
    y = [1, 41, 51, 1, 2, 5, 24]
    sample_weight = [1, 2, 3, 4, 5, 6, 7]
    x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
    y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
    y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
    assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
    """check_increasing returns True for a monotonically increasing y."""
    x = [0, 1, 2, 3, 4, 5]
    y = [0, 1.5, 2.77, 8.99, 8.99, 50]
    # Check that we got increasing=True and no warnings
    is_increasing = assert_no_warnings(check_increasing, x, y)
    assert_true(is_increasing)
def test_check_increasing_up_extreme():
    """check_increasing returns True for a perfectly linear increasing y."""
    x = [0, 1, 2, 3, 4, 5]
    y = [0, 1, 2, 3, 4, 5]
    # Check that we got increasing=True and no warnings
    is_increasing = assert_no_warnings(check_increasing, x, y)
    assert_true(is_increasing)
def test_check_increasing_down():
    """check_increasing returns False for a monotonically decreasing y."""
    x = [0, 1, 2, 3, 4, 5]
    y = [0, -1.5, -2.77, -8.99, -8.99, -50]
    # Check that we got increasing=False and no warnings
    is_increasing = assert_no_warnings(check_increasing, x, y)
    assert_false(is_increasing)
def test_check_increasing_down_extreme():
    """check_increasing returns False for a perfectly linear decreasing y."""
    x = [0, 1, 2, 3, 4, 5]
    y = [0, -1, -2, -3, -4, -5]
    # Check that we got increasing=False and no warnings
    is_increasing = assert_no_warnings(check_increasing, x, y)
    assert_false(is_increasing)
def test_check_ci_warn():
    """Ambiguous (alternating) data warns about the confidence interval."""
    x = [0, 1, 2, 3, 4, 5]
    y = [0, -1, 2, -3, 4, -5]
    # Check that we got increasing=False and CI interval warning
    is_increasing = assert_warns_message(UserWarning, "interval",
                                         check_increasing,
                                         x, y)
    assert_false(is_increasing)
def test_isotonic_regression():
    """Basic PAVA result, fit/transform equivalence, and permutation immunity."""
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    y_ = np.array([3, 6, 6, 8, 8, 8, 10])
    assert_array_equal(y_, isotonic_regression(y))
    x = np.arange(len(y))
    ir = IsotonicRegression(y_min=0., y_max=1.)
    ir.fit(x, y)
    assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
    assert_array_equal(ir.transform(x), ir.predict(x))
    # check that it is immune to permutation
    perm = np.random.permutation(len(y))
    ir = IsotonicRegression(y_min=0., y_max=1.)
    assert_array_equal(ir.fit_transform(x[perm], y[perm]),
                       ir.fit_transform(x, y)[perm])
    assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
    # check we don't crash when all x are equal:
    ir = IsotonicRegression()
    assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
    """Tied x at the minimum are averaged and fit/fit_transform agree."""
    # Setup examples with ties on minimum
    x = [1, 1, 2, 3, 4, 5]
    y = [1, 2, 3, 4, 5, 6]
    y_true = [1.5, 1.5, 3, 4, 5, 6]
    # Check that we get identical results for fit/transform and fit_transform
    ir = IsotonicRegression()
    ir.fit(x, y)
    assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
    assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
    """Tied x at the maximum are averaged and fit/fit_transform agree."""
    # Setup examples with ties on maximum
    x = [1, 2, 3, 4, 5, 5]
    y = [1, 2, 3, 4, 5, 6]
    y_true = [1, 2, 3, 4, 5.5, 5.5]
    # Check that we get identical results for fit/transform and fit_transform
    ir = IsotonicRegression()
    ir.fit(x, y)
    assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
    assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
    """
    Test isotonic regression fit, transform  and fit_transform
    against the "secondary" ties method and "pituitary" data from R
     "isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
     Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
    (PAVA) and Active Set Methods

    Set values based on pituitary example and
     the following R command detailed in the paper above:
    > library("isotone")
    > data("pituitary")
    > res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
    > res1$x

    `isotone` version: 1.0-2, 2014-09-07
    R version: R version 3.1.1 (2014-07-10)
    """
    x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
    y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
    # Reference values computed with R's isotone::gpava (see docstring).
    y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
              22.22222, 22.22222, 22.22222, 24.25, 24.25]

    # Check fit, transform and fit_transform
    ir = IsotonicRegression()
    ir.fit(x, y)
    assert_array_almost_equal(ir.transform(x), y_true, 4)
    assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
    """increasing=False produces a non-increasing fitted sequence."""
    y = np.array([10, 9, 10, 7, 6, 6.1, 5])
    y_ = IsotonicRegression(increasing=False).fit_transform(
        np.arange(len(y)), y)
    # Every consecutive difference must be non-negative going left to right.
    assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
    """increasing='auto' detects a decreasing relationship."""
    # Set y and x for decreasing
    y = np.array([10, 9, 10, 7, 6, 6.1, 5])
    x = np.arange(len(y))
    # Create model and fit_transform
    ir = IsotonicRegression(increasing='auto')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        y_ = ir.fit_transform(x, y)
        # work-around for pearson divide warnings in scipy <= 0.17.0
        assert_true(all(["invalid value encountered in "
                         in str(warn.message) for warn in w]))
    # Check that relationship decreases
    is_increasing = y_[0] < y_[-1]
    assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
    """increasing='auto' detects an increasing relationship."""
    # Set y and x for increasing
    y = np.array([5, 6.1, 6, 7, 10, 9, 10])
    x = np.arange(len(y))
    # Create model and fit_transform
    ir = IsotonicRegression(increasing='auto')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        y_ = ir.fit_transform(x, y)
        # work-around for pearson divide warnings in scipy <= 0.17.0
        assert_true(all(["invalid value encountered in "
                         in str(warn.message) for warn in w]))
    # Check that relationship increases
    is_increasing = y_[0] < y_[-1]
    assert_true(is_increasing)
def test_assert_raises_exceptions():
    """Invalid shapes / mismatched lengths raise ValueError."""
    ir = IsotonicRegression()
    rng = np.random.RandomState(42)
    # weight vector shorter than x/y
    assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
    # y shorter than x
    assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
    # 2-D X is not supported
    assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
    assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
    """Omitting sample_weight behaves like passing all-ones weights."""
    # check if default value of sample_weight parameter is one
    ir = IsotonicRegression()
    # random test data
    rng = np.random.RandomState(42)
    n = 100
    x = np.arange(n)
    y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
    # check if value is correctly used
    weights = np.ones(n)
    y_set_value = ir.fit_transform(x, y, sample_weight=weights)
    y_default_value = ir.fit_transform(x, y)
    assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
    """y_min/y_max clip the fitted values to the requested range."""
    # check if min and max values are used correctly
    ir = IsotonicRegression(y_min=2, y_max=4)
    n = 6
    x = np.arange(n)
    y = np.arange(n)
    y_test = [2, 2, 2, 3, 4, 4]
    y_result = np.round(ir.fit_transform(x, y))
    assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
    """Sample weights shift the pooled averages as expected."""
    ir = IsotonicRegression()
    x = [1, 2, 3, 4, 5, 6, 7]
    y = [1, 41, 51, 1, 2, 5, 24]
    sample_weight = [1, 2, 3, 4, 5, 6, 7]
    expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
    received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
    assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
    """out_of_bounds='raise' rejects predictions outside the training range."""
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
    ir.fit(x, y)
    # Check that an exception is thrown
    assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
    """out_of_bounds='clip' pins out-of-range predictions to the extremes."""
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
    ir.fit(x, y)
    # Predict from  training and test x and check that min/max match.
    y1 = ir.predict([min(x) - 10, max(x) + 10])
    y2 = ir.predict(x)
    assert_equal(max(y1), max(y2))
    assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
    """out_of_bounds='nan' yields NaN for out-of-range inputs."""
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
    ir.fit(x, y)
    # Predict from  training and test x and check that we have two NaNs.
    y1 = ir.predict([min(x) - 10, max(x) + 10])
    assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
    """An invalid out_of_bounds value is rejected at fit time."""
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
    # Make sure that we throw an error for bad out_of_bounds value
    assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
    """An out_of_bounds value corrupted after fit is rejected at transform."""
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
    # Make sure that we throw an error for bad out_of_bounds value in transform
    ir.fit(x, y)
    ir.out_of_bounds = "xyz"
    assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
    """A pickled-and-restored model predicts identically to the original."""
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
    ir.fit(x, y)
    ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
    ir2 = pickle.loads(ir_ser)
    np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
    """Duplicated minimum x values must not produce non-finite predictions."""
    x = [0, 0, 1]
    y = [0, 0, 1]
    ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
    ir.fit(x, y)
    all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
    assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
    """Regression test: zero sample weights must not cause an infinite loop."""
    # Test from @ogrisel's issue:
    # https://github.com/scikit-learn/scikit-learn/issues/4297
    # Get deterministic RNG with seed
    rng = np.random.RandomState(42)
    # Create regression and samples
    regression = IsotonicRegression()
    n_samples = 50
    x = np.linspace(-3, 3, n_samples)
    y = x + rng.uniform(size=n_samples)
    # Get some random weights and zero out
    w = rng.uniform(size=n_samples)
    w[5:8] = 0
    regression.fit(x, y, sample_weight=w)
    # This will hang in failure case.
    regression.fit(x, y, sample_weight=w)
def test_fast_predict():
    """Interpolating on the trimmed point set must equal the untrimmed fit."""
    # test that the faster prediction change doesn't
    # affect out-of-sample predictions:
    # https://github.com/scikit-learn/scikit-learn/pull/6206
    rng = np.random.RandomState(123)
    n_samples = 10 ** 3
    # X values over the -10,10 range
    X_train = 20.0 * rng.rand(n_samples) - 10
    y_train = np.less(
        rng.rand(n_samples),
        1.0 / (1.0 + np.exp(-X_train))
    ).astype('int64')
    weights = rng.rand(n_samples)
    # we also want to test that everything still works when some weights are 0
    weights[rng.rand(n_samples) < 0.1] = 0
    slow_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
    fast_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
    # Build interpolation function with ALL input data, not just the
    # non-redundant subset. The following 2 lines are taken from the
    # .fit() method, without removing unnecessary points
    X_train_fit, y_train_fit = slow_model._build_y(X_train, y_train,
                                                   sample_weight=weights,
                                                   trim_duplicates=False)
    slow_model._build_f(X_train_fit, y_train_fit)
    # fit with just the necessary data
    fast_model.fit(X_train, y_train, sample_weight=weights)
    X_test = 20.0 * rng.rand(n_samples) - 10
    y_pred_slow = slow_model.predict(X_test)
    y_pred_fast = fast_model.predict(X_test)
    assert_array_equal(y_pred_slow, y_pred_fast)
| bsd-3-clause |
kprestel/PyInvestment | pytech/decorators/decorators.py | 2 | 4087 | from functools import wraps
import pandas as pd
from arctic.chunkstore.chunkstore import ChunkStore
import pytech.utils as utils
from pytech.mongo import ARCTIC_STORE, BarStore
from pytech.utils.exceptions import InvalidStoreError, PyInvestmentKeyError
from pandas.tseries.offsets import BDay
from pytech.data._holders import DfLibName
def memoize(obj):
    """Memoize functions so they don't have to be reevaluated."""
    # The cache is exposed on the wrapped function itself so callers can
    # inspect or clear it.
    cache = obj.cache = {}

    @wraps(obj)
    def memoizer(*args, **kwargs):
        # Key on the textual form of the full argument list.
        key = str(args) + str(kwargs)
        try:
            return cache[key]
        except KeyError:
            result = cache[key] = obj(*args, **kwargs)
            return result

    return memoizer
def optional_arg_decorator(fn):
    """Used to **only** to wrap decorators that take optional arguments."""
    def wrapped_decorator(*args):
        # Bare usage (@decorator): the decoratee arrives directly.
        if len(args) == 1 and callable(args[0]):
            return fn(args[0])

        # Parameterized usage (@decorator(arg, ...)): return the real
        # decorator, closing over the supplied arguments.
        def real_decorator(decoratee):
            return fn(decoratee, *args)

        return real_decorator

    return wrapped_decorator
def write_chunks(chunk_size='D', remove_ticker=True):
    """
    Used to wrap functions that return :class:`pd.DataFrame`s and writes the
    output to a :class:`ChunkStore`. It is required that the the wrapped
    function contains a column called 'ticker' to use as the key in the db.

    The target library name is taken from the wrapped function's return
    value (a :class:`DfLibName`), not from a parameter of this decorator.

    :param chunk_size: The chunk size to use options are:

        * D = Days
        * M = Months
        * Y = Years

    :param remove_ticker: If true the ticker column will be deleted before the
        :class:`pd.DataFrame` is returned, otherwise it will remain which is
        going to use more memory than required.
    :return: The output of the original function.
    """

    def wrapper(f):
        @wraps(f)
        def eval_and_write(*args, **kwargs):
            # The wrapped function must return a DfLibName pair.
            df_lib_name = f(*args, **kwargs)
            df = df_lib_name.df
            lib_name = df_lib_name.lib_name
            try:
                # TODO: make this use the fast scalar getter
                ticker = df[utils.TICKER_COL][0]
                # ticker = df.at[0, pd_utils.TICKER_COL]
            except KeyError:
                raise PyInvestmentKeyError(
                    'Decorated functions are required to add a column '
                    f'{utils.TICKER_COL} that contains the ticker.')

            if remove_ticker:
                # should this be saved?
                df.drop(utils.TICKER_COL, axis=1, inplace=True)

            # this is a work around for a flaw in the the arctic DateChunker.
            # NOTE(review): with 'or', a frame that has a 'date' COLUMN but no
            # 'date' index level still enters this branch and may raise --
            # possibly 'and' was intended; confirm against DateChunker usage.
            if 'date' not in df.columns or 'date' not in df.index.names:
                if df.index.dtype == pd.to_datetime(['2017']).dtype:
                    df.index.name = 'date'
                else:
                    raise ValueError('df must be datetime indexed or have a'
                                     'column named "date".')

            if lib_name not in ARCTIC_STORE.list_libraries():
                # create the lib if it does not already exist
                ARCTIC_STORE.initialize_library(lib_name,
                                                BarStore.LIBRARY_TYPE)

            lib = ARCTIC_STORE[lib_name]

            if not isinstance(lib, ChunkStore):
                raise InvalidStoreError(required=ChunkStore,
                                        provided=type(lib))
            else:
                lib.update(ticker, df, chunk_size=chunk_size, upsert=True)

            # Re-attach a business-day frequency before handing the frame back.
            df.index.freq = BDay()
            return DfLibName(df, lib_name)

        return eval_and_write

    return wrapper
class lazy_property(object):
    """
    Used for lazy evaluation of an obj attr.

    On first access the wrapped function runs once and its result is stored
    on the instance under the same name, shadowing this descriptor so later
    reads are plain attribute lookups. Property should represent non-mutable
    data, as it replaces itself.
    """

    def __init__(self, f):
        self.f = f
        self.func_name = f.__name__

    def __get__(self, obj, cls):
        # Class-level access (no instance) yields None.
        if obj is None:
            return None
        value = self.f(obj)
        # Overwrite the descriptor with the computed value on the instance.
        setattr(obj, self.func_name, value)
        return value
| mit |
vortex-ape/scikit-learn | sklearn/discriminant_analysis.py | 4 | 27767 | """
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from .utils import deprecated
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.multiclass import check_classification_targets
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
    """Estimate covariance matrix (using optional shrinkage).

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.

    shrinkage : string or float, optional
        Shrinkage parameter, possible values:
          - None or 'empirical': no shrinkage (default).
          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
          - float between 0 and 1: fixed shrinkage parameter.

    Returns
    -------
    s : array, shape (n_features, n_features)
        Estimated covariance matrix.
    """
    if shrinkage is None:
        shrinkage = "empirical"
    if isinstance(shrinkage, string_types):
        if shrinkage == 'empirical':
            return empirical_covariance(X)
        if shrinkage == 'auto':
            # Standardize, estimate with Ledoit-Wolf, then undo the scaling.
            sc = StandardScaler()
            X = sc.fit_transform(X)
            s = ledoit_wolf(X)[0]
            return sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
        raise ValueError('unknown shrinkage parameter')
    if isinstance(shrinkage, float) or isinstance(shrinkage, int):
        if shrinkage < 0 or shrinkage > 1:
            raise ValueError('shrinkage parameter must be between 0 and 1')
        return shrunk_covariance(empirical_covariance(X), shrinkage)
    raise TypeError('shrinkage must be of string or int type')
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_classes, n_features)
Class means.
"""
classes, y = np.unique(y, return_inverse=True)
cnt = np.bincount(y)
means = np.zeros(shape=(len(classes), X.shape[1]))
np.add.at(means, y, X)
means /= cnt[:, None]
return means
def _class_cov(X, y, priors, shrinkage=None):
    """Compute class covariance matrix.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values.
    priors : array-like, shape (n_classes,)
        Class priors.
    shrinkage : string or float, optional
        Shrinkage parameter, possible values:
          - None: no shrinkage (default).
          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
          - float between 0 and 1: fixed shrinkage parameter.
    Returns
    -------
    cov : array-like, shape (n_features, n_features)
        Class covariance matrix.
    """
    n_features = X.shape[1]
    cov = np.zeros((n_features, n_features))
    # Prior-weighted sum of the per-class covariance estimates.
    for prior, label in zip(priors, np.unique(y)):
        cov += prior * np.atleast_2d(_cov(X[y == label, :], shrinkage))
    return cov
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
                                 TransformerMixin):
    """Linear Discriminant Analysis
    A classifier with a linear decision boundary, generated by fitting class
    conditional densities to the data and using Bayes' rule.
    The model fits a Gaussian density to each class, assuming that all classes
    share the same covariance matrix.
    The fitted model can also be used to reduce the dimensionality of the input
    by projecting it to the most discriminative directions.
    .. versionadded:: 0.17
       *LinearDiscriminantAnalysis*.
    Read more in the :ref:`User Guide <lda_qda>`.
    Parameters
    ----------
    solver : string, optional
        Solver to use, possible values:
          - 'svd': Singular value decomposition (default).
            Does not compute the covariance matrix, therefore this solver is
            recommended for data with a large number of features.
          - 'lsqr': Least squares solution, can be combined with shrinkage.
          - 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
    shrinkage : string or float, optional
        Shrinkage parameter, possible values:
          - None: no shrinkage (default).
          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
          - float between 0 and 1: fixed shrinkage parameter.
        Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
    priors : array, optional, shape (n_classes,)
        Class priors.
    n_components : int, optional
        Number of components (< n_classes - 1) for dimensionality reduction.
    store_covariance : bool, optional
        Additionally compute class covariance matrix (default False), used
        only in 'svd' solver.
        .. versionadded:: 0.17
    tol : float, optional, (default 1.0e-4)
        Threshold used for rank estimation in SVD solver.
        .. versionadded:: 0.17
    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_classes, n_features)
        Weight vector(s).
    intercept_ : array, shape (n_features,)
        Intercept term.
    covariance_ : array-like, shape (n_features, n_features)
        Covariance matrix (shared by all classes).
    explained_variance_ratio_ : array, shape (n_components,)
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0. Only available when eigen
        or svd solver is used.
    means_ : array-like, shape (n_classes, n_features)
        Class means.
    priors_ : array-like, shape (n_classes,)
        Class priors (sum to 1).
    scalings_ : array-like, shape (rank, n_classes - 1)
        Scaling of the features in the space spanned by the class centroids.
    xbar_ : array-like, shape (n_features,)
        Overall mean.
    classes_ : array-like, shape (n_classes,)
        Unique class labels.
    See also
    --------
    sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
        Discriminant Analysis
    Notes
    -----
    The default solver is 'svd'. It can perform both classification and
    transform, and it does not rely on the calculation of the covariance
    matrix. This can be an advantage in situations where the number of features
    is large. However, the 'svd' solver cannot be used with shrinkage.
    The 'lsqr' solver is an efficient algorithm that only works for
    classification. It supports shrinkage.
    The 'eigen' solver is based on the optimization of the between class
    scatter to within class scatter ratio. It can be used for both
    classification and transform, and it supports shrinkage. However, the
    'eigen' solver needs to compute the covariance matrix, so it might not be
    suitable for situations with a high number of features.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = LinearDiscriminantAnalysis()
    >>> clf.fit(X, y)
    LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
                  solver='svd', store_covariance=False, tol=0.0001)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    """
    def __init__(self, solver='svd', shrinkage=None, priors=None,
                 n_components=None, store_covariance=False, tol=1e-4):
        self.solver = solver
        self.shrinkage = shrinkage
        self.priors = priors
        self.n_components = n_components
        self.store_covariance = store_covariance  # used only in svd solver
        self.tol = tol  # used only in svd solver
    def _solve_lsqr(self, X, y, shrinkage):
        """Least squares solver.
        The least squares solver computes a straightforward solution of the
        optimal decision rule based directly on the discriminant functions. It
        can only be used for classification (with optional shrinkage), because
        estimation of eigenvectors is not performed. Therefore, dimensionality
        reduction with the transform is not supported.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_classes)
            Target values.
        shrinkage : string or float, optional
            Shrinkage parameter, possible values:
              - None: no shrinkage (default).
              - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
              - float between 0 and 1: fixed shrinkage parameter.
        Notes
        -----
        This solver is based on [1]_, section 2.6.2, pp. 39-41.
        References
        ----------
        .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
           (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
           0-471-05669-3.
        """
        self.means_ = _class_means(X, y)
        self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
        # Least-squares solve of covariance_ * coef.T = means.T,
        # i.e. coef_ = means * Sigma^-1 without forming the inverse.
        self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
        self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
                           np.log(self.priors_))
    def _solve_eigen(self, X, y, shrinkage):
        """Eigenvalue solver.
        The eigenvalue solver computes the optimal solution of the Rayleigh
        coefficient (basically the ratio of between class scatter to within
        class scatter). This solver supports both classification and
        dimensionality reduction (with optional shrinkage).
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.
        shrinkage : string or float, optional
            Shrinkage parameter, possible values:
              - None: no shrinkage (default).
              - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
              - float between 0 and 1: fixed shrinkage constant.
        Notes
        -----
        This solver is based on [1]_, section 3.8.3, pp. 121-124.
        References
        ----------
        .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
           (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
           0-471-05669-3.
        """
        self.means_ = _class_means(X, y)
        self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
        Sw = self.covariance_  # within scatter
        St = _cov(X, shrinkage)  # total scatter
        Sb = St - Sw  # between scatter
        # Generalized symmetric eigenproblem: Sb v = lambda Sw v
        evals, evecs = linalg.eigh(Sb, Sw)
        self.explained_variance_ratio_ = np.sort(evals / np.sum(evals)
                                                 )[::-1][:self._max_components]
        evecs = evecs[:, np.argsort(evals)[::-1]]  # sort eigenvectors
        # normalize each eigenvector to unit euclidean length
        evecs /= np.linalg.norm(evecs, axis=0)
        self.scalings_ = evecs
        # Project the class means onto the discriminant directions and back.
        self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
        self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
                           np.log(self.priors_))
    def _solve_svd(self, X, y):
        """SVD solver.
        Computes the discriminant directions without ever forming the
        covariance matrix, via two successive SVDs (within-class then
        between-class scaling).
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.
        """
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        self.means_ = _class_means(X, y)
        if self.store_covariance:
            self.covariance_ = _class_cov(X, y, self.priors_)
        # Center each sample on its own class mean.
        Xc = []
        for idx, group in enumerate(self.classes_):
            Xg = X[y == group, :]
            Xc.append(Xg - self.means_[idx])
        # Overall (prior-weighted) mean.
        self.xbar_ = np.dot(self.priors_, self.means_)
        Xc = np.concatenate(Xc, axis=0)
        # 1) within (univariate) scaling by with classes std-dev
        std = Xc.std(axis=0)
        # avoid division by zero in normalization
        std[std == 0] = 1.
        fac = 1. / (n_samples - n_classes)
        # 2) Within variance scaling
        X = np.sqrt(fac) * (Xc / std)
        # SVD of centered (within)scaled data
        U, S, V = linalg.svd(X, full_matrices=False)
        rank = np.sum(S > self.tol)
        if rank < n_features:
            warnings.warn("Variables are collinear.")
        # Scaling of within covariance is: V' 1/S
        scalings = (V[:rank] / std).T / S[:rank]
        # 3) Between variance scaling
        # Scale weighted centers
        X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
                    (self.means_ - self.xbar_).T).T, scalings)
        # Centers are living in a space with n_classes-1 dim (maximum)
        # Use SVD to find projection in the space spanned by the
        # (n_classes) centers
        _, S, V = linalg.svd(X, full_matrices=0)
        self.explained_variance_ratio_ = (S**2 / np.sum(
            S**2))[:self._max_components]
        rank = np.sum(S > self.tol * S[0])
        self.scalings_ = np.dot(scalings, V.T[:, :rank])
        # Express the linear decision function in the original feature space.
        coef = np.dot(self.means_ - self.xbar_, self.scalings_)
        self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1) +
                           np.log(self.priors_))
        self.coef_ = np.dot(coef, self.scalings_.T)
        self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
    def fit(self, X, y):
        """Fit LinearDiscriminantAnalysis model according to the given
           training data and parameters.
           .. versionchanged:: 0.19
              *store_covariance* has been moved to main constructor.
           .. versionchanged:: 0.19
              *tol* has been moved to main constructor.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array, shape (n_samples,)
            Target values.
        """
        X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)
        self.classes_ = unique_labels(y)
        if self.priors is None:  # estimate priors from sample
            _, y_t = np.unique(y, return_inverse=True)  # non-negative ints
            self.priors_ = np.bincount(y_t) / float(len(y))
        else:
            self.priors_ = np.asarray(self.priors)
        if (self.priors_ < 0).any():
            raise ValueError("priors must be non-negative")
        if not np.isclose(self.priors_.sum(), 1.0):
            warnings.warn("The priors do not sum to 1. Renormalizing",
                          UserWarning)
            self.priors_ = self.priors_ / self.priors_.sum()
        # Get the maximum number of components
        if self.n_components is None:
            self._max_components = len(self.classes_) - 1
        else:
            self._max_components = min(len(self.classes_) - 1,
                                       self.n_components)
        # Dispatch to the requested solver; shrinkage only makes sense for
        # the solvers that actually estimate a covariance matrix.
        if self.solver == 'svd':
            if self.shrinkage is not None:
                raise NotImplementedError('shrinkage not supported')
            self._solve_svd(X, y)
        elif self.solver == 'lsqr':
            self._solve_lsqr(X, y, shrinkage=self.shrinkage)
        elif self.solver == 'eigen':
            self._solve_eigen(X, y, shrinkage=self.shrinkage)
        else:
            raise ValueError("unknown solver {} (valid solvers are 'svd', "
                             "'lsqr', and 'eigen').".format(self.solver))
        if self.classes_.size == 2:  # treat binary case as a special case
            # Collapse the two per-class discriminants into a single
            # hyperplane (difference of the two linear functions).
            self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
            self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
                                       ndmin=1)
        return self
    def transform(self, X):
        """Project data to maximize class separation.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Transformed data.
        """
        if self.solver == 'lsqr':
            raise NotImplementedError("transform not implemented for 'lsqr' "
                                      "solver (use 'svd' or 'eigen').")
        # Either attribute suffices: svd fits xbar_, eigen fits scalings_.
        check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
        X = check_array(X)
        if self.solver == 'svd':
            X_new = np.dot(X - self.xbar_, self.scalings_)
        elif self.solver == 'eigen':
            X_new = np.dot(X, self.scalings_)
        return X_new[:, :self._max_components]
    def predict_proba(self, X):
        """Estimate probability.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        C : array, shape (n_samples, n_classes)
            Estimated probabilities.
        """
        # In-place logistic sigmoid of the decision values:
        # prob = 1 / (1 + exp(-decision))
        prob = self.decision_function(X)
        prob *= -1
        np.exp(prob, prob)
        prob += 1
        np.reciprocal(prob, prob)
        if len(self.classes_) == 2:  # binary case
            return np.column_stack([1 - prob, prob])
        else:
            # OvR normalization, like LibLinear's predict_probability
            prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
            return prob
    def predict_log_proba(self, X):
        """Estimate log probability.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        C : array, shape (n_samples, n_classes)
            Estimated log probabilities.
        """
        return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
    """Quadratic Discriminant Analysis
    A classifier with a quadratic decision boundary, generated
    by fitting class conditional densities to the data
    and using Bayes' rule.
    The model fits a Gaussian density to each class.
    .. versionadded:: 0.17
       *QuadraticDiscriminantAnalysis*
    Read more in the :ref:`User Guide <lda_qda>`.
    Parameters
    ----------
    priors : array, optional, shape = [n_classes]
        Priors on classes
    reg_param : float, optional
        Regularizes the covariance estimate as
        ``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
    store_covariance : boolean
        If True the covariance matrices are computed and stored in the
        `self.covariance_` attribute.
        .. versionadded:: 0.17
    tol : float, optional, default 1.0e-4
        Threshold used for rank estimation.
        .. versionadded:: 0.17
    store_covariances : boolean
        Deprecated, use `store_covariance`.
    Attributes
    ----------
    covariance_ : list of array-like, shape = [n_features, n_features]
        Covariance matrices of each class.
    means_ : array-like, shape = [n_classes, n_features]
        Class means.
    priors_ : array-like, shape = [n_classes]
        Class priors (sum to 1).
    rotations_ : list of arrays
        For each class k an array of shape [n_features, n_k], with
        ``n_k = min(n_features, number of elements in class k)``
        It is the rotation of the Gaussian distribution, i.e. its
        principal axis.
    scalings_ : list of arrays
        For each class k an array of shape [n_k]. It contains the scaling
        of the Gaussian distributions along its principal axes, i.e. the
        variance in the rotated coordinate system.
    Examples
    --------
    >>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = QuadraticDiscriminantAnalysis()
    >>> clf.fit(X, y)
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
                                  store_covariance=False,
                                  store_covariances=None, tol=0.0001)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    See also
    --------
    sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
        Discriminant Analysis
    """
    def __init__(self, priors=None, reg_param=0., store_covariance=False,
                 tol=1.0e-4, store_covariances=None):
        # NOTE(review): converting `priors` here mutates the constructor
        # argument semantics slightly (stores an ndarray, not the raw input);
        # kept as-is to preserve behavior.
        self.priors = np.asarray(priors) if priors is not None else None
        self.reg_param = reg_param
        self.store_covariances = store_covariances
        self.store_covariance = store_covariance
        self.tol = tol
    @property
    @deprecated("Attribute ``covariances_`` was deprecated in version"
                " 0.19 and will be removed in 0.21. Use "
                "``covariance_`` instead")
    def covariances_(self):
        # Backward-compatible alias for the renamed attribute.
        return self.covariance_
    def fit(self, X, y):
        """Fit the model according to the given training data and parameters.
            .. versionchanged:: 0.19
               ``store_covariances`` has been moved to main constructor as
               ``store_covariance``
            .. versionchanged:: 0.19
               ``tol`` has been moved to main constructor.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array, shape = [n_samples]
            Target values (integers)
        """
        X, y = check_X_y(X, y)
        check_classification_targets(y)
        # Re-encode labels as 0..n_classes-1 indices.
        self.classes_, y = np.unique(y, return_inverse=True)
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        if n_classes < 2:
            raise ValueError('The number of classes has to be greater than'
                             ' one; got %d class' % (n_classes))
        if self.priors is None:
            self.priors_ = np.bincount(y) / float(n_samples)
        else:
            self.priors_ = self.priors
        cov = None
        # Honor both the new and the deprecated flag.
        store_covariance = self.store_covariance or self.store_covariances
        if self.store_covariances:
            warnings.warn("'store_covariances' was renamed to store_covariance"
                          " in version 0.19 and will be removed in 0.21.",
                          DeprecationWarning)
        if store_covariance:
            cov = []
        means = []
        scalings = []
        rotations = []
        for ind in xrange(n_classes):
            Xg = X[y == ind, :]
            meang = Xg.mean(0)
            means.append(meang)
            if len(Xg) == 1:
                raise ValueError('y has only 1 sample in class %s, covariance '
                                 'is ill defined.' % str(self.classes_[ind]))
            Xgc = Xg - meang
            # Xgc = U * S * V.T
            U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
            rank = np.sum(S > self.tol)
            if rank < n_features:
                warnings.warn("Variables are collinear")
            # Per-class variances along the principal axes, regularized
            # towards the identity by reg_param.
            S2 = (S ** 2) / (len(Xg) - 1)
            S2 = ((1 - self.reg_param) * S2) + self.reg_param
            # NOTE(review): `self.store_covariance or store_covariance` is
            # redundant (store_covariance already includes it); kept as-is.
            if self.store_covariance or store_covariance:
                # cov = V * (S^2 / (n-1)) * V.T
                cov.append(np.dot(S2 * Vt.T, Vt))
            scalings.append(S2)
            rotations.append(Vt.T)
        if self.store_covariance or store_covariance:
            self.covariance_ = cov
        self.means_ = np.asarray(means)
        self.scalings_ = scalings
        self.rotations_ = rotations
        return self
    def _decision_function(self, X):
        # Per-class log-posterior (up to a shared constant): the squared
        # Mahalanobis distance is computed in the rotated/whitened frame.
        check_is_fitted(self, 'classes_')
        X = check_array(X)
        norm2 = []
        for i in range(len(self.classes_)):
            R = self.rotations_[i]
            S = self.scalings_[i]
            Xm = X - self.means_[i]
            # Whitened coordinates: rotate then scale by 1/sqrt(variance).
            X2 = np.dot(Xm, R * (S ** (-0.5)))
            norm2.append(np.sum(X2 ** 2, 1))
        norm2 = np.array(norm2).T  # shape = [len(X), n_classes]
        # u is the log-determinant term of each class covariance.
        u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
        return (-0.5 * (norm2 + u) + np.log(self.priors_))
    def decision_function(self, X):
        """Apply decision function to an array of samples.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples (test vectors).
        Returns
        -------
        C : array, shape = [n_samples, n_classes] or [n_samples,]
            Decision function values related to each class, per sample.
            In the two-class case, the shape is [n_samples,], giving the
            log likelihood ratio of the positive class.
        """
        dec_func = self._decision_function(X)
        # handle special case of two classes
        if len(self.classes_) == 2:
            return dec_func[:, 1] - dec_func[:, 0]
        return dec_func
    def predict(self, X):
        """Perform classification on an array of test vectors X.
        The predicted class C for each sample in X is returned.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array, shape = [n_samples]
        """
        d = self._decision_function(X)
        # Pick the class with the highest log-posterior.
        y_pred = self.classes_.take(d.argmax(1))
        return y_pred
    def predict_proba(self, X):
        """Return posterior probabilities of classification.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.
        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior probabilities of classification per class.
        """
        values = self._decision_function(X)
        # compute the likelihood of the underlying gaussian models
        # up to a multiplicative constant.
        # (subtracting the row max keeps the exponentials numerically stable)
        likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
        # compute posterior probabilities
        return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
    def predict_log_proba(self, X):
        """Return posterior probabilities of classification.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.
        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior log-probabilities of classification per class.
        """
        # XXX : can do better to avoid precision overflows
        probas_ = self.predict_proba(X)
        return np.log(probas_)
# --- begin file: python/fire_rs/geodata/display.py
#     (repo: fire-rs-laas/fire-rs-saop, license: BSD 3-Clause) ---
# Copyright (c) 2017, CNRS-LAAS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Display GeoData information on matplotlib figures"""
__all__ = ['GeoDataDisplay', 'GeoDataDisplayBase', 'DisplayExtension', 'UAVDisplayExtension']
from collections.abc import Sequence
import datetime
from typing import Optional, Tuple, Type, Union, Dict
import matplotlib
import matplotlib.axis
import matplotlib.cm
import matplotlib.dates
import matplotlib.figure
import matplotlib.ticker
import matplotlib.patches
import matplotlib.pyplot as plt
import matplotlib.transforms
import numpy as np
from matplotlib.colors import LightSource
from matplotlib.ticker import FuncFormatter
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.path import Path
from fire_rs.geodata.geo_data import GeoData
from fire_rs.deprecation import deprecated
# Closed polygon tracing a small "house" silhouette, recentered on the origin
# (vertices shifted by (1.5, 1.5)); used as a matplotlib scatter marker for
# base stations (see draw_base below).
house_marker = Path(np.array([[0., 0.],
                              [1., 0.],
                              [1., 1.],
                              [2., 1.],
                              [2., 0.],
                              [3., 0.],
                              [3., 2.],
                              [1.5, 3.],
                              [0., 2.],
                              [0., 0.]]) - np.array([1.5, 1.5]), closed=True)
class EngOffsetFormatter(matplotlib.ticker.EngFormatter):
    """Engineering-notation tick formatter that adds a constant offset to
    every tick value before formatting (e.g. to display local coordinates
    as absolute Lambert93 coordinates)."""

    def __init__(self, unit="", places=None, offset=0):
        super().__init__(unit, places)
        self._offset = offset

    def __call__(self, x, pos=None):
        # Format the shifted value, then append the unit suffix.
        label = "%s%s" % (self.format_eng(x + self._offset), self.unit)
        return self.fix_minus(label)
def get_pyplot_figure_and_axis():
    """Create a new pyplot figure and an equal-aspect East/North axis pair.

    Axis labels use a plain scalar formatter with no offset notation.
    :return: (figure, axes) tuple.
    """
    figure = plt.figure()
    axis = figure.gca(aspect='equal', xlabel="East", ylabel="North")
    formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
    axis.xaxis.set_major_formatter(formatter)
    axis.yaxis.set_major_formatter(formatter)
    return figure, axis
def plot_uav(ax, position: Tuple[float, float], orientation: float, size=1, **kwargs):
    """Draw a plane-shaped polygon marker on `ax`.

    :param ax: matplotlib axes to draw on.
    :param position: (x, y) center of the marker.
    :param orientation: rotation angle in radians.
    :param size: scale factor applied to the silhouette.
    :param kwargs: forwarded to matplotlib.patches.Polygon; `facecolor`
        defaults to 'blue' and `edgecolor` to 'black'.
    :return: the patch added to the axes.
    """
    # Idiomatic default injection (replaces the manual "if k not in kwargs").
    kwargs.setdefault('facecolor', 'blue')
    kwargs.setdefault('edgecolor', 'black')
    # Plane silhouette vertices, recentered on the origin and scaled.
    plane_vertices = (np.array([[3.5, 6], [4, 5], [4, 4], [7, 4], [7, 3],
                                [4, 3], [4, 1], [5, 1], [5, 0], [2, 0],
                                [2, 1], [3, 1], [3, 3], [0, 3], [0, 4],
                                [3, 4], [3, 5]]) - np.array([3.5, 3])) * size
    # 2D rotation matrix, applied as (vertices @ r) below.
    r = np.array([[np.cos(orientation), -np.sin(orientation)],
                  [np.sin(orientation), np.cos(orientation)]])
    plane_polygon = matplotlib.patches.Polygon(np.matmul(plane_vertices, r) + position,
                                               closed=True, fill=True, **kwargs)
    return ax.add_patch(plane_polygon)
@deprecated
def plot_uav_deprecated(ax, uav_state, size=1, facecolor='blue', edgecolor='black', **kwargs):
    """Deprecated: draw a plane marker from a (x, y, heading) state tuple.

    Kept for backward compatibility; use plot_uav instead. The heading in
    uav_state[2] is converted so that 0 points North (note the -angle + pi/2
    rotation below), unlike plot_uav which rotates by the raw angle.
    """
    plane_vertices = (np.array([[3.5, 6], [4, 5], [4, 4], [7, 4], [7, 3],
                                [4, 3], [4, 1], [5, 1], [5, 0], [2, 0],
                                [2, 1], [3, 1], [3, 3], [0, 3], [0, 4],
                                [3, 4], [3, 5]]) - np.array([3.5, 3])) * size
    r = np.array([[np.cos(-uav_state[2] + np.pi / 2), -np.sin(-uav_state[2] + np.pi / 2)],
                  [np.sin(-uav_state[2] + np.pi / 2), np.cos(-uav_state[2] + np.pi / 2)]])
    plane_polygon = matplotlib.patches.Polygon(np.matmul(plane_vertices, r) + uav_state[0:2],
                                               closed=True, fill=True,
                                               facecolor=facecolor, edgecolor=edgecolor, **kwargs)
    return ax.add_patch(plane_polygon)
@deprecated
def plot_elevation_contour(ax, x, y, z, **kwargs):
    """Deprecated: draw 15 elevation contour lines on `ax`.

    Only the 'cmap' kwarg is honored (defaults to gist_earth); other kwargs
    are ignored.
    """
    contour = ax.contour(x, y, z, 15, cmap=kwargs.get('cmap', matplotlib.cm.gist_earth))
    # labels = plt.clabel(contour, inline=1, fontsize=10)
    return contour
@deprecated
def plot_wind_flow(ax, x, y, wx, wy, wvel, **kwargs):
    """Deprecated: draw wind as a streamplot on `ax`.

    NOTE(review): `wvel` and `**kwargs` are accepted but never used —
    presumably leftovers from an earlier signature.
    """
    return ax.streamplot(x, y, wx, wy, density=1, linewidth=1, color='dimgrey')
@deprecated
def plot_wind_quiver(ax, x, y, wx, wy, **kwargs):
    """Deprecated: draw wind as a quiver plot on `ax`.

    `pivot` defaults to 'middle' and `color` to 'dimgrey'; all kwargs are
    forwarded to Axes.quiver.

    Bug fix: the previous version read `pivot`/`color` with `kwargs.get`
    AND forwarded the full `**kwargs`, so any caller actually passing those
    keywords got a duplicate-keyword-argument TypeError.
    """
    kwargs.setdefault('pivot', 'middle')
    kwargs.setdefault('color', 'dimgrey')
    return ax.quiver(x, y, wx, wy, **kwargs)
class GeoDataDisplayBase:
    """Base class managing the matplotlib figure/axes on which GeoData layers
    are drawn.

    Handles axis tick placement/formatting in world coordinates, keeps track
    of drawings and colorbars, and hosts pluggable DisplayExtension objects
    that are exposed as attributes (see __getattr__).
    """

    # z-order layer constants for stacking drawings
    BACKGROUND_LAYER = 0
    BACKGROUND_OVERLAY_LAYER = 100
    FOREGROUND_LAYER = 200
    FOREGROUND_OVERLAY_LAYER = 300

    def __init__(self, figure: 'matplotlib.figure.Figure', axes: 'matplotlib.axes.Axes',
                 geodata: 'GeoData', frame: 'Optional[Tuple[float, float]]' = None):
        """
        Initialize GeoDataDisplayBase
        :param figure: a matplotlib figure
        :param axes: a matplotlib axis
        :param geodata: a geodata
        :param frame: an optional reference frame for the figure data.
            ie. (0., 0.) for X and Y with local reference instead of Lambert93 by default.
        """
        self._figure, self._axes = figure, axes
        self._geodata = geodata
        if frame is None:
            # Default frame: the center of the geodata origin cell.
            # NOTE(review): cell_width is used for the y component too —
            # presumably cells are square; confirm against GeoData.
            self._frame = (
                geodata.x_offset + geodata.cell_width / 2,
                geodata.y_offset + geodata.cell_width / 2)
        else:
            self._frame = frame
        # World coordinates of every cell center along each axis.
        x = np.arange(geodata.max_x)
        self._x_ticks = (x * geodata.cell_width) + geodata.x_offset + geodata.cell_width / 2
        y = np.arange(geodata.max_y)
        self._y_ticks = (y * geodata.cell_height) + geodata.y_offset + geodata.cell_width / 2
        # Tick labels are displayed relative to the chosen frame.
        x_fmtr = EngOffsetFormatter(
            unit='m', offset=-geodata.x_offset - geodata.cell_width / 2 + self._frame[0])
        y_fmtr = EngOffsetFormatter(
            unit='m', offset=-geodata.y_offset - geodata.cell_width / 2 + self._frame[1])
        self._axes.xaxis.set_major_formatter(x_fmtr)
        self._axes.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(nbins=8))
        self._axes.yaxis.set_major_formatter(y_fmtr)
        self._axes.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(nbins=8))
        self._axes.set_xlim(self._x_ticks[0], self._x_ticks[-1])
        self._axes.set_ylim(self._y_ticks[0], self._y_ticks[-1])
        # Extent tuple suitable for Axes.imshow(extent=...).
        self._image_scale = (
            self._x_ticks[0], self._x_ticks[-1], self._y_ticks[0], self._y_ticks[-1])
        self._x_mesh, self._y_mesh = np.meshgrid(x, y)
        self._drawings = []
        self._colorbars = []
        self._extensions = {}

    @property
    def drawings(self):
        return self._drawings

    @property
    def colorbars(self):
        return self._colorbars

    @property
    def figure(self):
        return self._figure

    @property
    def axes(self):
        return self._axes

    @property
    def x_ticks(self):
        return self._x_ticks

    @property
    def y_ticks(self):
        return self._y_ticks

    @property
    def x_mesh(self):
        return self._x_mesh

    @property
    def y_mesh(self):
        return self._y_mesh

    @property
    def image_scale(self):
        return self._image_scale

    @property
    def geodata(self):
        return self._geodata

    @classmethod
    def pyplot_figure(cls, geodata, frame=None):
        """Alternate constructor creating a fresh pyplot figure and axis."""
        figure, axis = get_pyplot_figure_and_axis()
        return cls(figure, axis, geodata, frame=frame)

    def close(self):
        """Close the underlying pyplot figure."""
        plt.close(self.figure)

    def clear_axis(self):
        """Clear the axis and restore labels, aspect and tick formatters."""
        self._axes.cla()
        # drawings and colorbar references must be cleared to prevent increasing memory usage
        self._drawings = []
        self._colorbars = []
        self._axes.set_aspect('equal')
        self._axes.set_xlabel("East")
        self._axes.set_ylabel("North")
        x_fmtr = EngOffsetFormatter(
            unit='m', offset=-self._geodata.x_offset - self._geodata.cell_width / 2 + self._frame[0])
        y_fmtr = EngOffsetFormatter(
            unit='m', offset=-self._geodata.y_offset - self._geodata.cell_width / 2 + self._frame[1])
        self._axes.xaxis.set_major_formatter(x_fmtr)
        self._axes.yaxis.set_major_formatter(y_fmtr)

    def add_extension(self, extensionclass: 'Type[DisplayExtension]', extension_args: 'tuple' = (),
                      extension_kwargs: 'Optional[dict]' = None):
        """Instantiate a DisplayExtension bound to this display and register
        it under its name.

        Bug fix: the previous signature used a mutable dict as default
        argument; ``None`` is now used as sentinel instead.
        """
        extension_kwargs = {} if extension_kwargs is None else extension_kwargs
        ext = extensionclass(self, *extension_args, **extension_kwargs)
        self._extensions[ext.name] = ext

    def remove_extension(self, ext_name: 'str'):
        """Unregister the extension named ``ext_name``."""
        self._extensions.pop(ext_name)  # Remove "extension_name" element from dict

    def __getattr__(self, ext_name: 'str'):
        """Expose registered extensions as attributes of the display.

        Bug fix: previously a missing name raised KeyError (and looking up
        ``_extensions`` before __init__ completed could recurse), which broke
        hasattr()/getattr()-with-default and copy/pickle; raise the
        AttributeError required by the Python data model instead.
        """
        extensions = self.__dict__.get('_extensions', {})
        if ext_name in extensions:
            return extensions[ext_name]
        raise AttributeError(ext_name)

    def legend(self):
        """Draw legend of labeled plots"""
        self._axes.legend()
class MinuteDateFormatter(matplotlib.dates.DateFormatter):
    """Format date from a timestamp in minutes"""

    def __call__(self, x, pos=0):
        # TZ should not be used, because ROS only work on localtime
        # without any time zone consideration
        stamp = datetime.datetime.fromtimestamp(x * 60, None)
        return stamp.strftime(self.fmt)
class SecondDateFormatter(matplotlib.dates.DateFormatter):
    """Format date from a timestamp in seconds"""

    def __call__(self, x, pos=0):
        # TZ should not be used, because ROS only work on localtime
        # without any time zone consideration
        # WARNING: If the fire start is not dated,
        # the displayed hour will be off by the current timezone
        # timestamp "0.000000" is shown 1:00 in France (2:00 in summer)
        stamp = datetime.datetime.fromtimestamp(x, None)
        return stamp.strftime(self.fmt)
class GeoDataDisplay(GeoDataDisplayBase):
"""Draw GeoData information on a matplotlib figure.
'draw_' methods should be called in the order from background to foreground.
"""
    def draw_elevation_shade(self, geodata: 'Optional[GeoData]' = None, layer: 'str' = 'elevation',
                             with_colorbar: 'bool' = False, label: 'str' = "Elevation", **kwargs):
        """Draw a hill-shaded elevation image of `layer` on the display axes.

        :param geodata: optional GeoData to draw; defaults to the display's own.
        :param layer: name of the elevation layer.
        :param with_colorbar: if True, also attach an elevation colorbar.
        :param label: colorbar label.
        :param kwargs: forwarded to Axes.imshow; vmin/vmax/cmap/interpolation/
            extent get sensible defaults when absent.
        :return: the matplotlib AxesImage.
        """
        gd = self._geodata if geodata is None else geodata
        # Transpose and flip rows so the array matches imshow's row ordering.
        z = gd[layer].T[::-1, ...]
        if 'vmin' not in kwargs:
            kwargs['vmin'] = np.nanmin(z)
        if 'vmax' not in kwargs:
            kwargs['vmax'] = np.nanmax(z)
        if 'cmap' not in kwargs:
            kwargs['cmap'] = matplotlib.cm.gist_earth
        if 'interpolation' not in kwargs:
            kwargs['interpolation'] = 'none'
        if 'extent' not in kwargs:
            kwargs['extent'] = self._image_scale
        # Hill shading with a light source from the NW, 35 degrees high.
        ls = LightSource(azdeg=315, altdeg=35)
        axim = self.axes.imshow(
            ls.shade(z, cmap=kwargs['cmap'], blend_mode='soft', vert_exag=1, dx=gd.cell_width,
                     dy=gd.cell_height, vmin=kwargs['vmin'], vmax=kwargs['vmax']), **kwargs)
        self._drawings.append(axim)
        if with_colorbar:
            self._add_elevation_shade_colorbar(axim, label)
        return axim
    def _add_elevation_shade_colorbar(self, axim, label):
        """Attach a meter-labelled colorbar for an elevation image."""
        cb = self._figure.colorbar(axim, ax=self.axes, shrink=0.65, aspect=20, format="%d m")
        cb.set_label(label)
        self._colorbars.append(cb)
    def draw_wind_quiver(self, geodata: 'Optional[GeoData]' = None,
                         layer: 'Tuple[str, str]' = ('wind_velocity', 'wind_angle'),
                         skip: 'int' = 10, **kwargs):
        """Draw the wind field as arrows, subsampling every `skip` cells.

        :param geodata: optional GeoData to draw; defaults to the display's own.
        :param layer: (velocity layer name, angle layer name) pair; the angle
            is interpreted in radians when decomposed with cos/sin below.
        :param skip: subsampling stride in cells along both axes.
        :param kwargs: forwarded to Axes.quiver ('pivot' defaults to 'middle',
            'color' to 'dimgrey').
        :return: the matplotlib Quiver object.
        """
        gd = self._geodata if geodata is None else geodata
        wind_vel = gd[layer[0]][::skip, ::skip]
        wind_ang = gd[layer[1]][::skip, ::skip]
        # Decompose (speed, angle) into x/y arrow components.
        wx = wind_vel * np.cos(wind_ang)
        wy = wind_vel * np.sin(wind_ang)
        if 'pivot' not in kwargs:
            kwargs['pivot'] = 'middle'
        if 'color' not in kwargs:
            kwargs['color'] = 'dimgrey'
        w_quiver = self.axes.quiver(*np.meshgrid(self._x_ticks[::skip], self._y_ticks[::skip]),
                                    wx, wy, **kwargs)
        self.drawings.append(w_quiver)
        return w_quiver
    def draw_ignition_contour(self, geodata: 'Optional[GeoData]' = None, layer: 'str' = 'ignition',
                              time_range: 'Optional[Tuple[float, float]]' = None,
                              with_labels: 'bool' = False, n_fronts=None, **kwargs):
        """Draw fire front contour lines from an ignition-time layer.

        :param geodata: optional GeoData to draw; defaults to the display's own.
        :param layer: ignition-time layer name (values in seconds; +inf marks
            never-ignited cells).
        :param time_range: optional (min, max) clamp for the displayed times.
        :param with_labels: if True, label each front with its HH:MM time.
        :param n_fronts: number of contour lines; when None it is derived from
            the time span (10 per hour, clipped to [3, 10]).
        :param kwargs: forwarded to Axes.contour ('cmap' defaults to YlOrRd).
        :return: the matplotlib QuadContourSet.
        """
        gd = self._geodata if geodata is None else geodata
        igni = np.array(gd[layer])
        igni[igni >= np.inf] = np.nan  # mask non ignited cells
        if time_range:
            igni[igni > time_range[1]] = time_range[1]
            igni[igni < time_range[0]] = time_range[0]
        igni = igni.T
        # Determine how many contour lines we are going to draw
        lim = (np.nanmin(igni), np.nanmax(igni))
        igni[np.isnan(igni)] = np.inf
        igni[igni > lim[1]] = lim[1]
        igni[igni < lim[0]] = lim[0]
        nfronts = int(np.clip(int((lim[1] - lim[0]) / 3600) * 10, 3, 10)) if n_fronts is None \
            else n_fronts
        if 'cmap' not in kwargs:
            kwargs['cmap'] = matplotlib.cm.YlOrRd
        # contour(X, Y, Z, N, **kwargs)
        firecont = self.axes.contour(self._x_ticks, self._y_ticks, igni, nfronts, **kwargs)
        if with_labels:
            self._add_ignition_contour_labels(firecont)
        return firecont
def _add_ignition_contour_labels(self, firecont):
# self.axes.clabel(firecont, inline=True, inline_spacing=1, linewidth=2, fontsize='smaller',
# fmt='%.0f')
self.axes.clabel(firecont, inline=True, inline_spacing=1, fontsize='smaller',
fmt=SecondDateFormatter('%H:%M'))
    def draw_ignition_shade(self, geodata: 'Optional[GeoData]' = None, layer: 'str' = 'ignition',
                            time_range: 'Optional[Tuple[float, float]]' = None,
                            with_colorbar: 'bool' = False, label: 'str' = "Ignition time",
                            **kwargs):
        """Draw an ignition-time layer as a shaded image.

        :param geodata: GeoData to read from; defaults to the display's own data
        :param layer: name of the ignition-time layer
        :param time_range: optional (min, max) window; cells outside it are hidden
        :param with_colorbar: also attach an HH:MM colorbar
        :param label: label for the colorbar
        :return: the created image artist
        """
        gd = self._geodata if geodata is None else geodata
        igni = np.array(gd[layer])
        igni[igni >= np.inf] = np.nan # mask non ignited cells
        if time_range:
            igni[igni > time_range[1]] = np.nan
            igni[igni < time_range[0]] = np.nan
        # Rows are flipped because imshow places the origin at the top-left
        igni = np.around(igni.T[::-1, ...], 0) # Convert and clip to exact seconds
        if 'vmin' not in kwargs:
            kwargs['vmin'] = np.nanmin(igni)
        else:
            # NOTE(review): user-supplied vmin/vmax are divided by 60 while the
            # data itself stays in seconds — presumably callers pass minutes;
            # confirm against the call sites.
            kwargs['vmin'] /= 60.
        if 'vmax' not in kwargs:
            kwargs['vmax'] = np.nanmax(igni)
        else:
            kwargs['vmax'] /= 60.
        if 'cmap' not in kwargs:
            kwargs['cmap'] = matplotlib.cm.YlOrRd
        if 'interpolation' not in kwargs:
            kwargs['interpolation'] = 'none'
        if 'extent' not in kwargs:
            kwargs['extent'] = self._image_scale
        shade = self.axes.imshow(igni, **kwargs)
        self._drawings.append(shade)
        if with_colorbar:
            self._add_ignition_shade_colorbar(shade, label)
        return shade
def _add_ignition_shade_colorbar(self, shade, label: 'str'):
cb = self._figure.colorbar(shade, ax=self.axes, shrink=0.65, aspect=20,
format=SecondDateFormatter('%H:%M'))
cb.set_label(label)
self._colorbars.append(cb)
def draw_base(self, bases: 'Union[[(float, float)], (float, float)]', **kwargs):
"""Draw a point marked as bases in a GeoDataDisplay figure."""
if 's' not in kwargs:
kwargs['s'] = 200
if 'edgecolor' not in kwargs:
kwargs['edgecolor'] = 'black'
return self._draw_scatter(bases, marker=house_marker, **kwargs)
    def draw_base_tagged(self, bases: 'Dict[str, Tuple[float, float]]', **kwargs):
        """Draw a point marked as bases in a GeoDataDisplay figure.

        NOTE(review): this is a verbatim copy of draw_base — the tag keys are
        never rendered, and passing a dict straight to _draw_scatter (which
        does np.array(points)) looks like it would fail; presumably the
        intent was to scatter the dict's values. Confirm against callers.
        """
        if 's' not in kwargs:
            kwargs['s'] = 200
        if 'edgecolor' not in kwargs:
            kwargs['edgecolor'] = 'black'
        return self._draw_scatter(bases, marker=house_marker, **kwargs)
def draw_ignition_points(self, ignition_points: 'Union[[(float, float)], (float, float)]',
**kwargs):
"""Draw one or multiple ignition points in a GeoDataDisplay figure."""
if 'color' not in kwargs:
kwargs['color'] = 'red'
if 'edgecolor' not in kwargs:
kwargs['edgecolor'] = 'black'
if 'marker' not in kwargs:
kwargs['marker'] = 'o'
kwargs["zorder"] = GeoDataDisplayBase.FOREGROUND_OVERLAY_LAYER
return self._draw_scatter(ignition_points, **kwargs)
def _draw_scatter(self, points: 'Union[[(float, float)], (float, float)]', **kwargs):
"""Draw one or multiple points in a GeoDataDisplay figure."""
ip_arr = np.array(points)
scattered = self.axes.scatter(ip_arr[..., 0], ip_arr[..., 1], **kwargs)
self._drawings.append(scattered)
return scattered
def draw_uav(self, position: Tuple[float, float], orientation: float, size=25, **kwargs):
"""Draw a uav symbol in a GeoDataDisplay figure."""
if "zorder" not in kwargs:
kwargs["zorder"] = GeoDataDisplayBase.FOREGROUND_OVERLAY_LAYER
self._drawings.append(plot_uav(self.axes, position, orientation, size, **kwargs))
class DisplayExtension:
    """Base class for pluggable drawing extensions bound to a GeoDataDisplayBase."""
    def __init__(self, base_display: 'GeoDataDisplayBase', name: 'str'):
        self._name = name # type: 'str'
        self._base_display = base_display # type: 'GeoDataDisplayBase'
    @property
    def name(self) -> 'str':
        """Name identifying this extension."""
        return self._name
@deprecated("Use basic GeoDataDisplay instead")
class UAVDisplayExtension(DisplayExtension):
    """Extension to GeoDataDisplay that draws small uavs"""
    def __init__(self, base_display: 'GeoDataDisplayBase', uav_state_list):
        super().__init__(base_display, self.__class__.__name__)
        # Sequence of UAV states rendered by draw_uav()
        self.uav_state_list = uav_state_list
    @deprecated("Use draw_uav of GeoDataDisplay instead")
    def draw_uav(self, *args, **kwargs):
        """Draw every stored UAV state in the base GeoDataDisplay figure."""
        for p in self.uav_state_list:
            self._base_display.drawings.append(
                plot_uav_deprecated(self._base_display.axes, p, *args, **kwargs))
@deprecated
def plot3d_elevation_shade(ax, x, y, z, dx=25, dy=25, **kwargs):
    """Plot a hill-shaded elevation surface on a 3D axis.

    NOTE(review): `dx`, `dy` and `kwargs` are accepted but never used.
    """
    ls = LightSource(azdeg=120, altdeg=45)
    rgb = ls.shade(z, cmap=matplotlib.cm.terrain, vert_exag=0.1, blend_mode='overlay')
    return ax.plot_surface(x, y, z, facecolors=rgb, rstride=5, cstride=5, linewidth=0,
                           antialiased=True, shade=True)
@deprecated
def plot3d_wind_arrows(ax, x, y, z, wx, wy, wz, **kwargs):
    """Plot a 3D wind-vector field as arrows (`kwargs` accepted but unused)."""
    return ax.quiver(x, y, z, wx, wy, wz, pivot='middle', cmap=matplotlib.cm.viridis)
| bsd-2-clause |
PTDreamer/dRonin | python/ins/compare.py | 11 | 5497 | from cins import CINS
from pyins import PyINS
import unittest
from sympy import symbols, lambdify, sqrt
from sympy import MatrixSymbol, Matrix
from numpy import cos, sin, power
from sympy.matrices import *
from quaternions import *
import numpy
import math
import ins
VISUALIZE = False
class CompareFunctions(unittest.TestCase):
    """Step-for-step comparison of the C INS implementation (CINS)
    against the pure-Python reference implementation (PyINS)."""

    def setUp(self):
        # Create and initialize a fresh filter pair for every test.
        self.c_sim = CINS()
        self.py_sim = PyINS()
        self.c_sim.prepare()
        self.py_sim.prepare()

    def run_static(self, accel=(0.0, 0.0, -PyINS.GRAV),
                   gyro=(0.0, 0.0, 0.0), mag=(400, 0, 1600),
                   pos=(0, 0, 0), vel=(0, 0, 0),
                   noise=False, STEPS=200000):
        """Simulate a static set of inputs and measurements, asserting at
        every step that the C and Python filters agree.

        :param accel: constant accelerometer reading
        :param gyro: constant gyro reading
        :param mag: constant magnetometer reading
        :param pos: true position (drives the baro correction)
        :param vel: true velocity
        :param noise: when True, add gaussian sensor noise
        :param STEPS: number of prediction steps to simulate
        :returns: (state, history, times) of the C implementation
        """
        c_sim = self.c_sim
        py_sim = self.py_sim

        dT = 1.0 / 666.0

        # Fixed seed so noisy runs are reproducible and both filters see
        # the exact same measurement stream.
        numpy.random.seed(1)

        c_history = numpy.zeros((STEPS, 16))
        c_history_rpy = numpy.zeros((STEPS, 3))
        py_history = numpy.zeros((STEPS, 16))
        py_history_rpy = numpy.zeros((STEPS, 3))
        times = numpy.zeros((STEPS, 1))

        for k in range(STEPS):
            # Progress output (bug fix: was the Python-2-only backtick
            # repr syntax `print \`k\``, a syntax error under Python 3).
            print(k)

            ng = numpy.zeros(3,)
            na = numpy.zeros(3,)
            n_pos = numpy.zeros(3,)
            nv = numpy.zeros(3,)
            nm = numpy.zeros(3,)
            if noise:
                ng = numpy.random.randn(3,) * 1e-3
                na = numpy.random.randn(3,) * 1e-3
                n_pos = numpy.random.randn(3,) * 1e-3
                nv = numpy.random.randn(3,) * 1e-3
                nm = numpy.random.randn(3,) * 10.0

            c_sim.predict(gyro + ng, accel + na, dT=dT)
            py_sim.predict(gyro + ng, accel + na, dT=dT)

            times[k] = k * dT
            c_history[k, :] = c_sim.state
            c_history_rpy[k, :] = quat_rpy(c_sim.state[6:10])
            py_history[k, :] = py_sim.state
            py_history_rpy[k, :] = quat_rpy(py_sim.state[6:10])

            # Position and velocity corrections are deliberately disabled
            # (kept for reference); only baro and mag corrections run.
            if False and k % 60 == 59:
                c_sim.correction(pos=pos + n_pos)
                py_sim.correction(pos=pos + n_pos)
            if False and k % 60 == 59:
                c_sim.correction(vel=vel + nv)
                py_sim.correction(vel=vel + nv)
            if True and k % 20 == 8:
                c_sim.correction(baro=-pos[2] + n_pos[2])
                py_sim.correction(baro=-pos[2] + n_pos[2])
            if True and k % 20 == 15:
                c_sim.correction(mag=mag + nm)
                py_sim.correction(mag=mag + nm)

            self.assertState(c_sim.state, py_sim.state)

        if VISUALIZE:
            import matplotlib.pyplot as plt
            fig, ax = plt.subplots(2, 2)

            k = STEPS
            ax[0][0].cla()
            ax[0][0].plot(times[0:k:4], c_history[0:k:4, 0:3])
            ax[0][0].set_title('Position')
            plt.sca(ax[0][0])
            plt.ylabel('m')
            ax[0][1].cla()
            ax[0][1].plot(times[0:k:4], c_history[0:k:4, 3:6])
            ax[0][1].set_title('Velocity')
            plt.sca(ax[0][1])
            plt.ylabel('m/s')
            ax[1][0].cla()
            ax[1][0].plot(times[0:k:4], c_history_rpy[0:k:4, :])
            ax[1][0].set_title('Attitude')
            plt.sca(ax[1][0])
            plt.ylabel('Angle (Deg)')
            plt.xlabel('Time (s)')
            ax[1][1].cla()
            ax[1][1].plot(times[0:k:4], c_history[0:k:4, 10:])
            ax[1][1].set_title('Biases')
            plt.sca(ax[1][1])
            plt.ylabel('Bias (rad/s)')
            plt.xlabel('Time (s)')
            plt.suptitle(unittest.TestCase.shortDescription(self))
            plt.show()

        # Bug fix: previously returned the undefined names `sim` and
        # `history` (NameError); return the C filter's results.
        return c_sim.state, c_history, times

    def assertState(self, c_state, py_state):
        """Assert that the C and Python 16-element state vectors agree."""
        # check position
        self.assertAlmostEqual(c_state[0], py_state[0], places=1)
        self.assertAlmostEqual(c_state[1], py_state[1], places=1)
        self.assertAlmostEqual(c_state[2], py_state[2], places=1)
        # check velocity
        self.assertAlmostEqual(c_state[3], py_state[3], places=1)
        self.assertAlmostEqual(c_state[4], py_state[4], places=1)
        self.assertAlmostEqual(c_state[5], py_state[5], places=1)
        # check attitude quaternion (bug fix: previously re-checked
        # indices 0-3, i.e. position/velocity, instead of 6-9)
        self.assertAlmostEqual(c_state[6], py_state[6], places=0)
        self.assertAlmostEqual(c_state[7], py_state[7], places=0)
        self.assertAlmostEqual(c_state[8], py_state[8], places=0)
        self.assertAlmostEqual(c_state[9], py_state[9], places=0)
        # check bias terms (gyros and accels)
        self.assertAlmostEqual(c_state[10], py_state[10], places=2)
        self.assertAlmostEqual(c_state[11], py_state[11], places=2)
        self.assertAlmostEqual(c_state[12], py_state[12], places=2)
        self.assertAlmostEqual(c_state[13], py_state[13], places=2)
        self.assertAlmostEqual(c_state[14], py_state[14], places=2)
        self.assertAlmostEqual(c_state[15], py_state[15], places=2)

    def test_face_west(self):
        """ test convergence to face west
        """
        mag = [0, -400, 1600]
        state, history, times = self.run_static(mag=mag, STEPS=50000)
        # Bug fix: assertState() takes two state vectors; the old call
        # passed an unsupported `rpy` kwarg (TypeError). Check the
        # converged attitude against the expected roll/pitch/yaw instead.
        rpy = quat_rpy(state[6:10])
        self.assertAlmostEqual(rpy[0], 0, places=0)
        self.assertAlmostEqual(rpy[1], 0, places=0)
        self.assertAlmostEqual(rpy[2], 90, places=0)
if __name__ == '__main__':
    # Set to a test name (e.g. 'test_face_west') to run a single test
    # with visualization enabled; leave None for normal unittest discovery.
    selected_test = None
    if selected_test is not None:
        VISUALIZE = True
        suite = unittest.TestSuite()
        suite.addTest(CompareFunctions(selected_test))
        unittest.TextTestRunner().run(suite)
    else:
        # NOTE(review): the trailing "| gpl-3.0 |" below is dataset/license
        # residue fused onto this line; it is not valid Python.
        unittest.main() | gpl-3.0 |
samzhang111/scikit-learn | examples/manifold/plot_manifold_sphere.py | 258 | 5101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space, unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
that of representing a flat map of the Earth, as with
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000

# Create our sphere: azimuth p and polar angle t, uniformly sampled
# (the 0.55 gap in p severs a thin slice down the side).
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi

# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
    np.sin(t[indices]) * np.sin(p[indices]), \
    np.cos(t[indices])

# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
# Consistency fix: report the actual sample count instead of a
# hard-coded 1000 that would go stale if n_samples changes.
plt.suptitle("Manifold Learning with %i points, %i neighbors"
             % (n_samples, n_neighbors), fontsize=14)

ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
    # compatibility matplotlib < 1.0 (narrowed from a bare except)
    ax.view_init(40, -10)
except Exception:
    pass

sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning with each variant.
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']

for i, method in enumerate(methods):
    t0 = time()
    lle = manifold.LocallyLinearEmbedding(n_neighbors, 2, method=method)
    trans_data = lle.fit_transform(sphere_data).T
    elapsed = time() - t0
    print("%s: %.2g sec" % (method, elapsed))

    ax = fig.add_subplot(252 + i)
    plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
    plt.title("%s (%.2g sec)" % (labels[i], elapsed))
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_formatter(NullFormatter())
    plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
iso = manifold.Isomap(n_neighbors, n_components=2)
trans_data = iso.fit_transform(sphere_data).T
elapsed = time() - t0
print("%s: %.2g sec" % ('ISO', elapsed))

ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', elapsed))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
elapsed = time() - t0
print("MDS: %.2g sec" % elapsed)

ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % elapsed)
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
                                n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
elapsed = time() - t0
print("Spectral Embedding: %.2g sec" % elapsed)

ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % elapsed)
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))

# Bug fix: add_subplot(250) is an invalid three-digit shorthand (its
# index digit is 0, but subplot indices are 1-based); the t-SNE panel
# belongs in the last cell of the 2x5 grid.
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')

plt.show()
| bsd-3-clause |
Erotemic/hotspotter | hstpl/mask_creator.py | 1 | 9263 | """
Interactive tool to draw mask on an image or image-like array.
Adapted from matplotlib/examples/event_handling/poly_editor.py
Jan 9 2014: taken from: https://gist.github.com/tonysyu/3090704
"""
from __future__ import division, print_function
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.mlab import dist_point_to_segment
#from matplotlib import nxutils # Depricated
# Scientific
import numpy as np
def _nxutils_points_inside_poly(points, verts):
    """Point-in-polygon test replacing the deprecated nxutils helper."""
    polygon = matplotlib.path.Path(verts)
    return polygon.contains_points(points)
def verts_to_mask(shape, verts):
    """Rasterize polygon `verts` into a boolean mask.

    :param shape: image shape; only the first two entries (h, w) are used
    :param verts: sequence of (x, y) polygon vertices
    :returns: (h, w) boolean array, True inside the polygon
    """
    # Bug fix: removed a leftover debug `print(verts)` that spammed
    # stdout on every call.
    h, w = shape[0:2]
    y, x = np.mgrid[:h, :w]
    points = np.transpose((x.ravel(), y.ravel()))
    # nxutils.points_inside_poly is deprecated; use the Path-based helper
    mask = _nxutils_points_inside_poly(points, verts)
    return mask.reshape(h, w)
class MaskCreator(object):
    """An interactive polygon editor.
    Parameters
    ----------
    poly_xy : list of (float, float)
        List of (x, y) coordinates used as vertices of the polygon.
    max_ds : float
        Max pixel distance to count as a vertex hit.
    Key-bindings
    ------------
    't' : toggle vertex markers on and off. When vertex markers are on,
        you can move them, delete them
    'd' : delete the vertex under point
    'i' : insert a vertex at point. You must be within max_ds of the
        line connecting two existing vertices
    """
    def __init__(self, ax, poly_xy=None, max_ds=10, line_width=2,
                 line_color=(0, 0, 1), face_color=(1, .5, 0)):
        # Whether vertex markers are shown/editable ('t' toggles this)
        self.showverts = True
        # Hit-test tolerance, in display pixels
        self.max_ds = max_ds
        if poly_xy is None:
            poly_xy = default_vertices(ax)
        self.poly = Polygon(poly_xy, animated=True,
                            fc=face_color, ec='none', alpha=0.4)
        ax.add_patch(self.poly)
        ax.set_clip_on(False)
        ax.set_title("Click and drag a point to move it; "
                     "'i' to insert; 'd' to delete.\n"
                     "Close figure when done.")
        self.ax = ax
        x, y = zip(*self.poly.xy)
        #line_color = 'none'
        color = np.array(line_color) * .6
        marker_face_color = line_color
        line_kwargs = {'lw': line_width, 'color': color, 'mfc': marker_face_color}
        self.line = plt.Line2D(x, y, marker='o', alpha=0.8, animated=True, **line_kwargs)
        self._update_line()
        self.ax.add_line(self.line)
        self.poly.add_callback(self.poly_changed)
        self._ind = None # the active vert
        # Wire up all interaction callbacks on the figure canvas
        canvas = self.poly.figure.canvas
        canvas.mpl_connect('draw_event', self.draw_callback)
        canvas.mpl_connect('button_press_event', self.button_press_callback)
        canvas.mpl_connect('button_release_event', self.button_release_callback)
        canvas.mpl_connect('key_press_event', self.key_press_callback)
        canvas.mpl_connect('motion_notify_event', self.motion_notify_callback)
        self.canvas = canvas
    def get_mask(self, shape):
        """Return image mask given by mask creator"""
        mask = verts_to_mask(shape, self.verts)
        return mask
    def poly_changed(self, poly):
        'this method is called whenever the polygon object is called'
        # only copy the artist props to the line (except visibility)
        vis = self.line.get_visible()
        #Artist.update_from(self.line, poly)
        self.line.set_visible(vis) # don't use the poly visibility state
    def draw_callback(self, event):
        #print('[mask] draw_callback(event=%r)' % event)
        # Cache the clean background for fast blitting during drags
        self.background = self.canvas.copy_from_bbox(self.ax.bbox)
        self.ax.draw_artist(self.poly)
        self.ax.draw_artist(self.line)
        self.canvas.blit(self.ax.bbox)
    def button_press_callback(self, event):
        'whenever a mouse button is pressed'
        ignore = not self.showverts or event.inaxes is None or event.button != 1
        if ignore:
            return
        self._ind = self.get_ind_under_cursor(event)
    def button_release_callback(self, event):
        'whenever a mouse button is released'
        ignore = not self.showverts or event.button != 1
        if ignore:
            return
        self._ind = None
    def key_press_callback(self, event):
        'whenever a key is pressed'
        if not event.inaxes:
            return
        if event.key == 't':
            # Toggle vertex marker visibility / editability
            self.showverts = not self.showverts
            self.line.set_visible(self.showverts)
            if not self.showverts:
                self._ind = None
        elif event.key == 'd':
            # Delete the vertex under the cursor; the shared first/last
            # "root" vertex cannot be removed
            ind = self.get_ind_under_cursor(event)
            if ind is None:
                return
            if ind == 0 or ind == self.last_vert_ind:
                print('[mask] Cannot delete root node')
                return
            self.poly.xy = [tup for i, tup in enumerate(self.poly.xy) if i != ind]
            self._update_line()
        elif event.key == 'i':
            # Insert a vertex on the nearest edge within max_ds pixels
            xys = self.poly.get_transform().transform(self.poly.xy)
            p = event.x, event.y # cursor coords
            for i in range(len(xys) - 1):
                s0 = xys[i]
                s1 = xys[i + 1]
                d = dist_point_to_segment(p, s0, s1)
                if d <= self.max_ds:
                    self.poly.xy = np.array(
                        list(self.poly.xy[:i + 1]) +
                        [(event.xdata, event.ydata)] +
                        list(self.poly.xy[i + 1:]))
                    self._update_line()
                    break
        self.canvas.draw()
    def motion_notify_callback(self, event):
        'on mouse movement'
        ignore = (not self.showverts or event.inaxes is None or
                  event.button != 1 or self._ind is None)
        if ignore:
            return
        x, y = event.xdata, event.ydata
        # Moving the root vertex moves both its copies (closed polygon)
        if self._ind == 0 or self._ind == self.last_vert_ind:
            self.poly.xy[0] = x, y
            self.poly.xy[self.last_vert_ind] = x, y
        else:
            self.poly.xy[self._ind] = x, y
        self._update_line()
        # Blit: restore the cached background, redraw only the artists
        self.canvas.restore_region(self.background)
        self.ax.draw_artist(self.poly)
        self.ax.draw_artist(self.line)
        self.canvas.blit(self.ax.bbox)
    def _update_line(self):
        # save verts because polygon gets deleted when figure is closed
        self.verts = self.poly.xy
        self.last_vert_ind = len(self.poly.xy) - 1
        # NOTE(review): under Python 3, zip() returns an iterator;
        # set_data may need list(zip(...)) — confirm matplotlib version.
        self.line.set_data(zip(*self.poly.xy))
    def get_ind_under_cursor(self, event):
        'get the index of the vertex under cursor if within max_ds tolerance'
        # display coords
        xy = np.asarray(self.poly.xy)
        xyt = self.poly.get_transform().transform(xy)
        xt, yt = xyt[:, 0], xyt[:, 1]
        d = np.sqrt((xt - event.x) ** 2 + (yt - event.y) ** 2)
        indseq = np.nonzero(np.equal(d, np.amin(d)))[0]
        ind = indseq[0]
        if d[ind] >= self.max_ds:
            ind = None
        return ind
def default_vertices(ax):
    """Return a default rectangle inset a quarter of the axis span on each side."""
    xlims = ax.get_xlim()
    ylims = ax.get_ylim()
    quarter_w = np.diff(xlims) // 4
    quarter_h = np.diff(ylims) // 4
    x1, x2 = xlims + quarter_w * np.array([1, -1])
    y1, y2 = ylims + quarter_h * np.array([1, -1])
    return ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
def apply_mask(img, mask):
    """Return a copy of `img` with the region outside `mask` darkened by 100."""
    out = img.copy()
    outside = ~mask
    out[outside] = np.uint8(np.clip(out[outside] - 100., 0, 255))
    return out
def roi_to_verts(roi):
    """Convert an (x, y, w, h) ROI into a closed rectangle of float32 vertices."""
    x, y, w, h = roi
    corners = [(x, y + h),
               (x, y),
               (x + w, y),
               (x + w, y + h),
               (x, y + h)]
    return np.array(corners, dtype=np.float32)
def roi_to_mask(shape, roi):
    """Rasterize an (x, y, w, h) ROI into a boolean mask of the given image shape."""
    return verts_to_mask(shape, roi_to_verts(roi))
def mask_creator_demo(mode=0):
    """Interactive demo: draw a mask on an image and display the masked result.

    mode 0 uses the polygon MaskCreator; mode 1 uses the legacy two-click
    ROI selector from the hsgui package.
    """
    print('*** START DEMO ***')
    print('mode = %r' % mode)
    try:
        from hscom import fileio as io
        img = io.imread('/lena.png', 'RGB')
    except ImportError as ex:
        # Fall back to random noise when the hotspotter IO package is absent
        print('cant read lena: %r' % ex)
        img = np.random.uniform(0, 255, size=(100, 100))
    ax = plt.subplot(111)
    ax.imshow(img)
    if mode == 0:
        mc = MaskCreator(ax)
        # Do interaction
        plt.show()
        # Make mask from selection
        mask = mc.get_mask(img.shape)
        # User must close previous figure
    elif mode == 1:
        from hsgui import guitools
        from hsviz import draw_func2 as df2
        ax.set_title('Click two points to select an ROI (old mode)')
        # Do selection
        roi = guitools.select_roi()
        # Make mask from selection
        mask = roi_to_mask(img.shape, roi)
        # Close previous figure
        df2.close_all_figures()
    # NOTE(review): modes other than 0 and 1 leave `mask` unbound and the
    # next line raises NameError.
    # Modify the image with the mask
    masked_img = apply_mask(img, mask)
    # show the modified image
    plt.imshow(masked_img)
    plt.title('Region outside of mask is darkened')
    print('show2')
    plt.show()
if __name__ == '__main__':
    import sys
    # Echo argv so the chosen demo mode is visible on the console.
    print(sys.argv)
    # Any extra command-line argument selects the legacy ROI mode.
    mode = 0 if len(sys.argv) == 1 else 1
    mask_creator_demo(mode)
| apache-2.0 |
rodluger/planetplanet | planetplanet/photo/eyeball.py | 1 | 22416 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
eyeball.py |github|
-------------------
Code for plotting and visualizing "eyeball" planets.
.. role:: raw-html(raw)
:format: html
.. |github| replace:: :raw-html:`<a href = "https://github.com/rodluger/planetplanet/blob/master/planetplanet/photo/eyeball.py"><i class="fa fa-github" aria-hidden="true"></i></a>`
'''
from .maps import RadiativeEquilibriumMap, LimbDarkenedMap
from ..constants import *
import numpy as np
np.seterr(invalid = 'ignore')
import matplotlib.pyplot as pl
from matplotlib.transforms import Affine2D
import mpl_toolkits.axisartist.floating_axes as floating_axes
from matplotlib.widgets import Slider
__all__ = ['DrawEyeball', 'DrawOrbit', 'GetAngles']
def rodrigues(v, k, theta):
'''
The Rodrigues rotation formula in 3D, given a vector `v`, a unit vector `k`
normal to the plane of rotation, and the angle of rotation `theta`
in radians.
'''
return v * np.cos(theta) + np.cross(k, v) * np.sin(theta) \
+ k * np.dot(k, v) * (1 - np.cos(theta))
def ucross(u, v):
'''
The cross product of vectors `u` and `v`, normalized to unity.
'''
res = np.cross(u, v)
res /= np.sqrt(np.sum(res ** 2))
return res
def GetAngles(x, y, z, vx, vy, vz, Lambda = 0., Phi = 0.):
'''
Computes the eyeball angles :math:`\\theta` and :math:`\gamma` given the
Cartesian orbital elements and the hotspot offset angles :math:`\Lambda`
and :math:`\Phi`.
:param float x: The **x** component of the planet's position vector
:param float y: The **y** component of the planet's position vector
:param float z: The **z** component of the planet's position vector
:param float vx: The **x** component of the planet's velocity vector
:param float vy: The **y** component of the planet's velocity vector
:param float vz: The **z** component of the planet's velocity vector
:param float Lambda: The longitudinal hotspot offset in radians. \
Default `0`
:param float Phi: The latitudinal hotspot offset in radians. Default `0`
:returns: :math:`\\theta` and :math:`\gamma`, the eyeball phase angles, \
in radians
'''
# The position vector of the center of the planet and its magnitude
r = np.array([x, y, z])
R = np.sqrt(np.sum(r ** 2))
# The velocity vector of the planet and its magnitude
v = np.array([vx, vy, vz])
V = np.sqrt(np.sum(v ** 2))
# The position vector of the substellar point,
# relative to the planet center, normalized to 1
rstar = -r / R
# Vector normal to the longitudinal plane
vlon = ucross(r / R, v / V)
# Apply the longitudinal offset
rstar = rodrigues(rstar, vlon, Lambda)
# Vector normal to the latitudinal plane
vlat = ucross(vlon, rstar)
# Apply the latitudinal offset
rstar = rodrigues(rstar, vlat, Phi)
# The Cartesian coordinates of the hotspot,
# relative to the planet center, normalized to 1
xstar, ystar, zstar = rstar
# Projected distance from planet center to hotspot, normalized to 1
d = min(1, np.sqrt(xstar ** 2 + ystar ** 2))
# Get the rotation and phase angles
gamma = np.arctan2(ystar, xstar) + np.pi
if zstar <= 0:
theta = np.arccos(d)
else:
theta = -np.arccos(d)
return theta, gamma
def LimbDarkenedFlux(lam, z, teff = 2500, limbdark = [1.]):
    '''
    Compute the specific intensity of a limb-darkened star at zenith angle
    `z`, using a polynomial limb darkening law in powers of (1 - cos(z)).

    :param float lam: The wavelength in microns
    :param float z: The zenith angle in radians
    :param float teff: The stellar effective temperature in K. Default `2500`
    :param list limbdark: Limb darkening coefficients; each entry is either \
        a scalar or a callable of wavelength. Default `[1.]`
    :returns: The limb-darkened specific intensity at (`lam`, `z`)
    :raises ValueError: if a coefficient is neither a scalar nor callable
    '''
    # Evaluate the limb darkening coefficients, which may be given as
    # functions of wavelength
    u = [None for ld in limbdark]
    for n, ld in enumerate(limbdark):
        if callable(ld):
            u[n] = ld(lam)
        elif not hasattr(ld, '__len__'):
            u[n] = ld
        else:
            raise ValueError("Limb darkening coefficients must be provided "
                             + "as a list of scalars or as a list of functions.")
    limbdark = u
    # Convert microns to m. Bug fix: rebind instead of `lam *= 1e-6`,
    # which mutated an array argument in place in the caller.
    lam = lam * 1e-6
    # Compute the normalization term, Equation (E5)
    norm = 0
    for i, coeff in enumerate(limbdark):
        norm += coeff / ((i + 2) * (i + 3))
    norm = 1 - 2 * norm
    # Normalized Planck intensity at the disk center
    a = 2 * HPLANCK * CLIGHT * CLIGHT / (lam * lam * lam * lam * lam)
    b = HPLANCK * CLIGHT / (lam * KBOLTZ * teff)
    B0 = a / (np.exp(b) - 1.) / norm
    # Initialize
    flux = B0
    cosz = np.cos(z)
    # Loop over the coefficient order
    for i, coeff in enumerate(limbdark):
        # The Taylor expansion is in (1 - mu)
        x = (1 - cosz) ** (i + 1)
        # Subtract the order-(i + 1) limb darkening term
        flux -= coeff * B0 * x
    return flux
def RadiativeEquilibriumFlux(lam, z, albedo = 0.3, tnight = 40.,
                             irrad = SEARTH):
    '''
    Compute the blackbody specific intensity at zenith angle `z` for a
    planet in instantaneous radiative equilibrium with its star.

    :param float lam: The wavelength in microns
    :param float z: The zenith angle in radians
    :param float albedo: The planetary albedo. Default `0.3`
    :param float tnight: The night side temperature in K; also the floor \
        for cold dayside patches. Default `40.`
    :param float irrad: The incident stellar irradiance. Default `SEARTH`
    :returns: The blackbody specific intensity at (`lam`, `z`)
    '''
    # Convert microns to m. Bug fix: rebind instead of `lam *= 1e-6`,
    # which mutated an array argument in place in the caller.
    lam = lam * 1e-6
    # Equilibrium temperature: dayside follows the local irradiance, while
    # the nightside (and any dayside patch colder than `tnight`) is pinned
    # at the night side temperature
    if (z < np.pi / 2):
        temp = ((irrad * np.cos(z) * (1 - albedo)) / SBOLTZ) ** 0.25
        if (temp < tnight):
            temp = tnight
    else:
        temp = tnight
    # Planck function evaluated at `temp`
    a = 2 * HPLANCK * CLIGHT * CLIGHT / (lam * lam * lam * lam * lam)
    b = HPLANCK * CLIGHT / (lam * KBOLTZ * temp)
    return a / (np.exp(b) - 1.)
def DrawEyeball(x0 = 0.5, y0 = 0.5, r = 0.5,
                radiancemap = RadiativeEquilibriumMap(),
                theta = np.pi / 3, nz = 31, gamma = 0, occultors = [],
                cmap = 'inferno', fig = None, draw_terminator = False,
                draw_ellipses = False, rasterize = False,
                cpad = 0.2, limbdark = [1.], teff = 2500., wavelength = 15.,
                color = None):
    '''
    Creates a floating axis and draws an "eyeball" planet at given
    phase and rotation angles.
    .. plot::
        :align: center
        from planetplanet import DrawEyeball
        from planetplanet.photo.maps import RadiativeEquilibriumMap
        import matplotlib.pyplot as pl
        fig = pl.figure(figsize = (3,3))
        DrawEyeball(radiancemap = RadiativeEquilibriumMap(), fig = fig)
        pl.show()
    :param float x0: The `x` position of the center of the planet in \
        figure coordinates
    :param float y0: The `y` position of the center of the planet in \
        figure coordinates
    :param float r: The radius of the planet in figure coordinates
    :param float theta: The phase angle of the eyeball in radians. \
        Default :math:`\pi/3`
    :param float gamma: The rotation angle of the eyeball in radians. \
        Default `0`
    :param int nz: The number of zenith angle wedges. Default `31`
    :param array_like occultors: A list of :py:obj:`dict` instances with \
        information on each of the occultors to draw. \
        Each dictionary must have keywords `x`, `y`, and `r`, corresponding to \
        the xy position and radius of the \
        occultor, respectively. These are defined relative to the center of the \
        planet and scaled so that the \
        radius of the planet is unity. Optional keywords are `zorder`, `color`, \
        and `alpha`. Default :py:obj:`[]`
    :param str cmap: The name of the :py:class:`matplotlib` colormap. \
        Default `inferno`
    :param fig: The figure object in which to create the axis. \
        Default :py:obj:`None`
    :type fig: :py:class:`matplotlib.Figure`
    :param bool draw_terminator: Draw the terminator ellipse outline? \
        Default :py:obj:`False`
    :param bool draw_ellipses: Draw the zenith angle ellipse outlines? \
        Default :py:obj:`False`
    :param bool rasterize: Rasterize the image? Default :py:obj:`False`
    :param str color: Occulted body outline color. Default :py:obj:`None`
    :return: **fig**, **ax**, **occ**, **xy**. These are the figure and \
        floating axis instances, a list of :py:obj:`Circle` \
        instances corresponding to each of the occultors, and a function \
        **xy** that performs the rotation \
        transformation into the axis-symmetric eyeball frame
    '''
    # Check the symmetry: radially-symmetric maps have no preferred
    # orientation, so the phase/rotation angles are forced
    if (radiancemap.maptype == MAP_RADIAL_DEFAULT) or \
       (radiancemap.maptype == MAP_RADIAL_CUSTOM):
        theta = np.pi / 2
        gamma = 0
    # The rotation transformation, Equation (E6) in the paper
    xy = lambda x, y: (x * np.cos(gamma) + y * np.sin(gamma),
                       y * np.cos(gamma) - x * np.sin(gamma))
    # Set up the floating axis
    if fig is None:
        fig = pl.figure(figsize = (6,6))
    tr = Affine2D().rotate_deg(gamma * 180 / np.pi)
    x = 1. / (np.abs(np.cos(gamma)) + np.abs(np.sin(gamma)))
    scale = max([1] + [occultor['r'] for occultor in occultors])
    x *= scale
    grid_helper = floating_axes.GridHelperCurveLinear(tr,
                                                      extremes=(-x, x, -x, x))
    ax_orig = floating_axes.FloatingSubplot(fig, 111,
                                            grid_helper = grid_helper)
    ax_orig.set_position([x0 - r, y0 - r, 2 * r, 2 * r])
    # Hide all four floating-axis spines; only the disk itself is drawn
    ax_orig.axis["bottom"].set_visible(False)
    ax_orig.axis["top"].set_visible(False)
    ax_orig.axis["left"].set_visible(False)
    ax_orig.axis["right"].set_visible(False)
    ax_orig.patch.set_alpha(0)
    fig.add_subplot(ax_orig)
    # Auxiliary axes in the rotated (gamma) frame; all drawing happens here
    ax = ax_orig.get_aux_axes(tr)
    if rasterize:
        ax_orig.set_rasterization_zorder(9999)
        ax.set_rasterization_zorder(9999)
    # Plot the occultors. Note that we need to transform
    # their position vectors since we're in a rotated frame.
    occ = [None for i in occultors]
    for i, occultor in enumerate(occultors):
        xo = occultor['x']
        yo = occultor['y']
        ro = occultor['r']
        zo = occultor.get('zorder', 1)
        ec = occultor.get('color', 'k')
        ao = occultor.get('alpha', 1)
        xo, yo = xy(xo, yo)
        occ[i] = pl.Circle((xo, yo), ro, color = 'lightgrey',
                           ec = ec, lw = 2,
                           alpha = ao, zorder = zo, clip_on = False)
        ax.add_artist(occ[i])
    # Plot the occulted body (outline of the unit disk)
    x = np.linspace(-1, 1, 1000)
    y = np.sqrt(1 - x ** 2)
    if color is not None:
        ax.plot(x, y, color = color, zorder = 0, lw = 2, clip_on = False)
        ax.plot(x, -y, color = color, zorder = 0, lw = 2, clip_on = False)
    # Get the radiance map. If it's one of the default maps,
    # we need to call their special Python implementations defined
    # above. Otherwise we use the `ctypes` method of the map.
    # We will normalize it so that we can use it as a colormap.
    if radiancemap.maptype == MAP_RADIAL_DEFAULT:
        # Function wrapper to use correct limb darkening parameters
        func = lambda lam, z: LimbDarkenedFlux(lam, z, teff = teff,
                                               limbdark = limbdark)
    elif radiancemap.maptype == MAP_ELLIPTICAL_DEFAULT:
        # TODO: Technically we should pass the albedo and night side
        # temperature to the function here But the albedo doesn't matter
        # for drawing because of the normalization; the night side
        # temperature could matter, but only if it's comparable to the
        # dayside temperature, otherwise the nightside is dark no matter what.
        func = RadiativeEquilibriumFlux
    else:
        func = radiancemap.ctypes
    # Sample the radiance over zenith angle to find its dynamic range,
    # padded by `cpad` on each side
    zarr = np.linspace(0, np.pi, 100)
    rarr = [func(wavelength, za) for za in zarr]
    rmax = np.max(rarr)
    rmin = np.min(rarr)
    rrng = rmax - rmin
    rmax += cpad * rrng
    rmin -= cpad * rrng
    # NOTE: from here on `color` is rebound from the outline-color argument
    # to a zenith-angle -> RGBA lookup into the colormap
    if rrng > 0:
        color = lambda z: pl.get_cmap(cmap)((func(wavelength, z) - rmin)
                                            / (rmax - rmin))
    else:
        color = lambda z: pl.get_cmap(cmap)(1 - cpad)
    # Plot the zenith angle ellipses
    zarr = np.linspace(0, np.pi, nz + 2)
    for i, z in enumerate(zarr[1:]):
        # The ellipse
        a = np.abs(np.sin(z))
        b = max(0.001, a * np.abs(np.sin(theta)))
        xE = -np.cos(z) * np.cos(theta)
        xlimb = np.cos(z) * np.sin(theta) * np.tan(theta)
        # NOTE(review): xmin/xmax are computed here but never used below
        if ((theta > 0) and (b < xlimb)) or ((theta <= 0) and (b > xlimb)):
            xmin = xE - b
        else:
            xmin = xE - xlimb
        if ((theta > 0) and (b > -xlimb)) or ((theta <= 0) and (b < -xlimb)):
            xmax = xE + b
        else:
            xmax = xE - xlimb
        # Plot it
        x = np.linspace(xE - b, xE + b, 1000)
        if theta > 0:
            x[x < xE - xlimb] = np.nan
        elif theta > -np.pi / 2:
            x[x > xE - xlimb] = np.nan
        A = b ** 2 - (x - xE) ** 2
        A[A < 0] = 0
        y = (a / b) * np.sqrt(A)
        if np.abs(np.cos(z)) < 1e-5:
            # This is the terminator
            if draw_terminator and scale < 3:
                style = dict(color = 'k', ls = '--', lw = 0.5,
                             zorder = 0, clip_on = False)
                ax.plot(x, y, **style)
                ax.plot(x, -y, **style)
        else:
            # These are the ellipse boundaries
            if draw_ellipses and scale < 3:
                style = dict(color = 'k', ls = '-', lw = 0.5,
                             zorder = 0, clip_on = False)
                ax.plot(x, y, **style)
                ax.plot(x, -y, **style)
        # Fill the ellipses
        if theta < 0:
            ax.fill_between(x, -y, y, color = color(zarr[i+1]),
                            zorder = 0.5 * (z / np.pi - 1), clip_on = False)
        else:
            ax.fill_between(x, -y, y, color = color(zarr[i]),
                            zorder = 0.5 * (-z / np.pi - 1), clip_on = False)
        # Fill the ellipses that are cut off by the limb
        # NOTE(review): the two branches below are byte-identical; the
        # theta >= 0 case was presumably meant to mirror the fill above
        # (zarr[i+1] / opposite zorder sign) — confirm against the paper.
        if theta < 0:
            x_ = np.linspace(-1, xE - xlimb, 1000)
            y_ = np.sqrt(1 - x_ ** 2)
            ax.fill_between(x_, -y_, y_, color = color(zarr[i]),
                            zorder = 0.5 * (-z / np.pi - 1), clip_on = False)
        else:
            x_ = np.linspace(-1, xE - xlimb, 1000)
            y_ = np.sqrt(1 - x_ ** 2)
            ax.fill_between(x_, -y_, y_, color = color(zarr[i]),
                            zorder = 0.5 * (-z / np.pi - 1), clip_on = False)
    return fig, ax, occ, xy
def DrawOrbit(radiancemap = RadiativeEquilibriumMap(), inc = 70., Omega = 0.,
              ecc = 0., w = 0., Phi = 0., Lambda = 0., nphases = 20, size = 1,
              draw_orbit = True, draw_orbital_vectors = True,
              plot_phasecurve = False, label_phases = False,
              figsize = (8, 8), **kwargs):
    # NOTE(review): the default `radiancemap` is constructed once at module
    # import time and shared by every call -- confirm RadiativeEquilibriumMap
    # instances are stateless before relying on this.
    '''
    Draw an "eyeball" planet's orbit on the sky, illustrating the changing
    phases over the course of the orbit.

    .. plot::
         :align: center

         from planetplanet import DrawOrbit
         import matplotlib.pyplot as pl
         DrawOrbit(Omega = 45., size = 2, figsize = (6, 6))
         pl.show()

    :param float inc: The orbital inclination in degrees. Default `70.`
    :param float Omega: The longitude of ascending node in degrees. \
        Default `0.`
    :param float ecc: The orbital eccentricity. Default `0.`
    :param float w: The longitude of pericenter in degrees. Default `0.`
    :param float Phi: The latitudinal hot spot offset in degrees. Default `0.`
    :param float Lambda: The longitudinal hot spot offset in degrees. \
        Default `0.`
    :param int nphases: The number of planets to draw at different phases. \
        Default `20`
    :param float size: The size of the planets in arbitrary units. Default `1.`
    :param bool draw_orbit: Draw the orbit outline? Default :py:obj:`True`
    :param bool draw_orbital_vectors: Draw the orbital radial vectors? \
        Default :py:obj:`True`
    :param bool plot_phasecurve: Compute and plot the phase curve for one \
        orbit? Default :py:obj:`False`
    :param bool label_phases: Label each of the phases? Default :py:obj:`False`
    :param tuple figsize: The size of the figure in inches. \
        Default :py:obj:`(8, 8)`
    :param dict kwargs: Any other :py:obj:`kwargs` to be passed to \
        :py:func:`DrawEyeball`
    :returns: `fig`, `axes`, and optionally `figphase`, `axphase`; these are \
        all the figure and axes objects generated by the function
    '''
    # Convert the hotspot angles to radians
    Lambda *= np.pi / 180
    Phi *= np.pi / 180
    # Get the orbital elements over a full orbit of the planet
    # We are assuming a period of 10 days, but it doesn't matter for the plot!
    # We make the star tiny so that secondary eclipse is negligible
    from . import Star, Planet, System
    star = Star('A', r = 0.01)
    b = Planet('b', per = 10., inc = inc, Omega = Omega, t0 = 0, ecc = ecc,
               w = w, Phi = Phi, Lambda = Lambda, airless = True,
               phasecurve = True)
    system = System(star, b, mintheta = 0.001)
    # 1000 evenly-spaced times spanning exactly one 10-day orbit
    time = np.linspace(-5, 5, 1000)
    if plot_phasecurve:
        # Full photometric computation (needed for the flux arrays below)
        system.compute(time)
    else:
        # Orbits only: cheaper, positions/velocities are all we need
        system.compute_orbits(time)
    # Plot stuff
    fig, ax = pl.subplots(1, figsize = figsize)
    ax.margins(0.1, 0.1)
    # Phase curve
    if plot_phasecurve:
        figphase, axphase = pl.subplots(1, figsize = (8, 2))
        figphase.subplots_adjust(bottom = 0.3)
        # Flux normalized to its maximum, plotted against orbital phase [0, 1]
        axphase.plot(np.linspace(0, 1, len(b.time)), b.flux[:,0]
                     / (np.nanmax(b.flux[:,0])), 'k-')
        axphase.set_xlabel('Orbital phase', fontweight = 'bold', fontsize = 12)
        axphase.set_ylabel('Relative flux', fontweight = 'bold', fontsize = 12)
    # Orbit outline
    if draw_orbit:
        ax.plot(b.x, b.y, 'k-', alpha = 0.5)
    # Adjust the figure dimensions so the aspect ratio is unity
    left = 0.125
    right = 0.9
    xmin, xmax = ax.get_xlim()
    # Enforce a minimum data range of 2 units on each axis so tiny orbits
    # do not blow up the aspect-ratio computation below
    if (xmax - xmin) < 2:
        xmax = 1
        xmin = -1
    ymin, ymax = ax.get_ylim()
    if (ymax - ymin) < 2:
        ymax = 1
        ymin = -1
    width = right - left
    # Both numerator and denominator are negative, so height is positive
    height = width * (ymin - ymax) / (xmin - xmax)
    bottom = 0.5 - height / 2
    top = 0.5 + height / 2
    fig.subplots_adjust(left = left, right = right, bottom = bottom, top = top)
    ax.axis('off')
    # Get the indices of the images we'll plot, sorted by zorder
    # (most distant planets, largest z, are drawn first so nearer ones overlap)
    inds = np.array(list(range(0, 1000, 1000 // nphases)), dtype = int)
    inds = inds[np.argsort([-b.z[i] for i in inds])]
    # Plot images at different phases
    axes = [ax]
    for i in inds:
        # Get the eyeball angles
        theta, gamma = GetAngles(b.x[i], b.y[i], b.z[i], b.vx[i], b.vy[i],
                                 b.vz[i], Lambda = Lambda, Phi = Phi)
        # Plot the radial vector
        if draw_orbital_vectors:
            ax.plot([0, b.x[i]], [0, b.y[i]], 'k-', alpha = 0.5, lw = 1)
        # Get the figure coordinates of the point
        disp_coords = ax.transData.transform((b.x[i], b.y[i]))
        xf, yf = fig.transFigure.inverted().transform(disp_coords)
        # Draw the planet
        _, tmp, _, _ = DrawEyeball(xf, yf, 0.015 * size, radiancemap,
                                   theta = theta, gamma = gamma, fig = fig,
                                   **kwargs)
        axes.append(tmp)
        # Indicate the orbital phase
        if label_phases:
            dx = b.x[i] #/ r
            dy = b.y[i] #/ r
            dr = np.sqrt(dx ** 2 + dy ** 2)
            # Offset the label 16*size points radially outward from the planet
            dx *= 16 * size / dr
            dy *= 16 * size / dr
            tmp.annotate("%.2f" % (i / 1000.), xy = (0, 0), xytext = (dx, dy),
                         xycoords = 'data', textcoords = 'offset points',
                         fontsize = 8, ha = 'center', va = 'center',
                         zorder = 10000)
    if plot_phasecurve:
        return fig, axes, figphase, axphase
    else:
        return fig, axes
class Interact(object):
    '''
    Generates an interactive "eyeball" planet viewer, where the user can
    change the orbital phase and the latitudinal/longitudinal hot spot
    offset angles.

    .. plot::
         :align: center

         from planetplanet.photo.eyeball import Interact
         Interact()

    :param dict kwargs: Any :py:obj:`kwargs` to be passed to \
        :py:func:`DrawEyeball`
    '''

    def __init__(self, **kwargs):
        '''Build the figure and the three angle sliders, then show it.'''
        self.kwargs = kwargs
        self.fig = pl.figure(figsize = (6,6))
        self.fig.subplots_adjust(bottom = 0.25)
        # theta: orbital phase angle, in degrees
        self.axtheta = pl.axes([0.3, 0.05, 0.44, 0.03])
        self.theta = Slider(self.axtheta, r'$\theta$', -180., 180.,
                            valinit = 90.)
        # Phi: latitudinal hot-spot offset, in degrees
        self.axphi = pl.axes([0.3, 0.1, 0.44, 0.03])
        self.phi = Slider(self.axphi, r'$\Phi$', -90, 90., valinit = 0.)
        # Lambda: longitudinal hot-spot offset, in degrees
        self.axlam = pl.axes([0.3, 0.15, 0.44, 0.03])
        self.lam = Slider(self.axlam, r'$\Lambda$', -90, 90., valinit = 0.)
        # Redraw on any slider change
        self.theta.on_changed(self._update)
        self.phi.on_changed(self._update)
        self.lam.on_changed(self._update)
        self._update(90.)
        pl.show()

    def _update(self, val):
        '''Redraw the planet from the current slider values.

        :param val: New slider value (required by the Slider callback \
            signature; the actual values are read from the sliders).
        '''
        # Remove every axis except the three slider axes before redrawing
        for ax in self.fig.get_axes():
            if ax not in [self.axtheta, self.axphi, self.axlam]:
                ax.remove()
        # Slider values, converted to radians
        theta = self.theta.val * np.pi / 180
        Lambda = self.lam.val * np.pi / 180
        Phi = self.phi.val * np.pi / 180
        # The coordinates of the substellar point
        x_ss = -np.cos(theta + Lambda) * np.cos(Phi)
        y_ss = np.sin(Phi)
        # The rotation angle that makes the planet symmetric about the x-axis
        gamma = -np.arctan2(y_ss, -x_ss)
        # Compute the new effective theta, branching on the quadrant of
        # theta + Lambda so arccos (range [0, pi]) is unwrapped correctly
        if theta + Lambda < -np.pi:
            theta = 2 * np.pi - np.arccos(-x_ss * np.cos(gamma) - y_ss
                                          * np.sin(gamma))
        elif theta + Lambda < 0:
            theta = -np.arccos(-x_ss * np.cos(gamma) - y_ss * np.sin(gamma))
        elif theta + Lambda > np.pi:
            theta = -np.arccos(-x_ss * np.cos(gamma) - y_ss * np.sin(gamma))
        else:
            theta = np.arccos(-x_ss * np.cos(gamma) - y_ss * np.sin(gamma))
        # Plot the planet.
        # BUG FIX: stray " | gpl-3.0 |" text fused onto the end of this call
        # in the source made the file syntactically invalid; it is removed.
        DrawEyeball(0.525, 0.6, 0.3, RadiativeEquilibriumMap(),
                    fig = self.fig, theta = theta, gamma = gamma,
                    **self.kwargs)
degoldschmidt/ribeirolab-codeconversion | python/flyPAD/fastrms.py | 1 | 2967 | import numpy as np
def fastrms(x, window=5, dim=-1, ampl=0):
    """FASTRMS: instantaneous root-mean-square (RMS) power via convolution.

    (Translated from a MATLAB script by Scott McKinney, 2011.)

    Parameters
    ----------
    x : numpy.ndarray
        Input signal; 1-D vector or 2-D matrix.
    window : int
        Length of the rectangular smoothing window (default 5).
    dim : int
        Axis to operate along for 2-D input: 0 convolves down each column,
        1 convolves across each row. The default -1 auto-selects the axis
        with the most elements.
    ampl : int or bool
        If non-zero, multiply by sqrt(2) so the output reflects the
        equivalent amplitude of a sinusoidal input signal (default 0).

    Returns
    -------
    numpy.ndarray
        RMS envelope, same shape as ``x``.
    """
    indim = len(x.shape)   # number of dimensions of the input
    rows = x.shape[0]
    if indim > 1:
        cols = x.shape[1]
        # BUG FIX: the original logic was
        #     if dim == -1 and cols >= rows: dim = 1
        #     else:                          dim = 0
        # which silently overrode an explicitly requested dim=1 whenever
        # rows > cols. Only auto-select the axis when dim is left at -1.
        if dim == -1:
            dim = 1 if cols >= rows else 0
    win = np.ones(window)  # rectangular window
    power = x ** 2         # instantaneous signal power
    if indim < 2:
        # Vector input: a single 1-D convolution suffices.
        rms = np.convolve(power, win, 'same')
    else:
        # Matrix input: convolve each column (dim=0) or each row (dim=1).
        rms = np.zeros(x.shape)
        if dim == 0:
            for col in range(cols):
                rms[:, col] = np.convolve(power[:, col], win, 'same')
        else:
            for row in range(rows):
                rms[row, :] = np.convolve(power[row, :], win, 'same')
    rms = np.sqrt(rms / np.sum(win))   # normalize by the window length
    if ampl:
        rms = np.sqrt(2) * rms         # sinusoidal amplitude correction
    return rms
"""
% Fs = 200; T = 5; N = T*Fs; t = linspace(0,T,N);
% noise = randn(N,1);
% [a,b] = butter(5, [9 12]/(Fs/2));
% x = filtfilt(a,b,noise);
% window = gausswin(0.25*Fs);
% rms = fastrms(x,window,[],1);
% plot(t,x,t,rms*[1 -1],'LineWidth',2);
% xlabel('Time (sec)'); ylabel('Signal')
% title('Instantaneous amplitude via RMS')
import matplotlib.pyplot as plt
time = np.arange(1000)
print(time.shape)
x = 0.5*np.sin(0.05*time)+0.3*np.sin(0.3*time)+0.1*np.sin(0.9*time)
y = fastrms(x)
plt.plot(time, x, 'r-', label="Raw data")
plt.plot(time, y, 'b-', label="RMS")
plt.legend()
plt.show()
"""
| gpl-3.0 |
aavanian/bokeh | bokeh/sampledata/tests/test_autompg.py | 2 | 2520 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import pandas as pd
# Bokeh imports
from bokeh.util.testing import verify_all
# Module under test
#import bokeh.sampledata.autompg as bsa
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
# Public names that bokeh.sampledata.autompg is expected to export.
ALL = (
    'autompg',
    'autompg_clean',
)

#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------

# Generated test: verify the module's exports match ALL exactly.
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.autompg", ALL))
@pytest.mark.sampledata
def test_autompg():
    # Imported lazily so collection does not require the sample data.
    import bokeh.sampledata.autompg as bsa
    assert isinstance(bsa.autompg, pd.DataFrame)
    # Package data details: 392 rows, origin codes restricted to {1, 2, 3}.
    assert len(bsa.autompg) == 392
    assert set(bsa.autompg.origin).issubset({1, 2, 3})
@pytest.mark.sampledata
def test_autompg_clean():
    # Imported lazily so collection does not require the sample data.
    import bokeh.sampledata.autompg as bsa
    assert isinstance(bsa.autompg_clean, pd.DataFrame)
    # Package data details: 392 rows, origins mapped to region names.
    assert len(bsa.autompg_clean) == 392
    assert set(bsa.autompg_clean.origin) <= {'North America', 'Europe', 'Asia'}
    # Misspelled manufacturer names must have been cleaned out.
    misspelled = ('chevy', 'chevroelt', 'maxda', 'mercedes-benz',
                  'toyouta', 'vokswagen', 'vw')
    for name in misspelled:
        assert name not in bsa.autompg_clean.mfr
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
| bsd-3-clause |
kiryx/pagmo | PyGMO/examples/benchmark_racing.py | 8 | 8309 | from PyGMO import *
import numpy as np
import matplotlib.pyplot as plt
import copy

# Flavor of stochastic wrapper to benchmark: 'NOISY' adds evaluation noise,
# 'ROBUST' perturbs the decision vector within a neighbourhood.
# stochastic_type = 'NOISY'
stochastic_type = 'ROBUST'

# Deterministic base problem that gets wrapped: 10-dimensional Ackley.
# (`problem` comes from the `from PyGMO import *` at the top of the file.)
base_problem = problem.ackley(10)
class post_eval:
    """Estimate the noise-free fitness of a decision vector by averaging the
    stochastic objective over many independently re-seeded evaluations."""

    def __init__(self, post_eval_prob, post_eval_n=500, seed=5):
        # Problem whose objfun() is stochastic w.r.t. its `seed` attribute.
        self.post_eval_prob = post_eval_prob
        # Number of re-seeded evaluations to average over.
        self.post_eval_n = post_eval_n
        # Seed for the global RNG that generates the per-evaluation seeds.
        self.seed = seed

    def objfun(self, x):
        """Return a 1-tuple with objfun(x) averaged over re-seeded runs."""
        prob = self.post_eval_prob
        n = self.post_eval_n
        # Fix the global RNG so the sequence of problem seeds is reproducible.
        np.random.seed(self.seed)
        total = 0
        for _ in range(n):
            prob.seed = np.random.randint(1000000)
            total += prob.objfun(x)[0] / float(n)
        return (total,)
def start_experiment(
        num_trials=20,
        pop_size=40,
        fevals_max=100000,
        nr_eval_per_x=40,
        noise_level=0.05,
        seed=123):
    """Run pso_gen with and without racing on the wrapped base problem.

    :param num_trials: number of independent runs to average over
    :param pop_size: population size for both algorithms
    :param fevals_max: total objective-evaluation budget per run
    :param nr_eval_per_x: evaluations per decision vector (averaging depth)
    :param noise_level: noise amplitude (NOISY) or neighbourhood rho (ROBUST)
    :param seed: base RNG seed; perturbed per trial
    :returns: tuple (averaged_no_racing, averaged_racing) of post-evaluated
        champion fitnesses averaged over the trials
    """
    # 1. Set up the problem
    if(stochastic_type == 'NOISY'):
        # Single-evaluation and nr_eval_per_x-averaged noisy variants
        prob_single_eval = problem.noisy(
            base_problem,
            trials=1,
            param_second=noise_level,
            noise_type=problem.noisy.noise_distribution.UNIFORM)
        prob_regular = problem.noisy(
            base_problem,
            trials=nr_eval_per_x,
            param_second=noise_level,
            noise_type=problem.noisy.noise_distribution.UNIFORM)
    else:
        prob_single_eval = problem.robust(
            base_problem, trials=1, rho=noise_level)
        prob_regular = problem.robust(
            base_problem, trials=nr_eval_per_x, rho=noise_level)
    # Post-evaluator: deep averaging used only for final champion scoring
    prob_post_eval = post_eval(prob_single_eval, post_eval_n=500)
    """
    Notes for SAME_PROB: Both algorithm will operate on the same version of the
    problem (same n_eval). pso_gen will evolve for fevals_max/2*pop_size times;
    pso_gen_racing will evolve until fevals_max is hit. In this case a single
    feval count in is referred to a single call to objfun() of the problem
    (with n_eval as 10).
    """
    SAME_PROB = False
    # 2A. Set up pso_gen algorithm without racing:
    # Each generation of pso_gen requires 2*pop_size*nr_eval_per_x
    # evaluations. Ignoring the cost of initialization here.
    # NOTE: No need to scale down if both algo has the same version of problem
    if SAME_PROB:
        gen_budget = fevals_max / (2 * pop_size)
    else:
        gen_budget = fevals_max / (2 * pop_size * nr_eval_per_x)
    # NOTE(review): under Python 3 this is float division; pso_gen presumably
    # truncates internally -- confirm, or use // explicitly.
    print('Non-racing pso gen will evolve for %d generations' % gen_budget)
    algo_psogen = algorithm.pso_gen(
        gen_budget, 0.7298, 2.05, 2.05, 0.05, 5, 2, 4)
    # 2B. Set up pso_gen algorithm with racing:
    # Setting gen number to be an arbitrarily large number, let fevals
    # decide when to terminate.
    nr_eval_per_x_racing = nr_eval_per_x
    algo_psogen_racing = algorithm.pso_gen_racing(
        1000000,
        0.7298,
        2.05,
        2.05,
        0.05,
        5,
        2,
        4,
        nr_eval_per_x_racing,
        fevals_max)
    # TODO: Use below to check the sanity of racing in factoring out the effect of exceeded fevals
    # algo_with_racing = algorithm.pso_gen_racing(gen_budget,0.7298,2.05,2.05,0.05,5,2,4,nr_eval_per_x_racing,999999999)
    # 3. Run both algorithms and record their performance
    if SAME_PROB:
        algo_prob_pairs = [
            (algo_psogen, prob_regular), (algo_psogen_racing, prob_regular)]
    else:
        # Racing gets the single-eval problem: it manages re-evaluation itself
        algo_prob_pairs = [
            (algo_psogen,
             prob_regular),
            (algo_psogen_racing,
             prob_single_eval)]
    post_evaluated_fitnesses = []
    np.random.seed(seed)
    for i in range(num_trials):
        print('::: Trial #%d :::' % i)
        results = []
        # Perturb the seed so each trial uses a fresh (but reproducible) seed
        seed += np.random.randint(100000)
        for algo, prob in algo_prob_pairs:
            algo.reset_rngs(seed)
            # Seed used to ensure both algorithm evolves an identical
            # population
            pop = population(prob, pop_size, seed)
            pop = algo.evolve(pop)
            # winner_idx = pop.race(1)[0][0];
            # print("race winner", winner_idx, "vs champion idx", pop.get_best_idx())
            # champion_true_fitness = prob_orig.objfun(pop[winner_idx].cur_x)
            # Score the champion with the deep post-evaluator
            champion_true_fitness = prob_post_eval.objfun(pop.champion.x)[0]
            # print('Final champion =', champion_true_fitness)
            results.append(champion_true_fitness)
        print(results)
        post_evaluated_fitnesses.append(results)
    # Transpose: column 0 = non-racing results, column 1 = racing results
    post_evaluated_fitnesses = list(zip(*post_evaluated_fitnesses))
    averaged_no_racing = np.mean(post_evaluated_fitnesses[0])
    averaged_racing = np.mean(post_evaluated_fitnesses[1])
    print('----------------------------------------------')
    print('Final averaged actual fitness over %d trials:' % num_trials)
    print('pso_gen without racing: %f' % averaged_no_racing)
    print('pso_gen with racing: %f' % averaged_racing)
    print('----------------------------------------------')
    return (averaged_no_racing, averaged_racing)
def vary_nr_eval_per_x(default_params):
    """Sweep nr_eval_per_x and plot post-evaluated fitness for PSO with and
    without racing; all other parameters come from `default_params`."""
    pars = copy.deepcopy(default_params)
    sweep = list(range(3, 20, 2))
    outcomes = []
    for depth in sweep:
        pars['nr_eval_per_x'] = depth
        outcomes.append(start_experiment(**pars))
    no_racing = [pair[0] for pair in outcomes]
    racing = [pair[1] for pair in outcomes]
    plt.ion()
    plt.figure()
    plt.plot(sweep, racing, '-o')
    plt.plot(sweep, no_racing, '-s')
    plt.legend(['PSO racing', 'PSO without racing'])
    plt.xlabel('nr_eval_per_x')
    plt.ylabel('Post-evaluated fitness')
    prob_stat = '%s-%s' % (stochastic_type, base_problem.get_name())
    plt.title('%s\nPSO: With/without racing (fevals=%d) (%d trials)' %
              (prob_stat, pars['fevals_max'], pars['num_trials']))
    # plt.savefig('%s-psogenracing-nr_eval_per_x.png' % prob_stat)
def vary_neighbourhood_size(default_params):
    """Sweep the robust-problem neighbourhood size (noise_level) and plot
    post-evaluated fitness for PSO with and without racing."""
    pars = copy.deepcopy(default_params)
    sizes = np.linspace(0.01, 0.2, num=20)
    outcomes = []
    for size in sizes:
        pars['noise_level'] = size
        outcomes.append(start_experiment(**pars))
    no_racing = [pair[0] for pair in outcomes]
    racing = [pair[1] for pair in outcomes]
    plt.ion()
    plt.figure()
    plt.plot(sizes, racing, '-o')
    plt.plot(sizes, no_racing, '-s')
    plt.legend(['PSO racing', 'PSO without racing'], loc='best')
    plt.xlabel('Robust\'s neighbourhood size')
    plt.ylabel('Post-evaluated fitness')
    prob_stat = '%s-%s' % (stochastic_type, base_problem.get_name())
    plt.title('%s\nPSO: With/without racing (fevals=%d) (%d trials)' %
              (prob_stat, pars['fevals_max'], pars['num_trials']))
    # plt.savefig('%s-psogenracing-robust_neighbourhood_small.png' % prob_stat)
def vary_fevals_budget(default_params):
    """Sweep the total evaluation budget (fevals_max) and plot post-evaluated
    fitness for PSO with and without racing.

    BUG FIX: the original signature,
        def vary_fevals_budget(num_trials=20, nr_eval_per_x=10, nb_size=0.5),
    declared parameters the body never used -- it read the module-level
    global `default_params` instead (NameError when that global is absent).
    The signature now matches the sibling sweep functions, and the
    (commented-out) call site in __main__ already passes `default_params`
    positionally, so this is consistent with existing usage.
    """
    pars = copy.deepcopy(default_params)
    budgets = list(range(10000, 200000, 20000))
    f_no_racing_list = []
    f_racing_list = []
    for fevals_max in budgets:
        pars['fevals_max'] = fevals_max
        f_no_racing, f_racing = start_experiment(**pars)
        f_no_racing_list.append(f_no_racing)
        f_racing_list.append(f_racing)
    plt.ion()
    plt.figure()
    plt.plot(budgets, f_racing_list, '-o')
    plt.plot(budgets, f_no_racing_list, '-s')
    plt.legend(['PSO racing', 'PSO without racing'], loc='best')
    plt.xlabel('Evaluation budget (# of fevals)')
    plt.ylabel('Post-evaluated fitness')
    prob_stat = '%s-%s' % (stochastic_type, base_problem.get_name())
    plt.title(
        '%s\nPSO: With/without racing (neighbourhood size = %.2f) (%d trials)' %
        (prob_stat, pars['noise_level'], pars['num_trials']))
    # plt.savefig('%s-psogenracing-robust_fevals.png' % prob_stat)
if __name__ == '__main__':
    # start_experiment(num_trials=20, pop_size=20, nr_eval_per_x=20, fevals_max=200000)
    # Shared sweep configuration; each vary_* helper deep-copies it before
    # overriding the parameter it sweeps.
    default_params = dict(
        num_trials=10,
        pop_size=20,
        nr_eval_per_x=10,
        fevals_max=100000,
        noise_level=0.3)
    vary_nr_eval_per_x(default_params)
    vary_neighbourhood_size(default_params)
    # vary_fevals_budget(default_params)
| gpl-3.0 |
schets/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(0.5 * n_inliers, 2) - offset
X2 = 0.3 * np.random.randn(0.5 * n_inliers, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
par2/lamana-test | lamana/utils/references.py | 1 | 5777 | #------------------------------------------------------------------------------
class Reference(object):
    '''A nonfunctional class containing urls for supporting code.

    Each numbered entry pairs a topic with the URL or citation that informed
    an implementation elsewhere in the package. The class is never
    instantiated; it exists purely as in-source documentation.
    '''
    # ----- --------- -------------
    # (001) imp.relaod http://stackoverflow.com/questions/961162/reloading-module-giving-error-reload-is-not-defined
    # (002) __float__ magic method http://www.rafekettler.com/magicmethods.html
    # (003) Use all() for conditional testing http://stackoverflow.com/questions/10666163/how-to-check-if-all-elements-of-a-list-matches-a-condition
    # (004) Prevent __getattr__ recursuion http://stackoverflow.com/questions/11145501/getattr-going-recursive-in-python
    # (005) Cautions with using super() https://fuhm.net/super-harmful/
    # (006) dict to DataFrame http://pandas.pydata.org/pandas-docs/version/0.15.2/dsintro.html#from-a-list-of-dicts
    # (007) f(x) to rearrange columns http://stackoverflow.com/questions/12329853/how-to-rearrange-pandas-column-sequence
    # (008) Exception handling Python 3 https://docs.python.org/3/reference/simple_stmts.html
    # (009) List of Exceptions http://www.tutorialspoint.com/python/python_exceptions.htm
    # (010) Panadas slicing negative indices https://github.com/pydata/pandas/issues/2600
    # (011) Count words in column http://stackoverflow.com/questions/17573814/count-occurrences-of-certain-words-in-pandas-dataframe
    # (012) groupby .first() http://pandas.pydata.org/pandas-docs/stable/groupby.html
    # (013) Read files from directory https://stackoverflow.com/questions/15994981/python-read-all-files-from-folder-shp-dbf-mxd-etc
    # (014) DataFrame equal by Quant https://stackoverflow.com/questions/14224172/equality-in-pandas-dataframes-column-order-matters
    # (015) Testing in pandas http://pandas.pydata.org/developers.html#how-to-write-a-test
    # (016) How to skip N/A as nan http://pandas.pydata.org/pandas-docs/dev/generated/pandas.io.parsers.read_csv.html
    # (017) Default na values http://pandas.pydata.org/pandas-docs/stable/io.html#na-values
    # (018) Make smaller chunks from a list https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks-in-python
    # (019) Laminar Composites Staab, G. Butterworth-Heineman. 1999.
    # (020) Inverted cumsum() https://stackoverflow.com/questions/16541618/perform-a-reverse-cumulative-sum-on-a-numpy-array
    # (021) groupby cumsum() https://stackoverflow.com/questions/15755057/using-cumsum-in-pandas-on-group
    # (022) Select numeric columns https://stackoverflow.com/questions/25039626/find-numeric-columns-in-pandas-python
    # (023) Dynamic module import https://stackoverflow.com/questions/301134/dynamic-module-import-in-python
    # (024) Test Exceptions https://stackoverflow.com/questions/7799593/how-an-exceptions-be-tested-with-nose
    # (025) Print exception traceback; no halt https://stackoverflow.com/questions/3702675/how-to-print-the-full-traceback-without-halting-the-program
    # (026) Extract number from string https://stackoverflow.com/questions/4289331/python-extract-numbers-from-a-string
    # (027) Natural sort https://stackoverflow.com/questions/2545532/python-analog-of-natsort-function-sort-a-list-using-a-natural-order-algorithm
    # (028) Overload __eq__ http://jcalderone.livejournal.com/32837.html
    # (029) DataFrames to image https://stackoverflow.com/questions/26678467/export-a-pandas-dataframe-as-a-table-image#
    # (030) setter properties https://stackoverflow.com/questions/1684828/how-to-set-attributes-using-property-decorators
    # (031) multiprocessing vs threading http://sebastianraschka.com/Articles/2014_multiprocessing_intro.html
    # (032) Intro on multiprocessing http://toastdriven.com/blog/2008/nov/11/brief-introduction-multiprocessing/
    # (033) Subclassing a dict https://stackoverflow.com/questions/21361106/how-would-i-implement-a-dict-with-abstract-base-classes-in-python
    # (034) Slice a dict http://pythoncentral.io/how-to-slice-custom-objects-classes-in-python/
    # (035) Implement __hash__ https://stackoverflow.com/questions/4005318/how-to-implement-a-good-hash-function-in-python
    # (036) Check if file exists https://stackoverflow.com/questions/82831/check-whether-a-file-exists-using-python
    # (037) Make list of alphabet http://snipplr.com/view/5058/
    # (038) Annotate rectangle layers https://stackoverflow.com/questions/14531346/how-to-add-a-text-into-a-rectangle
    # (039) regex Lookarounds http://www.rexegg.com/regex-lookarounds.html#overlapping
    # (040) pyregex http://www.pyregex.com/
    # (041) regex search http://stackoverflow.com/questions/1323364/in-python-how-to-check-if-a-string-only-contains-certain-characters
    # (042) Example of regex patterns https://hg.python.org/cpython/file/2.7/Lib/tokenize.py#l108
    # (043) Interactive, regex visualization https://regex101.com/r/lL0cW7/4
    # (044) regex to find comma inside (),[] https://stackoverflow.com/questions/33793037/python-regex-to-find-special-characters-between-delimiters/33793322#33793322
    pass
#------------------------------------------------------------------------------
| bsd-3-clause |
JeffreyFish/DocWebTool | DocTracking.py | 2 | 9018 | #!/usr/bin/env python
# -*- Coding: UTF-8 -*-
#------------------------------------
#--Author: Lychee Li
#--CreationDate: 2017/10/18
#--RevisedDate: 2017/10/27
#--RevisedDate: 2018/03/12
#------------------------------------
import datetime
import pyodbc
import common
import pandas as pd

# Load the change-tracking SQL template from disk once at import time.
# (Original comment was in Chinese: "read the SQL code".)
with open(common.sql_path + '\\DocTracking.sql', 'r') as tracking:
    tracking_code = tracking.read()
def get_processid(docid):
    """Return the ProcessId associated with a DocumentId.

    SECURITY FIX: the original interpolated `docid` into the SQL text with
    %-formatting (SQL-injection risk); a parameterized qmark query is used
    instead. The connection and cursor are now released even if the query
    raises.

    :param docid: DocumentId value to look up.
    :returns: the matching ProcessId.
    :raises IndexError: if no row matches `docid`.
    """
    code = '''
        select ProcessId
        from DocumentAcquisition..MasterProcess
        where DocumentId=?
        '''
    connection = pyodbc.connect(common.connection_string_multithread)
    try:
        cursor = connection.cursor()
        try:
            return cursor.execute(code, docid).fetchall()[0][0]
        finally:
            cursor.close()
    finally:
        connection.close()
def get_cutter(connection, processid):
    """Return the e-mail address of the DA2 (cutting) account for a process.

    BUG FIX: the original immediately shadowed the `connection` parameter
    with a brand-new connection (which it then closed), so the caller's
    connection was silently ignored and an extra connection was opened per
    call. The passed-in connection is now used and is NOT closed here --
    its lifetime belongs to the caller.

    SECURITY FIX: parameterized qmark query instead of %-interpolation.

    :param connection: an open pyodbc connection, owned by the caller.
    :param processid: ProcessId to look up.
    :returns: the Email of the DA2 account.
    :raises IndexError: if no row matches `processid`.
    """
    code = '''
        select ac.Email
        from DocumentAcquisition..MasterProcess as mp
        left join DocumentAcquisition..Account as ac on ac.DaId=mp.DA2
        where mp.ProcessId=?
        '''
    cursor = connection.cursor()
    try:
        return cursor.execute(code, processid).fetchall()[0][0]
    finally:
        cursor.close()
def get_log(connection, processid, timediff):
    """Build the change-history records for a document process.

    Each record is a list of
    [row number, ProcessId, DocumentId, DocType, EffectiveDate, user,
     operation timestamp, comment], where the comment classifies what
    changed relative to the previous log row.

    BUG FIX: the original shadowed the `connection` parameter with a new
    connection that was never closed (a connection leak per call); the
    caller's connection is used now. The first-row `if each[7] != cutter`
    conditional assigned `cutter` on BOTH branches, so it was dead code and
    has been collapsed.

    NOTE(review): `tracking_code` is filled via %-interpolation; keep
    `processid`/`timediff` trusted or move to parameterized SQL.

    :param connection: an open pyodbc connection, owned by the caller.
    :param processid: ProcessId whose history is requested.
    :param timediff: hour offset applied by the SQL template to timestamps.
    :returns: list of record lists, oldest first.
    """
    log_list = pd.read_sql(tracking_code % (timediff, processid),
                           connection).values.tolist()
    cutter = get_cutter(connection, processid)
    record_list = []
    for idx, each in enumerate(log_list):
        row = idx + 1
        when = each[8].strftime('%Y-%m-%d %H:%M:%S')
        if idx == 0:
            # The first entry is always the cutting step, attributed to the
            # DA2 cutter account regardless of who is recorded in the log.
            user = cutter
            comment = 'Cutting'
        else:
            prev = log_list[idx - 1]
            user = each[7]
            # Classify the change by comparing against the previous row:
            # col 2 = DocType, 4 = EffectiveDate, 5 = mapping, 6 = content.
            if each[2] != prev[2]:
                comment = 'Doctype changed'
            elif each[4] != prev[4]:
                comment = 'EffDate changed'
            elif each[5] != prev[5]:
                comment = 'First mapping' if prev[5] is None else 'Mapping changed'
            elif each[6] != prev[6]:
                comment = 'Content changed'
            else:
                comment = ''
        record_list.append([str(row), each[0], each[1], each[2], each[4],
                            user, when, comment])
    return record_list
def run(processid, timediff):
    """Render the document-tracking change log for ``processid`` as HTML.

    Parameters
    ----------
    processid :
        MasterProcess id whose history is rendered.
    timediff :
        Passed through to the tracking SQL (presumably a time offset --
        confirm against ``tracking_code``).

    Returns
    -------
    str
        ``common.css_code`` followed by an HTML table of the log rows.
    """
    connection = pyodbc.connect(common.connection_string_multithread)
    try:
        result_list = get_log(connection, processid, timediff)
    finally:
        # The original never closed this connection (a leak); note that
        # get_log opens its own connection and ignores this one.
        connection.close()
    result = pd.DataFrame(result_list, columns=['No', 'ProcessId', 'DocumentId', 'DocType', 'EffectiveDate', 'User', 'OperationTime', 'Comment'])
    # ``-1`` is deprecated/removed in modern pandas; ``None`` means "never truncate".
    pd.set_option('display.max_colwidth', None)
    html_code = result.to_html(classes='tablestyle', index=False)
    html_code = common.css_code + html_code
    return html_code
# processid = 56048385
# timediff = 13
# a = run(processid, timediff)
# print(a)
# Read the SQL statements in from their files
# with open(common.sql_path + '\\DocTracking_cutting_code.sql', 'r') as cutting_code:
# cutter_code = cutting_code.read()
# with open(common.cur_file_dir()+'\\sql\\DocTracking_first_mapper_code.sql', 'r') as first_mapping_code:
# first_mapper_code = first_mapping_code.read()
# with open(common.cur_file_dir()+'\\sql\\DocTracking_last_mapper_code.sql', 'r') as last_mapping_code:
# last_mapper_code = last_mapping_code.read()
# with open(common.cur_file_dir()+'\\sql\\DocTracking_charting_code.sql', 'r') as charting_code:
# charter_code = charting_code.read()
# with open(common.cur_file_dir()+'\\sql\\DocTracking_link_code.sql', 'r') as link_code:
# linker_code = link_code.read()
# def get_processid(docid):
# connection = pyodbc.connect(common.connection_string_multithread)
# code = '''
# select ProcessId
# from DocumentAcquisition..MasterProcess
# where DocumentId=%s
# ''' % (docid)
# cursor = connection.cursor()
# processid = cursor.execute(code).fetchall()[0][0]
# cursor.close()
# connection.close()
# return processid
# def get_log(connection, processid, timediff):
# cursor = connection.cursor()
# # Fetch the results of running the SQL queries
# # cutter information
# cutting = cursor.execute(cutter_code % (timediff, processid)).fetchall()
# if len(cutting) != 0:
# cutter = cutting[0][0]
# cutter_time = cutting[0][1]
# else:
# cutter = ''
# cutter_time = ''
# # first mapper information
# first_mapping = cursor.execute(first_mapper_code % (timediff, processid)).fetchall()
# if len(first_mapping) != 0:
# first_mapper = first_mapping[0][0]
# first_mapper_time = first_mapping[0][1]
# else:
# first_mapper = ''
# first_mapper_time = ''
# # last mapper information
# last_mapping = cursor.execute(last_mapper_code % (timediff, processid)).fetchall()
# if len(last_mapping) !=0:
# last_mapper = last_mapping[0][0]
# last_mapper_time = last_mapping[0][1]
# else:
# last_mapper = ''
# last_mapper_time = ''
# # charter information
# charting = cursor.execute(charter_code % (timediff, processid)).fetchall()
# if len(charting) != 0:
# charter = charting[0][0]
# charter_time = charting[0][1]
# else:
# charter = ''
# charter_time = ''
# # linker information
# linking = cursor.execute(linker_code % (timediff, processid)).fetchall()
# if len(linking) != 0:
# linker = linking[0][0]
# linker_time = linking[0][1]
# else:
# linker = ''
# linker_time = ''
# record_list = []
# if len(cutter) != 0:
# cutting_list = [str(1), processid, cutter_time.strftime('%Y-%m-%d %H:%M:%S'), 'Cutting', cutter]
# record_list.append(cutting_list)
# else:
# cutting_list = ['', '', '', 'No Cutting', '']
# record_list.append(cutting_list)
# if len(first_mapper) != 0:
# first_mapping_list = [str(2), processid, first_mapper_time.strftime('%Y-%m-%d %H:%M:%S'), 'First Mapping', first_mapper]
# record_list.append(first_mapping_list)
# else:
# first_mapping_list = ['', '', '', 'No Mapping', '']
# record_list.append(first_mapping_list)
# if len(last_mapper) != 0:
# if len(first_mapper) != 0:
# last_mapping_list = [str(3), processid, last_mapper_time.strftime('%Y-%m-%d %H:%M:%S'), 'Last Mapping', last_mapper]
# record_list.append(last_mapping_list)
# else:
# last_mapping_list = [str(3), processid, last_mapper_time.strftime('%Y-%m-%d %H:%M:%S'), 'Mapping is not completed.', last_mapper]
# record_list.append(last_mapping_list)
# else:
# last_mapping_list = ['', '', '', 'No Mapping', '']
# record_list.append(last_mapping_list)
# if len(linker) != 0:
# linking_list = [str(5), processid, linker_time.strftime('%Y-%m-%d %H:%M:%S'), 'Add Link', linker]
# record_list.append(linking_list)
# else:
# linking_list = ['', '', '', 'No Link', '']
# record_list.append(linking_list)
# if len(charter) != 0:
# charting_list = [str(4), processid, charter_time.strftime('%Y-%m-%d %H:%M:%S'), 'Add Chart', charter]
# record_list.append(charting_list)
# else:
# charting_list = ['', '', '', 'No Chart', '']
# record_list.append(charting_list)
# cursor.close()
# return record_list
# def run(processid, timediff):
# connection = pyodbc.connect(common.connection_string_multithread)
# total_result = get_log(connection, processid, timediff)
# pd_result = pd.DataFrame(total_result, columns=['No', 'Processid', 'Operation Time', 'Operation', 'User'])
# pd.set_option('display.max_colwidth', -1)
# html_code = pd_result.to_html(classes='tablestyle', index=False)
# html_code = '<p>ProcessId: ' + str(processid) + '</p>' + common.css_code + html_code
# connection.close()
# return html_code
| gpl-3.0 |
AdrieleD/gr-mac1 | python/qa_dqcsk_mapper_fc.py | 4 | 4241 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Felix Wunsch, Communications Engineering Lab (CEL) / Karlsruhe Institute of Technology (KIT) <wunsch.felix@googlemail.com>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import ieee802_15_4_swig as ieee802_15_4
from css_phy import physical_layer as phy
import css_constants as c
import numpy as np
import matplotlib.pyplot as plt
class qa_dqcsk_mapper_fc (gr_unittest.TestCase):
    """QA for the DQCSK mapper block.

    Each test feeds phase values (floats) into ``dqcsk_mapper_fc`` and checks
    that the emitted samples match a NumPy-built reference of phase-rotated
    chirp sequences interleaved with the two alternating time gaps.
    """

    def setUp (self):
        self.tb = gr.top_block ()

    def tearDown (self):
        self.tb = None

    def test_001_t (self):
        """All-zero phases must reproduce the unmodulated chirp/gap stream."""
        # set up fg
        cfg = phy()
        data_in = [0 for i in range(12)]
        self.src = blocks.vector_source_f(data_in)
        self.dqcsk = ieee802_15_4.dqcsk_mapper_fc(cfg.chirp_seq, cfg.time_gap_1, cfg.time_gap_2, c.n_sub, cfg.n_subchirps, 120)
        self.snk = blocks.vector_sink_c(1)
        self.tb.connect(self.src, self.dqcsk, self.snk)
        self.tb.run ()
        # check data
        data_out = self.snk.data()
        ref = np.concatenate((cfg.chirp_seq, cfg.time_gap_1, cfg.chirp_seq, cfg.time_gap_2, cfg.chirp_seq, cfg.time_gap_1))
        # NOTE: removed an unused ``f, axarr = plt.subplots(2)`` call here; it
        # opened a matplotlib figure that was never drawn to, shown, or closed.
        self.assertComplexTuplesAlmostEqual(data_out, ref)

    def test_002_t (self):
        """QPSK-style phases rotate each subchirp by exp(1j*phase)."""
        # set up fg
        cfg = phy()
        data_in = [0, np.pi/2, np.pi, -np.pi/2]
        self.src = blocks.vector_source_f(data_in)
        self.dqcsk = ieee802_15_4.dqcsk_mapper_fc(cfg.chirp_seq, cfg.time_gap_1, cfg.time_gap_2, c.n_sub, cfg.n_subchirps, 4)
        self.snk = blocks.vector_sink_c(1)
        self.tb.connect(self.src, self.dqcsk, self.snk)
        self.tb.run ()
        # check data
        data_out = self.snk.data()
        ref = np.concatenate((cfg.chirp_seq.copy(), cfg.time_gap_1))
        for i in range(4):
            ref[i*c.n_sub:(i+1)*c.n_sub] = ref[i*c.n_sub:(i+1)*c.n_sub]*np.exp(1j*data_in[i])
        self.assertComplexTuplesAlmostEqual(data_out, ref, 5)

    def test_003_t (self):
        """Random phases over many symbols, with alternating gap sequences."""
        # set up fg
        cfg = phy()
        data_in = np.pi/2*np.random.randint(-1,3,(1000,))
        self.src = blocks.vector_source_f(data_in)
        self.dqcsk = ieee802_15_4.dqcsk_mapper_fc(cfg.chirp_seq, cfg.time_gap_1, cfg.time_gap_2, c.n_sub, cfg.n_subchirps, 1000)
        self.snk = blocks.vector_sink_c(1)
        self.tb.connect(self.src, self.dqcsk, self.snk)
        self.tb.run ()
        # check data
        data_out = self.snk.data()
        ref = np.array([])
        seq = cfg.chirp_seq.copy()
        lensub = c.n_sub
        # Integer division: plain ``/`` yields a float on Python 3, which
        # would make ``range(nseq)`` below raise a TypeError.
        nseq = len(data_in) // cfg.n_subchirps
        seq_ctr = 0
        for i in range(nseq):
            for k in range(cfg.n_subchirps):
                ref = np.concatenate((ref, seq[k*lensub:(k+1)*lensub]*np.exp(1j*data_in[i*cfg.n_subchirps+k])))
            # Gaps alternate: even symbols get gap 1, odd symbols gap 2.
            if seq_ctr % 2 == 0:
                ref = np.concatenate((ref, cfg.time_gap_1))
            else:
                ref = np.concatenate((ref, cfg.time_gap_2))
            seq_ctr = (seq_ctr+1) % 2
        self.assertComplexTuplesAlmostEqual(data_out, ref, 5)
if __name__ == '__main__':
    # Run this QA suite through GNU Radio's unittest wrapper.
    gr_unittest.run(qa_dqcsk_mapper_fc)
| gpl-3.0 |
DTU-ELMA/European_Dataset | Scripts/Build_Capacity_Layouts/2-Build_capacity_layouts_ECMWF.py | 1 | 1570 | import numpy as np
import pandas as pd
# Build uniform and proportional per-node capacity layouts for wind and PV
# from the ECMWF node areas, each scaled so that the layout's total
# generation over the period equals the total load.
metadatadir = '../../Data/Metadata/'

node_order = np.load(metadatadir + 'nodeorder.npy')
wind_area = (np.load(metadatadir + 'Node_area_wind_onshore_ECMWF.npy')
             + np.load(metadatadir + 'Node_area_wind_offshore_ECMWF.npy'))
solar_area = np.load(metadatadir + 'Node_area_PV_ECMWF.npy')

wind_ts = pd.read_csv('../../Output_Data/Nodal_TS/wind_signal_ECMWF.csv')
solar_ts = pd.read_csv('../../Output_Data/Nodal_TS/solar_signal_ECMWF.csv')
load_ts = pd.read_csv('../../Output_Data/Nodal_TS/load_signal.csv')

# Scalar normalisation target: total load summed over all nodes and times.
total_load = load_ts.set_index('Time').sum().sum()

# Node areas as fractions of the total area.
wind_area_frac = wind_area / np.sum(wind_area)
solar_area_frac = solar_area / np.sum(solar_area)

# Per-node generation totals, reordered to match the area arrays.
wind_gen = wind_ts.set_index('Time').sum()[node_order].values
solar_gen = solar_ts.set_index('Time').sum()[node_order].values
wind_gen_frac = wind_gen / wind_gen.sum()
solar_gen_frac = solar_gen / solar_gen.sum()

# Uniform layouts: capacity proportional to area only.
wind_layout_uniform = wind_area_frac * total_load / (wind_area_frac * wind_gen).sum()
solar_layout_uniform = solar_area_frac * total_load / (solar_area_frac * solar_gen).sum()

# Proportional layouts: capacity proportional to area times resource quality.
wind_layout_proportional = wind_area_frac * wind_gen_frac * total_load / (wind_area_frac * wind_gen_frac * wind_gen).sum()
solar_layout_proportional = solar_area_frac * solar_gen_frac * total_load / (solar_area_frac * solar_gen_frac * solar_gen).sum()

np.save(metadatadir + 'wind_layout_uniform_ECMWF.npy', wind_layout_uniform)
np.save(metadatadir + 'solar_layout_uniform_ECMWF.npy', solar_layout_uniform)
np.save(metadatadir + 'wind_layout_proportional_ECMWF.npy', wind_layout_proportional)
np.save(metadatadir + 'solar_layout_proportional_ECMWF.npy', solar_layout_proportional)
| apache-2.0 |
YinongLong/scikit-learn | examples/model_selection/grid_search_digits.py | 33 | 2764 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This examples shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.model_selection.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)

# Loading the Digits dataset
digits = datasets.load_digits()

# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target

# Split the dataset in two equal parts: a development set for the search
# and a held-out evaluation set.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.5, random_state=0)

# Set the parameters by cross-validation: one grid per kernel family.
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
                     'C': [1, 10, 100, 1000]},
                    {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]

scores = ['precision', 'recall']

# Run one full grid search per target metric.
for score in scores:
    print("# Tuning hyper-parameters for %s" % score)
    print()

    clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
                       scoring='%s_macro' % score)
    clf.fit(X_train, y_train)

    print("Best parameters set found on development set:")
    print()
    print(clf.best_params_)
    print()
    print("Grid scores on development set:")
    print()
    means = clf.cv_results_['mean_test_score']
    stds = clf.cv_results_['std_test_score']
    for mean, std, params in zip(means, stds, clf.cv_results_['params']):
        # +/- two standard deviations across the CV folds.
        print("%0.3f (+/-%0.03f) for %r"
              % (mean, std * 2, params))
    print()

    print("Detailed classification report:")
    print()
    print("The model is trained on the full development set.")
    print("The scores are computed on the full evaluation set.")
    print()
    # GridSearchCV refits the best model on the whole development set.
    y_true, y_pred = y_test, clf.predict(X_test)
    print(classification_report(y_true, y_pred))
    print()

# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
dopplershift/MetPy | src/metpy/units.py | 1 | 9002 | # Copyright (c) 2015,2017,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
r"""Module to provide unit support.
This makes use of the :mod:`pint` library and sets up the default settings
for good temperature support.
Attributes
----------
units : :class:`pint.UnitRegistry`
The unit registry used throughout the package. Any use of units in MetPy should
import this registry and use it to grab units.
"""
import functools
from inspect import Parameter, signature
import logging
import re
import warnings
import numpy as np
import pint
import pint.unit
log = logging.getLogger(__name__)

# Re-export pint's exception types so callers need not import pint directly.
UndefinedUnitError = pint.UndefinedUnitError
DimensionalityError = pint.DimensionalityError

# Create registry, with preprocessors for UDUNITS-style powers (m2 s-2) and percent signs
units = pint.UnitRegistry(
    autoconvert_offset_to_baseunit=True,
    preprocessors=[
        # Insert '**' between a unit name and a trailing (possibly negative)
        # exponent, e.g. 'm2 s-2' -> 'm**2 s**-2', while avoiding scientific
        # notation like '1e-3'.
        functools.partial(
            re.sub,
            r'(?<=[A-Za-z])(?![A-Za-z])(?<![0-9\-][eE])(?<![0-9\-])(?=[0-9\-])',
            '**'
        ),
        # pint cannot parse a literal '%'; spell it out instead.
        lambda string: string.replace('%', 'percent')
    ]
)

# Capture v0.10 NEP 18 warning on first creation
with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    units.Quantity([])

# For pint 0.6, this is the best way to define a dimensionless unit. See pint #185
units.define(pint.unit.UnitDefinition('percent', '%', (),
             pint.converters.ScaleConverter(0.01)))

# Define commonly encountered units (CF-style latitude/longitude spellings)
# not defined by pint
units.define('degrees_north = degree = degrees_N = degreesN = degree_north = degree_N '
             '= degreeN')
units.define('degrees_east = degree = degrees_E = degreesE = degree_east = degree_E = degreeE')

# Alias geopotential meters (gpm) to just meters
units.define('@alias meter = gpm')

# Silence UnitStrippedWarning (older pint versions lack this attribute)
if hasattr(pint, 'UnitStrippedWarning'):
    warnings.simplefilter('ignore', category=pint.UnitStrippedWarning)
def pandas_dataframe_to_unit_arrays(df, column_units=None):
    """Attach units to data in pandas dataframes and return united arrays.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Data in pandas dataframe.

    column_units : dict
        Dictionary of units to attach to columns of the dataframe. Overrides
        the units attribute if it is attached to the dataframe.

    Returns
    -------
    Dictionary containing united arrays with keys corresponding to the dataframe
    column names.

    Raises
    ------
    ValueError
        If no units are given and the dataframe has no ``units`` attribute.

    """
    if not column_units:
        try:
            column_units = df.units
        except AttributeError:
            # Message fixed: it previously referred to a non-existent
            # ``col_units`` argument instead of ``column_units``.
            raise ValueError('No units attribute attached to pandas '
                             'dataframe and column_units not given.') from None

    # Iterate through columns attaching units if we have them, if not, don't touch it
    res = {}
    for column in df:
        if column in column_units and column_units[column]:
            res[column] = units.Quantity(df[column].values, column_units[column])
        else:
            res[column] = df[column].values

    return res
def concatenate(arrs, axis=0):
    r"""Join multiple values into one unitized array.

    A scalar-/masked-array-aware version of `numpy.concatenate`: every item
    must be convertible to a common unit.  Unitless items are adopted into
    that unit without conversion, and the units of the first united item win.

    Parameters
    ----------
    arrs : Sequence of arrays
        The items to be joined together

    axis : integer, optional
        The array axis along which to join the arrays. Defaults to 0 (the first dimension)

    Returns
    -------
    `pint.Quantity`
        New container with the value passed in and units corresponding to the first item.

    """
    # First unit found wins; fall back to dimensionless.
    dest = next((item.units for item in arrs if hasattr(item, 'units')),
                'dimensionless')

    pieces = []
    for item in arrs:
        if hasattr(item, 'to'):
            item = item.to(dest).magnitude
        pieces.append(np.atleast_1d(item))

    # Masked concatenate preserves masks; drop the mask wrapper entirely
    # when nothing is actually masked.
    joined = np.ma.concatenate(pieces, axis=axis)
    if not np.any(joined.mask):
        joined = np.asarray(joined)

    return units.Quantity(joined, dest)
def masked_array(data, data_units=None, **kwargs):
    """Create a :class:`numpy.ma.MaskedArray` that keeps its units.

    Thin wrapper over :func:`numpy.ma.masked_array` which re-attaches units
    to the result (a bare call would silently drop them).  Units come from
    ``data_units``; when that is ``None`` they are read off ``data`` itself.

    Parameters
    ----------
    data : array_like
        The source data. If ``data_units`` is `None`, this should be a
        `pint.Quantity` with the desired units.

    data_units : str or `pint.Unit`, optional
        The units for the resulting `pint.Quantity`

    kwargs
        Arbitrary keyword arguments passed to `numpy.ma.masked_array`, optional

    Returns
    -------
    `pint.Quantity`

    """
    chosen_units = data.units if data_units is None else data_units
    return units.Quantity(np.ma.masked_array(data, **kwargs), chosen_units)
def _check_argument_units(args, defaults, dimensionality):
"""Yield arguments with improper dimensionality."""
for arg, val in args.items():
# Get the needed dimensionality (for printing) as well as cached, parsed version
# for this argument.
try:
need, parsed = dimensionality[arg]
except KeyError:
# Argument did not have units specified in decorator
continue
if arg in defaults and (defaults[arg] is not None or val is None):
check = val == defaults[arg]
if np.all(check):
continue
# See if the value passed in is appropriate
try:
if val.dimensionality != parsed:
yield arg, val.units, need
# No dimensionality
except AttributeError:
# If this argument is dimensionless, don't worry
if parsed != '':
yield arg, 'none', need
def _get_changed_version(docstring):
"""Find the most recent version in which the docs say a function changed."""
matches = re.findall(r'.. versionchanged:: ([\d.]+)', docstring)
return max(matches) if matches else None
def check_units(*units_by_pos, **units_by_name):
    """Create a decorator to check units of function arguments.

    Dimensionalities may be given positionally or by keyword, mirroring the
    decorated function's signature, e.g. ``@check_units('[pressure]')``.
    """
    def dec(func):
        # Match the signature of the function to the arguments given to the decorator
        sig = signature(func)
        bound_units = sig.bind_partial(*units_by_pos, **units_by_name)

        # Convert our specified dimensionality (e.g. "[pressure]") to one used by
        # pint directly (e.g. "[mass] / [length] / [time]**2). This is for both efficiency
        # reasons and to ensure that problems with the decorator are caught at import,
        # rather than runtime.
        dims = {name: (orig, units.get_dimensionality(orig.replace('dimensionless', '')))
                for name, orig in bound_units.arguments.items()}

        # Cache declared defaults so defaulted call values can skip the check.
        defaults = {name: sig.parameters[name].default for name in sig.parameters
                    if sig.parameters[name].default is not Parameter.empty}

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Match all passed in value to their proper arguments so we can check units
            bound_args = sig.bind(*args, **kwargs)
            bad = list(_check_argument_units(bound_args.arguments, defaults, dims))

            # If there are any bad units, emit a proper error message making it clear
            # what went wrong.
            if bad:
                msg = f'`{func.__name__}` given arguments with incorrect units: '
                msg += ', '.join(f'`{arg}` requires "{req}" but given "{given}"'
                                 for arg, given, req in bad)
                if 'none' in msg:
                    # At least one argument arrived without units at all;
                    # show how to attach some.
                    msg += ('\nAny variable `x` can be assigned a unit as follows:\n'
                            ' from metpy.units import units\n'
                            ' x = units.Quantity(x, "m/s")')

                # If function has changed, mention that fact
                if func.__doc__:
                    changed_version = _get_changed_version(func.__doc__)
                    if changed_version:
                        msg = (f'This function changed in {changed_version}--double check '
                               'that the function is being called properly.\n') + msg

                raise ValueError(msg)
            return func(*args, **kwargs)

        return wrapper

    return dec
# Enable pint's built-in matplotlib support
units.setup_matplotlib()

# Keep the module namespace clean: ``pint`` itself is not part of the
# public API of this module.
del pint
| bsd-3-clause |
ssaeger/scikit-learn | benchmarks/bench_isotonic.py | 38 | 3047 | """
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
    """Return a noisy, increasing-on-average series of length ``size``.

    Bug fix: the original body referenced the global ``n`` (set only inside
    the ``__main__`` benchmark loop) instead of the ``size`` parameter, so
    calling this function standalone raised a NameError.
    """
    return np.random.randint(-50, 50, size=size) \
        + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
    """Draw boolean samples whose success probability follows a logistic curve."""
    sorted_normals = np.sort(np.random.normal(size=size))
    probabilities = 1.0 / (1.0 + np.exp(-sorted_normals))
    return np.random.random(size=size) < probabilities
# Registry mapping the --dataset CLI choices to their generator functions.
DATASET_GENERATORS = {
    'perturbed_logarithm': generate_perturbed_logarithm_dataset,
    'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
    """
    Runs a single iteration of isotonic regression on the input data,
    and reports the total time taken (in seconds).
    """
    gc.collect()
    tstart = datetime.now()
    isotonic_regression(Y)
    # ``timedelta.total_seconds()`` is standard since Python 2.7, so the
    # ``sklearn.utils.bench.total_seconds`` compatibility shim (removed in
    # later sklearn releases) is unnecessary.
    return (datetime.now() - tstart).total_seconds()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Isotonic Regression benchmark tool")
    parser.add_argument('--iterations', type=int, required=True,
                        help="Number of iterations to average timings over "
                        "for each problem size")
    parser.add_argument('--log_min_problem_size', type=int, required=True,
                        help="Base 10 logarithm of the minimum problem size")
    parser.add_argument('--log_max_problem_size', type=int, required=True,
                        help="Base 10 logarithm of the maximum problem size")
    parser.add_argument('--show_plot', action='store_true',
                        help="Plot timing output with matplotlib")
    parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
                        required=True)
    args = parser.parse_args()

    timings = []
    # Sweep problem sizes 10**min .. 10**(max-1), timing each one.
    for exponent in range(args.log_min_problem_size,
                          args.log_max_problem_size):
        n = 10 ** exponent
        Y = DATASET_GENERATORS[args.dataset](n)
        time_per_iteration = \
            [bench_isotonic_regression(Y) for i in range(args.iterations)]
        timing = (n, np.mean(time_per_iteration))
        timings.append(timing)
        # If we're not plotting, dump the timing to stdout
        if not args.show_plot:
            print(n, np.mean(time_per_iteration))

    # Log-log plot makes the scaling exponent visible as a slope.
    if args.show_plot:
        plt.plot(*zip(*timings))
        plt.title("Average time taken running isotonic regression")
        plt.xlabel('Number of observations')
        plt.ylabel('Time (s)')
        plt.axis('tight')
        plt.loglog()
        plt.show()
| bsd-3-clause |
poeticcapybara/pythalesians | pythalesians/timeseries/calcs/timeseriestimezone.py | 1 | 4543 | __author__ = 'saeedamen' # Saeed Amen / saeed@thalesians.com
#
# Copyright 2015 Thalesians Ltd. - http//www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
TimeSeriesTimezone
Various functions wrapping onto pandas and pytz for quickly converting timezones for dataframes.
"""
import numpy
import pytz
import pandas.tseries.offsets
class TimeSeriesTimezone:
    """Thin wrappers around pandas/pytz for converting DataFrame indices
    between timezones.

    ``convert_index_from_*`` methods localize a naive index first, while
    ``convert_index_aware_*`` methods expect an already tz-aware index.
    """

    def convert_index_from_UTC_to_new_york_time(self, data_frame):
        """Localize a naive index as UTC, then convert it to New York time."""
        new_york = pytz.timezone('America/New_York')
        data_frame = data_frame.tz_localize(pytz.utc).tz_convert(new_york)
        return data_frame

    def convert_index_from_UTC_to_london_time(self, data_frame):
        """Localize a naive index as UTC, then convert it to London time."""
        london = pytz.timezone('Europe/London')
        data_frame = data_frame.tz_localize(pytz.utc).tz_convert(london)
        return data_frame

    def convert_index_time_zone(self, data_frame, from_tz, to_tz):
        """Localize a naive index as ``from_tz`` and convert it to ``to_tz``."""
        data_frame = data_frame.tz_localize(pytz.timezone(from_tz))\
            .tz_convert(pytz.timezone(to_tz))
        return data_frame

    def convert_index_from_UTC_to_alt(self, data_frame, time_zone):
        """Localize a naive index as UTC, then convert it to ``time_zone``."""
        alt = pytz.timezone(time_zone)
        data_frame = data_frame.tz_localize(pytz.utc).tz_convert(alt)
        return data_frame

    def convert_index_aware_to_UTC_time(self, data_frame):
        """Convert a tz-aware index to UTC."""
        utc = pytz.timezone('UTC')
        data_frame = data_frame.tz_convert(utc)
        return data_frame

    def convert_index_aware_to_new_york_time(self, data_frame):
        """Convert a tz-aware index to New York time."""
        new_york = pytz.timezone('America/New_York')
        data_frame = data_frame.tz_convert(new_york)
        return data_frame

    def convert_index_aware_to_london_time(self, data_frame):
        """Convert a tz-aware index to London time."""
        london = pytz.timezone('Europe/London')
        data_frame = data_frame.tz_convert(london)
        return data_frame

    def convert_index_aware_to_alt(self, data_frame, time_zone):
        """Convert a tz-aware index to the named timezone."""
        alt = pytz.timezone(time_zone)
        data_frame = data_frame.tz_convert(alt)
        return data_frame

    def localise_index_as_UTC(self, data_frame):
        """Mark a naive index as UTC (no wall-clock shift)."""
        data_frame = data_frame.tz_localize(pytz.utc)
        return data_frame

    def localise_index_as_new_york_time(self, data_frame):
        """Mark a naive index as New York time (no wall-clock shift)."""
        new_york = pytz.timezone('America/New_York')
        data_frame = data_frame.tz_localize(new_york)
        return data_frame

    def set_as_no_timezone(self, data_frame):
        """Clear the timezone attribute on the index in place.

        NOTE(review): assigning ``index.tz`` directly is rejected by modern
        pandas; this only works on old versions -- confirm before relying on it.
        """
        data_frame.index.tz = None
        return data_frame

    def tz_UTC_to_naive(self, data_frame):
        """
        Convert a tz-aware DatetimeIndex into a tz-naive one, effectively
        baking the local wall-clock time into the internal representation.

        Fixed: the original returned ``-1`` (and was flagged "doesn't work")
        even though the index had already been rewritten correctly; the
        mutated frame is now returned.

        Parameters
        ----------
        data_frame : DataFrame with a tz-aware DatetimeIndex

        Returns
        -------
        The same DataFrame, with a tz-naive DatetimeIndex
        """
        datetime_index = data_frame.index

        # Calculate timezone offset relative to UTC
        timestamp = datetime_index[0]
        tz_offset = (timestamp.replace(tzinfo=None) -
                     timestamp.tz_convert('UTC').replace(tzinfo=None))
        tz_offset_td64 = numpy.timedelta64(tz_offset)

        # Now convert to naive DatetimeIndex
        data_frame.index = pandas.DatetimeIndex(datetime_index.values + tz_offset_td64)

        return data_frame

    def tz_strip(self, data_frame):
        """
        Convert a tz-aware DatetimeIndex into a tz-naive one holding the
        equivalent UTC wall-clock times.

        Fixed: the original returned ``None`` (with a "TODO fix as doesn't
        work" comment) even though the index had already been rewritten; the
        mutated frame is now returned.

        Parameters
        ----------
        data_frame : DataFrame with a tz-aware DatetimeIndex

        Returns
        -------
        The same DataFrame, with a tz-naive DatetimeIndex
        """
        datetime_index = data_frame.index

        # ``.values`` of a tz-aware index are UTC datetime64 values, so the
        # rebuilt index is naive UTC.
        data_frame.index = pandas.DatetimeIndex(datetime_index.values)

        return data_frame
| apache-2.0 |
NorfolkDataSci/presentations | 2018-01_chatbot/serverless-chatbots-workshop-master/LambdaFunctions/nlp/nltk/parse/dependencygraph.py | 7 | 31002 | # Natural Language Toolkit: Dependency Grammars
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Jason Narad <jason.narad@gmail.com>
# Steven Bird <stevenbird1@gmail.com> (modifications)
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
#
"""
Tools for reading and writing dependency trees.
The input is assumed to be in Malt-TAB format
(http://stp.lingfil.uu.se/~nivre/research/MaltXML.html).
"""
from __future__ import print_function, unicode_literals
from collections import defaultdict
from itertools import chain
from pprint import pformat
import subprocess
import warnings
from nltk.tree import Tree
from nltk.compat import python_2_unicode_compatible, string_types
#################################################################
# DependencyGraph Class
#################################################################
@python_2_unicode_compatible
class DependencyGraph(object):
"""
A container for the nodes and labelled edges of a dependency structure.
"""
def __init__(self, tree_str=None, cell_extractor=None, zero_based=False, cell_separator=None, top_relation_label='ROOT'):
"""Dependency graph.
We place a dummy `TOP` node with the index 0, since the root node is
often assigned 0 as its head. This also means that the indexing of the
nodes corresponds directly to the Malt-TAB format, which starts at 1.
If zero-based is True, then Malt-TAB-like input with node numbers
starting at 0 and the root node assigned -1 (as produced by, e.g.,
zpar).
:param str cell_separator: the cell separator. If not provided, cells
are split by whitespace.
:param str top_relation_label: the label by which the top relation is
identified, for examlple, `ROOT`, `null` or `TOP`.
"""
self.nodes = defaultdict(lambda: {'address': None,
'word': None,
'lemma': None,
'ctag': None,
'tag': None,
'feats': None,
'head': None,
'deps': defaultdict(list),
'rel': None,
})
self.nodes[0].update(
{
'ctag': 'TOP',
'tag': 'TOP',
'address': 0,
}
)
self.root = None
if tree_str:
self._parse(
tree_str,
cell_extractor=cell_extractor,
zero_based=zero_based,
cell_separator=cell_separator,
top_relation_label=top_relation_label,
)
def remove_by_address(self, address):
"""
Removes the node with the given address. References
to this node in others will still exist.
"""
del self.nodes[address]
def redirect_arcs(self, originals, redirect):
"""
Redirects arcs to any of the nodes in the originals list
to the redirect node address.
"""
for node in self.nodes.values():
new_deps = []
for dep in node['deps']:
if dep in originals:
new_deps.append(redirect)
else:
new_deps.append(dep)
node['deps'] = new_deps
def add_arc(self, head_address, mod_address):
"""
Adds an arc from the node specified by head_address to the
node specified by the mod address.
"""
relation = self.nodes[mod_address]['rel']
self.nodes[head_address]['deps'].setdefault(relation, [])
self.nodes[head_address]['deps'][relation].append(mod_address)
#self.nodes[head_address]['deps'].append(mod_address)
def connect_graph(self):
"""
Fully connects all non-root nodes. All nodes are set to be dependents
of the root node.
"""
for node1 in self.nodes.values():
for node2 in self.nodes.values():
if node1['address'] != node2['address'] and node2['rel'] != 'TOP':
relation = node2['rel']
node1['deps'].setdefault(relation, [])
node1['deps'][relation].append(node2['address'])
#node1['deps'].append(node2['address'])
def get_by_address(self, node_address):
    """Return the node with the given address.

    Note: ``self.nodes`` is a defaultdict, so looking up an unseen address
    creates (and returns) a fresh blank node rather than raising.
    """
    return self.nodes[node_address]
def contains_address(self, node_address):
    """
    Returns true if the graph contains a node with the given node
    address, false otherwise.  Unlike ``get_by_address`` this never
    creates a node as a side effect.
    """
    return node_address in self.nodes
def to_dot(self):
"""Return a dot representation suitable for using with Graphviz.
>>> dg = DependencyGraph(
... 'John N 2\\n'
... 'loves V 0\\n'
... 'Mary N 2'
... )
>>> print(dg.to_dot())
digraph G{
edge [dir=forward]
node [shape=plaintext]
<BLANKLINE>
0 [label="0 (None)"]
0 -> 2 [label="ROOT"]
1 [label="1 (John)"]
2 [label="2 (loves)"]
2 -> 1 [label=""]
2 -> 3 [label=""]
3 [label="3 (Mary)"]
}
"""
# Start the digraph specification
s = 'digraph G{\n'
s += 'edge [dir=forward]\n'
s += 'node [shape=plaintext]\n'
# Draw the remaining nodes
for node in sorted(self.nodes.values(), key=lambda v: v['address']):
s += '\n%s [label="%s (%s)"]' % (node['address'], node['address'], node['word'])
for rel, deps in node['deps'].items():
for dep in deps:
if rel is not None:
s += '\n%s -> %s [label="%s"]' % (node['address'], dep, rel)
else:
s += '\n%s -> %s ' % (node['address'], dep)
s += "\n}"
return s
def _repr_svg_(self):
    """Show SVG representation of the transducer (IPython magic).

    Requires the Graphviz ``dot`` executable on the PATH.

    >>> dg = DependencyGraph(
    ...     'John N 2\\n'
    ...     'loves V 0\\n'
    ...     'Mary N 2'
    ... )
    >>> dg._repr_svg_().split('\\n')[0]
    '<?xml version="1.0" encoding="UTF-8" standalone="no"?>'
    """
    dot_string = self.to_dot()

    try:
        # Text mode (universal_newlines) so we pipe str, not bytes.
        process = subprocess.Popen(
            ['dot', '-Tsvg'],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
        )
    except OSError:
        # Popen raises OSError when the executable cannot be found.
        raise Exception('Cannot find the dot binary from Graphviz package')
    out, err = process.communicate(dot_string)
    if err:
        # Any stderr output from dot is treated as a rendering failure.
        raise Exception(
            'Cannot create svg representation by running dot from string: {}'
            ''.format(dot_string))
    return out
def __str__(self):
    # Human-readable dump: pretty-printed mapping of address -> node dict.
    return pformat(self.nodes)
def __repr__(self):
    # Concise debugging summary: node count only (the full table can be huge).
    return "<DependencyGraph with {0} nodes>".format(len(self.nodes))
@staticmethod
def load(filename, zero_based=False, cell_separator=None, top_relation_label='ROOT'):
    """
    Read a file containing one or more dependency trees separated by
    blank lines and build a :class:`DependencyGraph` for each.

    :param filename: a name of a file in Malt-TAB format
    :param zero_based: nodes in the input file are numbered starting from 0
        rather than 1 (as produced by, e.g., zpar)
    :param str cell_separator: the cell separator. If not provided, cells
        are split by whitespace.
    :param str top_relation_label: the label by which the top relation is
        identified, for example, `ROOT`, `null` or `TOP`.

    :return: a list of DependencyGraphs
    """
    with open(filename) as infile:
        # Trees are separated by blank lines ('\n\n').
        return [
            DependencyGraph(
                tree_str,
                zero_based=zero_based,
                cell_separator=cell_separator,
                top_relation_label=top_relation_label,
            )
            for tree_str in infile.read().split('\n\n')
        ]
def left_children(self, node_index):
    """Return how many dependents of the node at *node_index* precede it
    (i.e. have a smaller address).
    """
    node = self.nodes[node_index]
    own_address = node['address']
    total = 0
    for dep_address in chain.from_iterable(node['deps'].values()):
        if dep_address < own_address:
            total += 1
    return total
def right_children(self, node_index):
    """Return how many dependents of the node at *node_index* follow it
    (i.e. have a larger address).
    """
    node = self.nodes[node_index]
    own_address = node['address']
    total = 0
    for dep_address in chain.from_iterable(node['deps'].values()):
        if dep_address > own_address:
            total += 1
    return total
def add_node(self, node):
    # Merge *node* into the graph, but only when its address is not already
    # present.  NOTE(review): relies on ``self.nodes`` being a
    # defaultdict-like mapping that materializes an empty node template on
    # first access -- confirm against the class constructor (not visible
    # in this chunk).
    if not self.contains_address(node['address']):
        self.nodes[node['address']].update(node)
def _parse(self, input_, cell_extractor=None, zero_based=False, cell_separator=None, top_relation_label='ROOT'):
    """Parse a sentence.

    :param extractor: a function that given a tuple of cells returns a
        7-tuple, where the values are ``word, lemma, ctag, tag, feats, head,
        rel``.
    :param str cell_separator: the cell separator. If not provided, cells
        are split by whitespace.
    :param str top_relation_label: the label by which the top relation is
        identified, for example, `ROOT`, `null` or `TOP`.
    """

    def extract_3_cells(cells, index):
        # Malt-TAB 3-column: word, tag, head; the word doubles as lemma.
        word, tag, head = cells
        return index, word, word, tag, tag, '', head, ''

    def extract_4_cells(cells, index):
        # Malt-TAB 4-column: as above plus the dependency relation.
        word, tag, head, rel = cells
        return index, word, word, tag, tag, '', head, rel

    def extract_7_cells(cells, index):
        # 7-column format: the line carries its own index in column 1.
        line_index, word, lemma, tag, _, head, rel = cells
        try:
            index = int(line_index)
        except ValueError:
            # index can't be parsed as an integer, use default
            pass
        return index, word, lemma, tag, tag, '', head, rel

    def extract_10_cells(cells, index):
        # CoNLL 10-column format; the trailing two columns are ignored.
        line_index, word, lemma, ctag, tag, feats, head, rel, _, _ = cells
        try:
            index = int(line_index)
        except ValueError:
            # index can't be parsed as an integer, use default
            pass
        return index, word, lemma, ctag, tag, feats, head, rel

    # Dispatch table keyed by the column count of the input lines.
    extractors = {
        3: extract_3_cells,
        4: extract_4_cells,
        7: extract_7_cells,
        10: extract_10_cells,
    }

    if isinstance(input_, string_types):
        # Accept a whole sentence as one string; iterate its lines.
        input_ = (line for line in input_.split('\n'))

    # Strip trailing whitespace and drop empty lines.
    lines = (l.rstrip() for l in input_)
    lines = (l for l in lines if l)

    cell_number = None
    for index, line in enumerate(lines, start=1):
        cells = line.split(cell_separator)
        if cell_number is None:
            # Column count is fixed by the first line.
            cell_number = len(cells)
        else:
            assert cell_number == len(cells)

        if cell_extractor is None:
            try:
                cell_extractor = extractors[cell_number]
            except KeyError:
                raise ValueError(
                    'Number of tab-delimited fields ({0}) not supported by '
                    'CoNLL(10) or Malt-Tab(4) format'.format(cell_number)
                )

        try:
            index, word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells, index)
        except (TypeError, ValueError):
            # cell_extractor doesn't take 2 arguments or doesn't return 8
            # values; assume the cell_extractor is an older external
            # extractor and doesn't accept or return an index.
            word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells)

        if head == '_':
            # Token without a head (e.g. multi-word token line) -- skip.
            continue

        head = int(head)
        if zero_based:
            # Shift so the artificial root keeps address 0.
            head += 1

        self.nodes[index].update(
            {
                'address': index,
                'word': word,
                'lemma': lemma,
                'ctag': ctag,
                'tag': tag,
                'feats': feats,
                'head': head,
                'rel': rel,
            }
        )

        # Make sure that the fake root node has labeled dependencies.
        if (cell_number == 3) and (head == 0):
            rel = top_relation_label
        self.nodes[head]['deps'][rel].append(index)

    if self.nodes[0]['deps'][top_relation_label]:
        # First dependent of the artificial root becomes the tree root.
        root_address = self.nodes[0]['deps'][top_relation_label][0]
        self.root = self.nodes[root_address]
        self.top_relation_label = top_relation_label
    else:
        warnings.warn(
            "The graph doesn't contain a node "
            "that depends on the root element."
        )
def _word(self, node, filter=True):
    # Return the node's surface word.
    # NOTE(review): the ``filter`` branch is a no-op -- both paths return
    # the word unchanged, commas included.  A comma filter was presumably
    # intended here; confirm before changing behavior.
    w = node['word']
    if filter:
        if w != ',':
            return w
    return w
def _tree(self, i):
    """ Turn dependency graphs into NLTK trees.

    :param int i: index of a node
    :return: either a word (if the indexed node is a leaf) or a ``Tree``.
    """
    node = self.get_by_address(i)

    word = node['word']
    # Children are all dependents, sorted by address (surface order).
    deps = sorted(chain.from_iterable(node['deps'].values()))

    if deps:
        # Internal node: recurse into each dependent subtree.
        return Tree(word, [self._tree(dep) for dep in deps])
    else:
        # Leaf: represented by the bare word.
        return word
def tree(self):
    """
    Starting with the ``root`` node, build a dependency tree using the NLTK
    ``Tree`` constructor. Dependency labels are omitted.
    """
    node = self.root

    word = node['word']
    # Dependents in surface (address) order become the root's children.
    deps = sorted(chain.from_iterable(node['deps'].values()))
    return Tree(word, [self._tree(dep) for dep in deps])
def triples(self, node=None):
    """Yield dependency triples from *node* downward (default: the root).

    Each triple has the form
    ``((head word, head tag), rel, (dep word, dep tag))``.
    """
    if not node:
        node = self.root

    head = (node['word'], node['ctag'])
    for address in sorted(chain.from_iterable(node['deps'].values())):
        dependent = self.get_by_address(address)
        yield (head, dependent['rel'], (dependent['word'], dependent['ctag']))
        # Depth-first recursion through the dependent's own subtree.
        for triple in self.triples(node=dependent):
            yield triple
def _hd(self, i):
    """Return the head address of node *i*, or ``None`` when missing.

    Bug fix: ``self.nodes`` is a mapping, so a missing address raises
    ``KeyError``, not ``IndexError`` -- the original only caught
    ``IndexError`` and let lookups of absent nodes escape.  Both are now
    caught for backward compatibility with list-backed callers.
    """
    try:
        return self.nodes[i]['head']
    except (IndexError, KeyError):
        return None
def _rel(self, i):
    """Return the relation label of node *i*, or ``None`` when missing.

    Bug fix: ``self.nodes`` is a mapping, so a missing address raises
    ``KeyError``, not ``IndexError`` -- the original only caught
    ``IndexError`` and let lookups of absent nodes escape.  Both are now
    caught for backward compatibility with list-backed callers.
    """
    try:
        return self.nodes[i]['rel']
    except (IndexError, KeyError):
        return None
# what's the return type? Boolean or list?
def contains_cycle(self):
    """Check whether there are cycles.

    Returns ``False`` for an acyclic graph, otherwise a list of node
    addresses lying on a cycle.

    >>> dg = DependencyGraph(treebank_data)
    >>> dg.contains_cycle()
    False

    >>> cyclic_dg = DependencyGraph()
    >>> top = {'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0}
    >>> child1 = {'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1}
    >>> child2 = {'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2}
    >>> child3 = {'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3}
    >>> child4 = {'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4}
    >>> cyclic_dg.nodes = {
    ...     0: top,
    ...     1: child1,
    ...     2: child2,
    ...     3: child3,
    ...     4: child4,
    ... }
    >>> cyclic_dg.root = top
    >>> cyclic_dg.contains_cycle()
    [3, 1, 2, 4]
    """
    distances = {}

    # Seed with the direct edges (distance 1).
    # NOTE(review): iterates node['deps'] directly, which matches the flat
    # list layout used in the doctest; for graphs built by _parse, 'deps'
    # is a relation->addresses mapping and iteration would yield relation
    # labels -- confirm the intended input shape.
    for node in self.nodes.values():
        for dep in node['deps']:
            key = tuple([node['address'], dep])
            distances[key] = 1

    # Iteratively extend known paths (a bounded transitive closure);
    # a path from a node back to itself signals a cycle.
    for _ in self.nodes:
        new_entries = {}

        for pair1 in distances:
            for pair2 in distances:
                if pair1[1] == pair2[0]:
                    key = tuple([pair1[0], pair2[1]])
                    new_entries[key] = distances[pair1] + distances[pair2]

        for pair in new_entries:
            distances[pair] = new_entries[pair]
            if pair[0] == pair[1]:
                # Found a cycle; reconstruct and return one concrete path.
                path = self.get_cycle_path(self.get_by_address(pair[0]), pair[0])
                return path

    return False  # return []?
def get_cycle_path(self, curr_node, goal_node_index):
    # Depth-first search from *curr_node* back to *goal_node_index*;
    # returns the list of addresses on the path, or [] when none exists.
    # Helper for contains_cycle().
    # NOTE(review): like contains_cycle, this assumes curr_node['deps'] is
    # a flat list of addresses -- confirm against the intended node layout.
    for dep in curr_node['deps']:
        if dep == goal_node_index:
            return [curr_node['address']]
    for dep in curr_node['deps']:
        path = self.get_cycle_path(self.get_by_address(dep), goal_node_index)
        if len(path) > 0:
            path.insert(0, curr_node['address'])
            return path
    return []
def to_conll(self, style):
    """
    The dependency graph in CoNLL format.

    :param style: the style to use for the format (3, 4, 10 columns)
    :type style: int
    :rtype: str
    """
    # Dispatch on the requested column count.
    templates = {
        3: '{word}\t{tag}\t{head}\n',
        4: '{word}\t{tag}\t{head}\t{rel}\n',
        10: '{i}\t{word}\t{lemma}\t{ctag}\t{tag}\t{feats}\t{head}\t{rel}\t_\t_\n',
    }
    try:
        template = templates[style]
    except KeyError:
        raise ValueError(
            'Number of tab-delimited fields ({0}) not supported by '
            'CoNLL(10) or Malt-Tab(4) format'.format(style)
        )

    # One line per real token; the artificial TOP node is skipped.
    rendered = []
    for i, node in sorted(self.nodes.items()):
        if node['tag'] != 'TOP':
            rendered.append(template.format(i=i, **node))
    return ''.join(rendered)
def nx_graph(self):
    """Convert the data in a ``nodelist`` into a networkx labeled directed graph."""
    import networkx

    # Address 0 is the artificial root, so real tokens start at 1.
    nx_nodelist = list(range(1, len(self.nodes)))
    # Edges run token -> head, labeled with the dependency relation;
    # tokens whose head is falsy (e.g. 0 / None) contribute no edge.
    nx_edgelist = [
        (n, self._hd(n), self._rel(n))
        for n in nx_nodelist if self._hd(n)
    ]
    # Remember the word of each token for later drawing.
    self.nx_labels = {}
    for n in nx_nodelist:
        self.nx_labels[n] = self.nodes[n]['word']

    g = networkx.MultiDiGraph()
    g.add_nodes_from(nx_nodelist)
    g.add_edges_from(nx_edgelist)

    return g
class DependencyGraphError(Exception):
    """Dependency graph exception, raised for errors specific to
    :class:`DependencyGraph` operations."""
def demo():
    # Run every demonstration in sequence.
    malt_demo()
    conll_demo()
    conll_file_demo()
    cycle_finding_demo()
def malt_demo(nx=False):
    """
    A demonstration of the result of reading a dependency
    version of the first sentence of the Penn Treebank.

    :param nx: when True, additionally draw the graph with
        networkx/matplotlib (currently doesn't work).
    """
    dg = DependencyGraph("""Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
""")
    tree = dg.tree()
    tree.pprint()
    if nx:
        # currently doesn't work
        import networkx
        from matplotlib import pylab

        g = dg.nx_graph()
        g.info()
        pos = networkx.spring_layout(g, dim=1)
        networkx.draw_networkx_nodes(g, pos, node_size=50)
        # networkx.draw_networkx_edges(g, pos, edge_color='k', width=8)
        networkx.draw_networkx_labels(g, pos, dg.nx_labels)
        pylab.xticks([])
        pylab.yticks([])
        pylab.savefig('tree.png')
        pylab.show()
def conll_demo():
    """
    A demonstration of how to read a string representation of
    a CoNLL format dependency tree.
    """
    dg = DependencyGraph(conll_data1)
    tree = dg.tree()
    tree.pprint()
    # Show both the raw node table and the round-tripped CoNLL text.
    print(dg)
    print(dg.to_conll(4))
def conll_file_demo():
    """Parse several CoNLL sentences at once and pretty-print each tree."""
    print('Mass conll_read demo...')
    # Sentences in conll_data2 are separated by blank lines.
    graphs = [DependencyGraph(entry)
              for entry in conll_data2.split('\n\n') if entry]
    for graph in graphs:
        tree = graph.tree()
        print('\n')
        tree.pprint()
def cycle_finding_demo():
    """Demonstrate cycle detection on an acyclic and a cyclic graph."""
    # Acyclic treebank sentence: expect False.
    dg = DependencyGraph(treebank_data)
    print(dg.contains_cycle())
    # Hand-built graph with a 1 -> 2 -> 4 -> 3 -> 1 cycle: expect a path.
    cyclic_dg = DependencyGraph()
    cyclic_dg.add_node({'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0})
    cyclic_dg.add_node({'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1})
    cyclic_dg.add_node({'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2})
    cyclic_dg.add_node({'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3})
    cyclic_dg.add_node({'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4})
    print(cyclic_dg.contains_cycle())
treebank_data = """Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
"""
conll_data1 = """
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
"""
conll_data2 = """1 Cathy Cathy N N eigen|ev|neut 2 su _ _
2 zag zie V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 hen hen Pron Pron per|3|mv|datofacc 2 obj1 _ _
4 wild wild Adj Adj attr|stell|onverv 5 mod _ _
5 zwaaien zwaai N N soort|mv|neut 2 vc _ _
6 . . Punc Punc punt 5 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
1 Dat dat Pron Pron aanw|neut|attr 2 det _ _
2 werkwoord werkwoord N N soort|ev|neut 6 obj1 _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 ze ze Pron Pron per|3|evofmv|nom 6 su _ _
5 zelf zelf Pron Pron aanw|neut|attr|wzelf 3 predm _ _
6 uitgevonden vind V V trans|verldw|onverv 3 vc _ _
7 . . Punc Punc punt 6 punct _ _
1 Het het Pron Pron onbep|neut|zelfst 2 su _ _
2 hoorde hoor V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 bij bij Prep Prep voor 2 ld _ _
4 de de Art Art bep|zijdofmv|neut 6 det _ _
5 warme warm Adj Adj attr|stell|vervneut 6 mod _ _
6 zomerdag zomerdag N N soort|ev|neut 3 obj1 _ _
7 die die Pron Pron betr|neut|zelfst 6 mod _ _
8 ze ze Pron Pron per|3|evofmv|nom 12 su _ _
9 ginds ginds Adv Adv gew|aanw 12 mod _ _
10 achter achter Adv Adv gew|geenfunc|stell|onverv 12 svp _ _
11 had heb V V hulp|ovt|1of2of3|ev 7 body _ _
12 gelaten laat V V trans|verldw|onverv 11 vc _ _
13 . . Punc Punc punt 12 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 hadden heb V V trans|ovt|1of2of3|mv 0 ROOT _ _
3 languit languit Adv Adv gew|geenfunc|stell|onverv 11 mod _ _
4 naast naast Prep Prep voor 11 mod _ _
5 elkaar elkaar Pron Pron rec|neut 4 obj1 _ _
6 op op Prep Prep voor 11 ld _ _
7 de de Art Art bep|zijdofmv|neut 8 det _ _
8 strandstoelen strandstoel N N soort|mv|neut 6 obj1 _ _
9 kunnen kan V V hulp|inf 2 vc _ _
10 gaan ga V V hulp|inf 9 vc _ _
11 liggen lig V V intrans|inf 10 vc _ _
12 . . Punc Punc punt 11 punct _ _
1 Zij zij Pron Pron per|3|evofmv|nom 2 su _ _
2 zou zal V V hulp|ovt|1of2of3|ev 7 cnj _ _
3 mams mams N N soort|ev|neut 4 det _ _
4 rug rug N N soort|ev|neut 5 obj1 _ _
5 ingewreven wrijf V V trans|verldw|onverv 6 vc _ _
6 hebben heb V V hulp|inf 2 vc _ _
7 en en Conj Conj neven 0 ROOT _ _
8 mam mam V V trans|ovt|1of2of3|ev 7 cnj _ _
9 de de Art Art bep|zijdofmv|neut 10 det _ _
10 hare hare Pron Pron bez|3|ev|neut|attr 8 obj1 _ _
11 . . Punc Punc punt 10 punct _ _
1 Of of Conj Conj onder|metfin 0 ROOT _ _
2 ze ze Pron Pron per|3|evofmv|nom 3 su _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 gewoon gewoon Adj Adj adv|stell|onverv 10 mod _ _
5 met met Prep Prep voor 10 mod _ _
6 haar haar Pron Pron bez|3|ev|neut|attr 7 det _ _
7 vriendinnen vriendin N N soort|mv|neut 5 obj1 _ _
8 rond rond Adv Adv deelv 10 svp _ _
9 kunnen kan V V hulp|inf 3 vc _ _
10 slenteren slenter V V intrans|inf 9 vc _ _
11 in in Prep Prep voor 10 mod _ _
12 de de Art Art bep|zijdofmv|neut 13 det _ _
13 buurt buurt N N soort|ev|neut 11 obj1 _ _
14 van van Prep Prep voor 13 mod _ _
15 Trafalgar_Square Trafalgar_Square MWU N_N eigen|ev|neut_eigen|ev|neut 14 obj1 _ _
16 . . Punc Punc punt 15 punct _ _
"""
if __name__ == '__main__':
    # Run all demonstrations when executed as a script.
    demo()
| mit |
yejingfu/samples | tensorflow/pyplot05.py | 1 | 2102 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import re
small = np.array([2561,2472,2544,2548,2576,2262,2540,2570,2546,2570,2574,2552,2546,2614,2600,2526,2578,2550,2630,2580,2598,2580,2634,2556,2588,2580,2584,2618,2616,2594,2530,2586,2602,2596,2544,2550,2568,2580,2540,2560,2560,2580,2598,2590,2578,2590,2592,2560,2542,2548,2566,2608,2594,2580,2596,2620,2614,2570,2622,2566,2598,2504,2572,2564,2582,2590,2564,2592,2598,2570,2602,2600,2568,2556,2598,2550,2574,2548,2596,2624,2608,2624,2608,2572,2604,2616,2600,2516,2604])
medium = np.array([59,58,104,104,76,62,98,62,156,226,204,62,124,48,216,136,232,164,266,118,68,64,84,96,150,278,166,134,218,180,228,230,170,116,238,278,162,142,278,248,282,236,146,190,168,256,152,248,226,278,286,270,268,272,236,268,270,268,264,242,204,246,194,84,232,266,182,102,92,250,84,246,272,268,140,252,266,206,272,102,238,250,282,116,106,156,182,268,190])
big = np.array([11,14,16,14,10,14,14,16,16,14,14,16,16,16,16,14,14,14,16,14,16,16,14,14,14,16,16,16,12,14,14,14,14,14,14,14,12,16,12,14,14,14,16,14,14,16,14,16,16,16,16,16,14,14,14,16,16,16,16,14,16,16,16,14,14,14,16,16,16,14,16,16,14,16,16,16,14,16,16,16,14,16,12,14,10,12,10,12,12])
small_besteffort = np.array([3897,3738,3448,3694,3704,3688,3742,3706,3696,3880,3514,3880,3378,3702,3616,3756,3878,3740,3972,3578,3912,3870,3592,3692,3808,3702,3412,3464,3268,3690,3628,3802,3324,3492,3438,3162,3140,3540,3828,3638,3714,3704,3612,3520,3226,3620,3714,3462,3698,3576,3522,3594,3876,3508,3760,3572,3852,3726,3448,3404,3878,3686,3940,3930,3418,3186,3692,3728,3612,3894,3728,3302,3574,3742,3808,3936,3704,3784,3448,3500,3874,3956,3846,3800,3584,3828,3328,3646,3688])
# Scale every series by 10 before plotting.
# NOTE(review): units are unspecified -- presumably a display-unit
# conversion; confirm against how the raw numbers were collected.
small = small * 10
medium = medium * 10
big = big * 10
small_besteffort = small_besteffort * 10

print("size: {}, {}, {}, {}".format(len(small), len(medium), len(big), len(small_besteffort)))

# Three stacked panels, one per payload size; the first panel overlays
# the best-effort series for comparison.
plt.subplot(311)
#plt.title("payload=16KB")
plt.plot(small)
plt.plot(small_besteffort)
plt.subplot(312)
#plt.title("payload=512KB")
plt.plot(medium)
plt.subplot(313)
#plt.title("payload=8MB")
plt.plot(big)
plt.show()
print("done!")
| mit |
Meisterschueler/ogn-python | app/main/matplotlib_service.py | 2 | 1400 | from app import db
from app.model import DirectionStatistic
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
def create_range_figure2(sender_id):
    """Build a placeholder figure of 100 random integers in [1, 50].

    The *sender_id* argument is accepted for interface parity but unused.
    """
    fig = Figure()
    axis = fig.add_subplot(1, 1, 1)
    sample_count = 100
    xs = range(sample_count)
    ys = [random.randint(1, 50) for _ in xs]
    axis.plot(xs, ys)
    return fig
def create_range_figure(sender_id):
    """Build a polar bar chart of max reception range per direction for
    the sender's best-covered receiver.

    Queries the DirectionStatistic with the highest directions_count for
    *sender_id*; raises if no row exists (sqlalchemy ``.one()``).
    """
    sds = db.session.query(DirectionStatistic) \
        .filter(DirectionStatistic.sender_id == sender_id) \
        .order_by(DirectionStatistic.directions_count.desc()) \
        .limit(1) \
        .one()

    fig = Figure()

    direction_data = sds.direction_data
    # Ranges are stored in metres; plot in kilometres.
    max_range = max([r['max_range'] / 1000.0 for r in direction_data])

    # Directions are in degrees; convert to radians for the polar axes.
    theta = np.array([i['direction'] / 180 * np.pi for i in direction_data])
    radii = np.array([i['max_range'] / 1000 if i['max_range'] > 0 else 0 for i in direction_data])
    width = np.array([13 / 180 * np.pi for i in direction_data])
    # Color each wedge by its range relative to the maximum.
    colors = plt.cm.viridis(radii / max_range)

    ax = fig.add_subplot(111, projection='polar')
    ax.bar(theta, radii, width=width, bottom=0.0, color=colors, edgecolor='b', alpha=0.5)
    #ax.set_rticks([0, 25, 50, 75, 100, 125, 150])
    # Compass convention: 0 degrees at north, angles increase clockwise.
    ax.set_theta_zero_location("N")
    ax.set_theta_direction(-1)
    fig.suptitle(f"Range between sender '{sds.sender.name}' and receiver '{sds.receiver.name}'")

    return fig
| agpl-3.0 |
356255531/SpikingDeepRLControl | code/EnvBo/Q-Learning/Testing_Arm_1point/testing.py | 1 | 4647 | #!/usr/bin/python
import matplotlib
matplotlib.backend = 'Qt4Agg'
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(precision=4)
import os
import sys
import threading
import time
from collections import deque
# import own modules
import agents
import goals
import q_networks
ARM_LENGTH_1 = 3.0  # length of the first arm segment (units unspecified -- TODO confirm)
ARM_LENGTH_2 = 5.0  # length of the second arm segment
ANGULAR_ARM_VELOCITY = 1.0*np.pi/180.0  # joint step: one degree per update, in radians
GOAL_THRESHOLD = 0.05  # distance below which the goal counts as reached
HEIGHT = 20  # plot window height (axis limits span -HEIGHT/2..HEIGHT/2)
MAX_STEPS = 500  # maximal steps per episode
NUM_OF_ACTIONS = 4  # size of the discrete action space
NUM_OF_ACTORS = 4  # number of concurrent actor threads
NUM_OF_PLOTS_X = 2  # subplot grid columns
NUM_OF_PLOTS_Y = 2  # subplot grid rows
NUM_OF_STATES = 6  # dimensionality of the combined agent+goal state vector
WIDTH = 20  # plot window width (axis limits span -WIDTH/2..WIDTH/2)
class Actor(threading.Thread):
    """Worker thread that repeatedly runs greedy-policy test episodes.

    Each actor owns one arm agent and one goal, queries the shared
    Q-networks (global ``networks``, guarded by ``networks_lock``) for
    actions, and draws its progress into its own subplot column of the
    global ``ax`` array (guarded by ``plotting_lock``).
    """

    def __init__(self, threadID, goal_threshold=GOAL_THRESHOLD, max_steps=MAX_STEPS):
        threading.Thread.__init__(self)
        self.agent = None  # place-holder for agent
        self.goal = None  # placer-holder for goal
        self.GOAL_THRESHOLD = goal_threshold  # desired distance to goal; episode is finished early if threshold is achieved
        self.MAX_STEPS = max_steps  # maximal steps per episode
        self.THREAD_ID = threadID  # thread id (integer)
        self.path = deque([], maxlen=500)  # recent end-effector positions, for the plotted trace

    def get_state(self):
        # state is composed by agent + goal states
        return np.hstack((self.agent.get_state(), self.goal.get_state()))

    def episode_finished(self):
        # An episode ends early once the planar (x, y) distance between
        # the end effector and the goal drops below the threshold.
        # NOTE(review): compares against the module-level GOAL_THRESHOLD,
        # not self.GOAL_THRESHOLD -- the per-instance override is ignored.
        agent_pos = self.agent.get_position()
        goal_pos = self.goal.get_position()
        distance = np.linalg.norm(agent_pos[:2] - goal_pos[:2])
        if distance < GOAL_THRESHOLD:
            return True  # episode finished if agent already at goal
        else:
            return False

    def plot(self):
        # stepwise refreshing of plot; caller must hold plotting_lock
        ax[0,self.THREAD_ID].clear()
        # plotting of AGENT, GOAL and set AXIS LIMITS
        self.goal.plot(ax[0,self.THREAD_ID])
        self.agent.plot(ax[0,self.THREAD_ID])
        ax[0,self.THREAD_ID].set_xlim([-WIDTH/2, WIDTH/2])#[0,WIDTH])
        ax[0,self.THREAD_ID].set_ylim([-HEIGHT/2, HEIGHT/2])#[0,HEIGHT])
        # draw the trace of visited end-effector positions
        for point in self.path:
            ax[0,self.THREAD_ID].plot(point[0],point[1],'co')

    def run(self):
        # Main loop: endlessly run greedy (no-exploration) episodes.
        while True:
            # init new episode
            plotting_lock.acquire()
            self.agent = agents.Arm(angular_velocity_1=ANGULAR_ARM_VELOCITY, angular_velocity_2=ANGULAR_ARM_VELOCITY, arm_length_1=ARM_LENGTH_1, arm_length_2=ARM_LENGTH_2)
            self.goal = goals.Goal_Arm(ARM_LENGTH_1, ARM_LENGTH_2)
            plotting_lock.release()
            for step in range(self.MAX_STEPS):
                # produce experience
                state = self.get_state()
                self.path.append(self.agent.get_end_effector_position())
                # get lock to synchronize threads
                networks_lock.acquire()
                q = networks.online_net.predict(state.reshape(1,NUM_OF_STATES), batch_size=1)
                networks_lock.release()
                action = np.argmax(q)  # choose best action from Q(s,a)
                # take action, observe next state s'
                self.agent.set_action(action)
                self.agent.update()
                next_state = self.get_state()  # NOTE(review): unused -- no learning in this test script
                # check if agent at goal
                terminal = self.episode_finished()
                # plot the scene
                plotting_lock.acquire()
                self.plot()
                plotting_lock.release()
                if terminal:
                    break  # start new episode
            # episodic refreshing of plot
            #plotting_lock.acquire()
            #ax[0,self.THREAD_ID].clear()
            #plotting_lock.release()
if __name__ == "__main__":
    # create GLOBAL thread-locks (shared by all Actor threads)
    console_lock = threading.Lock()
    networks_lock = threading.Lock()
    plotting_lock = threading.Lock()

    # create GLOBAL Q-NETWORKS (read-only from the actors' perspective)
    networks = q_networks.QNetworks(NUM_OF_ACTIONS, NUM_OF_STATES)

    # initialize GLOBAL plotting: one subplot column per actor, flattened
    # to shape (1, NUM_OF_PLOTS_X * NUM_OF_PLOTS_Y) for ax[0, thread_id] indexing
    fig, ax = plt.subplots(NUM_OF_PLOTS_Y,NUM_OF_PLOTS_X)
    ax = ax.reshape(1, ax.shape[0]*ax.shape[1])
    plt.ion()

    # create threads
    threads = []
    threads.extend([Actor(i) for i in range(NUM_OF_ACTORS)])

    # set daemon, allowing Ctrl-C
    for i in range(len(threads)):
        threads[i].daemon = True

    # start new Threads
    [threads[i].start() for i in range(len(threads))]

    # show plot
    plt.show()

    # main loop: keep pumping GUI events while the actor threads draw
    while True:
        plotting_lock.acquire()
        fig.canvas.flush_events()
        plotting_lock.release()
time.sleep(0.1) | gpl-3.0 |
cngo-github/nupic | examples/opf/tools/testDiagnostics.py | 58 | 1606 | import numpy as np
def printMatrix(inputs, spOutput):
  ''' (i,j)th cell of the diff matrix will have the number of inputs for which the input and output
  pattern differ by i bits and the cells activated differ at j places.

  Note: Python 2 only (uses ``xrange``).

  Parameters:
  --------------------------------------------------------------------
  inputs:                the input encodings
  spOutput:              the coincidences activated in response to each input
  '''
  from pylab import matplotlib as mat

  # w: number of active bits per input; numActive: active columns per output.
  w=len(np.nonzero(inputs[0])[0])
  numActive=len(np.nonzero(spOutput[0])[0])
  matrix = np.zeros([2*w+1,2*numActive+1])
  for x in xrange(len(inputs)):
    # Pairwise Hamming distances of input x against all later inputs/outputs.
    i = [_hammingDistance(inputs[x], z) for z in inputs[x:]]
    j = [_hammingDistance(spOutput[x], a) for a in spOutput[x:]]
    for p, q in zip(i,j):
      matrix[p,q]+=1

  # Boost small nonzero counts so they remain visible in the color map.
  for y in xrange(len(matrix)) :
    matrix[y]=[max(10*x, 100) if (x<100 and x>0) else x for x in matrix[y]]

  cdict = {'red':((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.7),(1.0,1.0,1.0)),\
          'green': ((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.0),(1.0,1.0,1.0)),\
          'blue': ((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.0),(1.0,0.5,1.0))}

  my_cmap = mat.colors.LinearSegmentedColormap('my_colormap',cdict,256)
  pyl=mat.pyplot
  pyl.matshow(matrix, cmap = my_cmap)
  pyl.colorbar()
  pyl.ylabel('Number of bits by which the inputs differ')
  pyl.xlabel('Number of cells by which input and output differ')
  pyl.title('The difference matrix')
  pyl.show()
def _hammingDistance(s1, s2):
  """Hamming distance between two numpy arrays ``s1`` and ``s2``."""
  difference = abs(s1 - s2)
  return sum(difference)
| agpl-3.0 |
ilyes14/scikit-learn | sklearn/tests/test_naive_bayes.py | 70 | 17509 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])

# A bit more random tests: 10 samples, 3 features, binary target
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
# NOTE(review): np.int is removed in modern NumPy; use int directly.
y1 = (rng.normal(size=(10)) > 0).astype(np.int)

# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
    """GaussianNB fit/predict on a toy linearly separable dataset."""
    # Gaussian Naive Bayes classification.
    # This checks that GaussianNB implements fit and predict and returns
    # correct values for a simple toy dataset.

    clf = GaussianNB()
    y_pred = clf.fit(X, y).predict(X)
    assert_array_equal(y_pred, y)

    # predict_log_proba must equal the log of predict_proba.
    y_pred_proba = clf.predict_proba(X)
    y_pred_log_proba = clf.predict_log_proba(X)
    assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)

    # Test whether label mismatch between target y and classes raises
    # an Error
    # FIXME Remove this test once the more general partial_fit tests are merged
    assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
    # Test whether class priors are properly set.
    # Balanced classes (3 of each out of 6) yield a 0.5/0.5 prior.
    clf = GaussianNB().fit(X, y)
    assert_array_almost_equal(np.array([3, 3]) / 6.0,
                              clf.class_prior_, 8)
    clf.fit(X1, y1)
    # Check that the class priors sum to 1
    assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
    """Test whether sample weights are properly used in GNB. """
    # Sample weights all being 1 should not change results
    sw = np.ones(6)
    clf = GaussianNB().fit(X, y)
    clf_sw = GaussianNB().fit(X, y, sw)

    assert_array_almost_equal(clf.theta_, clf_sw.theta_)
    assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)

    # Fitting twice with half sample-weights should result
    # in same result as fitting once with full weights
    sw = rng.rand(y.shape[0])
    clf1 = GaussianNB().fit(X, y, sample_weight=sw)
    clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
    clf2.partial_fit(X, y, sample_weight=sw / 2)

    assert_array_almost_equal(clf1.theta_, clf2.theta_)
    assert_array_almost_equal(clf1.sigma_, clf2.sigma_)

    # Check that duplicate entries and correspondingly increased sample
    # weights yield the same result
    ind = rng.randint(0, X.shape[0], 20)
    sample_weight = np.bincount(ind, minlength=X.shape[0])

    clf_dupl = GaussianNB().fit(X[ind], y[ind])
    clf_sw = GaussianNB().fit(X, y, sample_weight)

    assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
    assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
    # Test whether class priors are properly set.
    # Three balanced classes give a uniform log-prior of log(1/3).
    for cls in [BernoulliNB, MultinomialNB]:
        clf = cls().fit(X2, y2)
        assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
                                  clf.class_log_prior_, 8)
def test_mnnb():
    # Test Multinomial Naive Bayes classification.
    # This checks that MultinomialNB implements fit and predict and returns
    # correct values for a simple toy dataset.

    # Run once with a dense array and once with a sparse CSR matrix.
    for X in [X2, scipy.sparse.csr_matrix(X2)]:
        # Check the ability to predict the learning set.
        clf = MultinomialNB()
        # Negative counts are invalid for a multinomial model.
        assert_raises(ValueError, clf.fit, -X, y2)
        y_pred = clf.fit(X, y2).predict(X)

        assert_array_equal(y_pred, y2)

        # Verify that np.log(clf.predict_proba(X)) gives the same results as
        # clf.predict_log_proba(X)
        y_pred_proba = clf.predict_proba(X)
        y_pred_log_proba = clf.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)

        # Check that incremental fitting yields the same results
        clf2 = MultinomialNB()
        clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
        clf2.partial_fit(X[2:5], y2[2:5])
        clf2.partial_fit(X[5:], y2[5:])

        y_pred2 = clf2.predict(X)
        assert_array_equal(y_pred2, y2)

        y_pred_proba2 = clf2.predict_proba(X)
        y_pred_log_proba2 = clf2.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
        assert_array_almost_equal(y_pred_proba2, y_pred_proba)
        assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)

        # Partial fit on the whole data at once should be the same as fit too
        clf3 = MultinomialNB()
        clf3.partial_fit(X, y2, classes=np.unique(y2))

        y_pred3 = clf3.predict(X)
        assert_array_equal(y_pred3, y2)
        y_pred_proba3 = clf3.predict_proba(X)
        y_pred_log_proba3 = clf3.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
        assert_array_almost_equal(y_pred_proba3, y_pred_proba)
        assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
    """Check that fit, one-shot partial_fit and chunked partial_fit all
    produce identical counts for classifier class *cls*."""
    clf1 = cls()
    clf1.fit([[0, 1], [1, 0]], [0, 1])

    # One-shot partial_fit must match fit.
    clf2 = cls()
    clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
    assert_array_equal(clf1.class_count_, clf2.class_count_)
    assert_array_equal(clf1.feature_count_, clf2.feature_count_)

    # Two-chunk partial_fit must also match.
    clf3 = cls()
    clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
    clf3.partial_fit([[1, 0]], [1])
    assert_array_equal(clf1.class_count_, clf3.class_count_)
    assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
    # Nose-style generator test: yields one partial_fit check per class.
    for cls in [MultinomialNB, BernoulliNB]:
        yield check_partial_fit, cls
def test_gnb_partial_fit():
    """GaussianNB partial_fit (one-shot and chunked) must match fit."""
    clf = GaussianNB().fit(X, y)
    clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
    assert_array_almost_equal(clf.theta_, clf_pf.theta_)
    assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
    assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)

    # Interleaved halves (even rows, then odd rows) must also match.
    clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
    clf_pf2.partial_fit(X[1::2], y[1::2])
    assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
    assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
    assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
    # Test picklability of discrete naive Bayes classifiers

    for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
        clf = cls().fit(X2, y2)
        y_pred = clf.predict(X2)

        # Round-trip through pickle and check predictions are unchanged.
        store = BytesIO()
        pickle.dump(clf, store)
        clf = pickle.load(BytesIO(store.getvalue()))

        assert_array_equal(y_pred, clf.predict(X2))

        if cls is not GaussianNB:
            # TODO re-enable me when partial_fit is implemented for GaussianNB

            # Test pickling of estimator trained with partial_fit
            clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
            clf2.partial_fit(X2[3:], y2[3:])
            store = BytesIO()
            pickle.dump(clf2, store)
            clf2 = pickle.load(BytesIO(store.getvalue()))
            assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
    # Test input checks for the fit method
    for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
        # check shape consistency for number of samples at fit time
        assert_raises(ValueError, cls().fit, X2, y2[:-1])

        # check shape consistency for number of input features at predict time
        clf = cls().fit(X2, y2)
        assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
    """Input validation for partial_fit of the discrete NB classifiers."""
    for cls in [BernoulliNB, MultinomialNB]:
        # check shape consistency
        assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
                      classes=np.unique(y2))

        # classes is required for first call to partial fit
        assert_raises(ValueError, cls().partial_fit, X2, y2)

        # check consistency of consecutive classes values
        clf = cls()
        clf.partial_fit(X2, y2, classes=np.unique(y2))
        assert_raises(ValueError, clf.partial_fit, X2, y2,
                      classes=np.arange(42))

        # check consistency of input shape for partial_fit
        assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)

        # check consistency of input shape for predict
        assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
    # Test discrete NB classes' probability scores
    # The 100s below distinguish Bernoulli from multinomial.
    # FIXME: write a test to show this.
    X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
    X_multinomial = [[0, 1], [1, 3], [4, 0]]

    # test binary case (1-d output)
    y = [0, 0, 2]  # 2 is regression test for binary case, 02e673
    for cls, X in zip([BernoulliNB, MultinomialNB],
                      [X_bernoulli, X_multinomial]):
        clf = cls().fit(X, y)
        assert_equal(clf.predict(X[-1:]), 2)
        # predict_proba has one column per class even for binary problems.
        assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
        # Each row of predict_proba is a distribution and must sum to 1.
        assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
                                  np.array([1., 1.]), 6)

    # test multiclass case (2-d output, must sum to one)
    y = [0, 1, 2]
    for cls, X in zip([BernoulliNB, MultinomialNB],
                      [X_bernoulli, X_multinomial]):
        clf = cls().fit(X, y)
        assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
        assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
        assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
        assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
        # The class priors (exposed directly and via the linear-model
        # style intercept_) must also form a normalized distribution.
        assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
        assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
    """With fit_prior=False and class_prior=None the prior is uniform."""
    for nb_class in [BernoulliNB, MultinomialNB]:
        model = nb_class()
        model.set_params(fit_prior=False)
        model.fit([[0], [0], [1]], [0, 0, 1])
        # Despite the 2:1 class imbalance the learned prior is 50/50.
        learned_prior = np.exp(model.class_log_prior_)
        assert_array_equal(learned_prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
    """Discrete NB classes must honour a user-supplied class prior."""
    for nb_class in [BernoulliNB, MultinomialNB]:
        model = nb_class(class_prior=[0.5, 0.5])
        model.fit([[0], [0], [1]], [0, 0, 1])
        assert_array_equal(np.exp(model.class_log_prior_),
                           np.array([.5, .5]))

        # A prior whose length disagrees with the number of classes must
        # be rejected, by fit and partial_fit alike.
        assert_raises(ValueError, model.fit, [[0], [1], [2]], [0, 1, 2])
        assert_raises(ValueError, model.partial_fit, [[0], [1]], [0, 1],
                      classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
    """A provided class prior must also be respected by partial_fit."""
    iris = load_iris()
    iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
        iris.data, iris.target, test_size=0.4, random_state=415)

    for nb_class in [BernoulliNB, MultinomialNB]:
        for prior in [None, [0.3, 0.3, 0.4]]:
            # One-shot fitting and incremental fitting must agree on the
            # class log prior.
            full = nb_class(class_prior=prior)
            full.fit(iris.data, iris.target)

            incremental = nb_class(class_prior=prior)
            incremental.partial_fit(iris_data1, iris_target1,
                                    classes=[0, 1, 2])
            incremental.partial_fit(iris_data2, iris_target2)

            assert_array_almost_equal(full.class_log_prior_,
                                      incremental.class_log_prior_)
def test_sample_weight_multiclass():
    # Nose test generator: yield one sample-weight check per discrete NB
    # class; the actual assertions live in check_sample_weight_multiclass.
    for cls in [BernoulliNB, MultinomialNB]:
        yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
    """Check that fit and partial_fit honour sample_weight.

    Samples 1 and 2 share the feature vector [0, 1, 1] but have labels 0
    and 1; the double weight on sample 2 makes class 1 win for that
    pattern, so the expected predictions differ from the training labels.

    :param cls: naive Bayes class to check (e.g. MultinomialNB)
    """
    X = [
        [0, 0, 1],
        [0, 1, 1],
        [0, 1, 1],
        [1, 0, 0],
    ]
    y = [0, 0, 1, 2]
    # `np.float` was a deprecated alias of the builtin `float` (removed in
    # NumPy 1.24); use the builtin directly - same dtype (float64).
    sample_weight = np.array([1, 1, 2, 2], dtype=float)
    sample_weight /= sample_weight.sum()
    clf = cls().fit(X, y, sample_weight=sample_weight)
    assert_array_equal(clf.predict(X), [0, 1, 1, 2])

    # Check sample weight using the partial_fit method
    clf = cls()
    clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
                    sample_weight=sample_weight[:2])
    clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
    clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
    assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
    """MultinomialNB: sample weights shift both predictions and prior."""
    model = MultinomialNB()
    model.fit([[1, 2], [1, 2], [1, 0]],
              [0, 0, 1],
              sample_weight=[1, 1, 4])
    assert_array_equal(model.predict([[1, 0]]), [1])
    # Weighted class counts are 2 vs 4, so the prior must be (1/3, 2/3).
    positive_prior = np.exp(model.intercept_[0])
    assert_array_almost_equal([1 - positive_prior, positive_prior],
                              [1 / 3., 2 / 3.])
def test_coef_intercept_shape():
    """coef_ and intercept_ follow the linear-model shape convention.

    Non-regression test for issue #2127: for a binary problem the shapes
    are (1, n_features) and (1,).
    """
    X = [[1, 0, 0], [1, 1, 1]]
    y = [1, 2]  # binary classification

    for model in [MultinomialNB(), BernoulliNB()]:
        model.fit(X, y)
        assert_equal(model.coef_.shape, (1, 3))
        assert_equal(model.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
    """Non-regression accuracy floors on the digits dataset.

    Guards against refactorings / optimisations of the NB models harming
    performance on a slightly non-linearly separable dataset.
    """
    digits = load_digits()
    X, y = digits.data, digits.target
    binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
    X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]

    # Multinomial NB
    assert_greater(
        cross_val_score(MultinomialNB(alpha=10), X, y, cv=10).mean(), 0.86)
    assert_greater(
        cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8,
                        cv=10).mean(), 0.94)

    # Bernoulli NB (binarize pixel intensities at > 4)
    assert_greater(
        cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10).mean(), 0.83)
    assert_greater(
        cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8,
                        cv=10).mean(), 0.92)

    # Gaussian NB
    assert_greater(cross_val_score(GaussianNB(), X, y, cv=10).mean(), 0.77)
    assert_greater(
        cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10).mean(), 0.86)
def test_feature_log_prob_bnb():
    """Regression test for issue #4268.

    With alpha=1.0, BernoulliNB's feature_log_prob_ must equal the
    textbook expression log((count + 1) / (class_count + 2)) from
    Manning, Raghavan and Schuetze's "Introduction to Information
    Retrieval":
    http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
    """
    X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
    Y = np.array([0, 0, 1, 2, 2])

    clf = BernoulliNB(alpha=1.0)
    clf.fit(X, Y)

    # Log numerator and denominator of P(feature presence | class), with
    # Laplace smoothing: alpha=1 adds 1 to each count, 2 to each total.
    numerator = np.log(clf.feature_count_ + 1.0)
    denominator = np.tile(np.log(clf.class_count_ + 2.0),
                          (X.shape[1], 1)).T

    assert_array_equal(clf.feature_log_prob_, numerator - denominator)
def test_bnb():
    # Tests that BernoulliNB when alpha=1.0 gives the same values as
    # those given for the toy example in Manning, Raghavan, and
    # Schuetze's "Introduction to Information Retrieval" book:
    # http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html

    # Training data points are:
    # Chinese Beijing Chinese (class: China)
    # Chinese Chinese Shanghai (class: China)
    # Chinese Macao (class: China)
    # Tokyo Japan Chinese (class: Japan)

    # Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
    X = np.array([[1, 1, 0, 0, 0, 0],
                  [0, 1, 0, 0, 1, 0],
                  [0, 1, 0, 1, 0, 0],
                  [0, 1, 1, 0, 0, 1]])

    # Classes are China (0), Japan (1)
    Y = np.array([0, 0, 0, 1])

    # Fit BernoulliNB w/ alpha = 1.0
    clf = BernoulliNB(alpha=1.0)
    clf.fit(X, Y)

    # Check the class prior is correct (3 of 4 documents are "China")
    class_prior = np.array([0.75, 0.25])
    assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)

    # Check the feature probabilities are correct
    feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
                             [1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
    assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)

    # Testing data point is:
    # Chinese Chinese Chinese Tokyo Japan
    X_test = np.array([[0, 1, 1, 0, 0, 1]])

    # Check the predictive probabilities are correct
    # (unnormalized values from the book's worked example, normalized
    # here so they sum to one as predict_proba's output does)
    unnorm_predict_proba = np.array([[0.005183999999999999,
                                      0.02194787379972565]])
    predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
    assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
| bsd-3-clause |
henridwyer/scikit-learn | sklearn/utils/tests/test_testing.py | 144 | 4121 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
try:
    from nose.tools import assert_less

    def test_assert_less():
        # Check that the nose implementation of assert_less gives the
        # same thing as the scikit's
        assert_less(0, 1)
        _assert_less(0, 1)
        assert_raises(AssertionError, assert_less, 1, 0)
        assert_raises(AssertionError, _assert_less, 1, 0)

except ImportError:
    # This nose version does not ship assert_less; skip the comparison.
    pass
try:
    from nose.tools import assert_greater

    def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as the scikit's
        assert_greater(1, 0)
        _assert_greater(1, 0)
        assert_raises(AssertionError, assert_greater, 0, 1)
        assert_raises(AssertionError, _assert_greater, 0, 1)

except ImportError:
    # This nose version does not ship assert_greater; skip the comparison.
    pass
def test_assert_less_equal():
    """assert_less_equal passes for <= pairs and raises otherwise."""
    for smaller, larger in ((0, 1), (1, 1)):
        assert_less_equal(smaller, larger)
    assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
    """assert_greater_equal passes for >= pairs and raises otherwise."""
    for bigger, smaller in ((1, 0), (1, 1)):
        assert_greater_equal(bigger, smaller)
    assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
    """set_random_state sets random_state where supported, no-ops otherwise."""
    lda = LDA()
    tree = DecisionTreeClassifier()
    # LDA has no random_state parameter, so this is only a smoke test.
    set_random_state(lda, 3)
    # The tree does, and the value must actually be stored.
    set_random_state(tree, 3)
    assert_equal(tree.random_state, 3)
def test_assert_raise_message():
    """Exercise assert_raise_message on matching and mismatched errors."""
    def _raise_value_error(message):
        raise ValueError(message)

    def _no_raise():
        pass

    # Matching exception type and message: passes silently.
    assert_raise_message(ValueError, "test",
                         _raise_value_error, "test")
    # Right exception, wrong message -> AssertionError.
    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "something else",
                  _raise_value_error, "test")
    # Wrong expected exception type: the original ValueError propagates.
    assert_raises(ValueError,
                  assert_raise_message, TypeError, "something else",
                  _raise_value_error, "test")
    # A callable that never raises -> AssertionError.
    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "test",
                  _no_raise)
    # multiple exceptions in a tuple
    assert_raises(AssertionError,
                  assert_raise_message, (ValueError, AttributeError),
                  "test", _no_raise)
# This class is inspired by numpy 1.7 with an alteration to check
# the reset of warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
    def test_warn(self):
        def f():
            warnings.warn("yo")
            return 3

        # Test that assert_warns is not impacted by externally set
        # filters and is reset internally.
        # This is because `clean_warning_registry()` is called internally by
        # assert_warns and clears all previous filters.
        warnings.simplefilter("ignore", UserWarning)
        assert_equal(assert_warns(UserWarning, f), 3)

        # Test that the warning registry is empty after assert_warns
        assert_equal(sys.modules['warnings'].filters, [])

        # f() does warn, so assert_no_warnings must fail on it but accept
        # a warning-free callable (and forward its return value).
        assert_raises(AssertionError, assert_no_warnings, f)
        assert_equal(assert_no_warnings(lambda x: x, 1), 1)

    def test_warn_wrong_warning(self):
        def f():
            warnings.warn("yo", DeprecationWarning)

        failed = False
        # Snapshot the global warning filters so they can be restored even
        # if assert_warns mutates them.
        filters = sys.modules['warnings'].filters[:]
        try:
            try:
                # Should raise an AssertionError: the raised warning is a
                # DeprecationWarning, not the expected UserWarning.
                assert_warns(UserWarning, f)
                failed = True
            except AssertionError:
                pass
        finally:
            sys.modules['warnings'].filters = filters
        if failed:
            raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
mdigiorgio/lisa | libs/utils/energy_model.py | 1 | 36419 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2016, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple, OrderedDict
from itertools import product
import logging
import operator
import re
import pandas as pd
import numpy as np
from devlib.utils.misc import memoized, mask_to_list
from devlib import TargetError
"""Classes for modeling and estimating energy usage of CPU systems"""
def read_multiple_oneline_files(target, glob_patterns):
    """
    Quickly read many single-line files that match a glob pattern

    Finds all the files that match any of the glob patterns and, assuming
    that they each contain exactly 1 line of text, read them all at once.
    When the target or connection is slow this saves a lot of time when
    reading a large number of files.

    This will only work safely on stationary files, don't try to use it
    where the glob expansion will change often - for example
    /proc/**/autogroup would not work because /proc/ entries will likely
    appear & disappear while we're reading them.

    :param target: devlib target object to read from
    :param glob_pattern: Unix glob pattern matching the files to read
    :returns: A dictionary mapping matched paths to the values read. ``{}``
              if no paths matched the globs.
    """
    find_cmd = 'find ' + ' '.join(glob_patterns)
    try:
        paths = target.execute(find_cmd).split()
    except TargetError:
        # `find` failed, e.g. because nothing matched the patterns.
        return {}

    # Cat every matched file in a single command invocation.
    cat_cmd = '{} | {} xargs cat'.format(find_cmd, target.busybox)
    lines = target.execute(cat_cmd).splitlines()

    if len(lines) != len(paths):
        raise RuntimeError('File count mismatch while reading multiple files')

    return dict(zip(paths, lines))
class EnergyModelCapacityError(Exception):
    """Raised by :meth:`EnergyModel.get_optimal_placements` when the
    modeled system cannot satisfy the workload's capacity requirements.
    """
    pass
class ActiveState(namedtuple('ActiveState', ['capacity', 'power'])):
    """Represents power and compute capacity at a given frequency

    :param capacity: Relative compute capacity at frequency
    :param power: Power usage at frequency
    """
    def __new__(cls, capacity=None, power=None):
        # Both fields are optional and default to None so that partial
        # energy data can be represented.
        return super(ActiveState, cls).__new__(cls, capacity=capacity,
                                               power=power)
class _CpuTree(object):
"""Internal class. Abstract representation of a CPU topology.
Each node contains either a single CPU or a set of child nodes.
"""
def __init__(self, cpu, children):
if (cpu is None) == (children is None):
raise ValueError('Provide exactly one of: cpu or children')
self.parent = None
self.cpu = cpu
if cpu is not None:
self.cpus = (cpu,)
self.children = []
else:
if len(children) == 0:
raise ValueError('children cannot be empty')
self.cpus = tuple(sorted(set().union(*[n.cpus for n in children])))
self.children = children
for child in children:
child.parent = self
self.name = None
def __repr__(self):
name_bit = ''
if self.name:
name_bit = 'name="{}", '.format(self.name)
if self.children:
return '{}({}children={})'.format(
self.__class__.__name__, name_bit, self.children)
else:
return '{}({}cpus={})'.format(
self.__class__.__name__, name_bit, self.cpus)
def _iter(self, include_non_leaves):
for child in self.children:
for child_i in child._iter(include_non_leaves):
yield child_i
if include_non_leaves or not self.children:
yield self
def iter_nodes(self):
"""Iterate over nodes depth-first, post-order"""
return self._iter(True)
def iter_leaves(self):
"""Iterate over leaves"""
return self._iter(False)
class EnergyModelNode(_CpuTree):
    """Describes topology and energy data for an EnergyModel.

    Represents a CPU topology with energy data. The active and idle state
    data represents the power usage of just the hardware resources of this
    topology level, not its children. e.g. If the node represents a
    cluster, the power numbers should not include power used by the CPU -
    that power should be included the data of the child nodes.

    Exactly one of ``cpu`` and ``children`` must be given.

    :param active_states: Dict mapping frequencies to :class:`ActiveState`
                          values. Compute capacity data is optional for
                          non-leaf nodes.
    :param idle_states: Dict mapping idle state names to power usage values
    :param cpu: The CPU this node represents. If provided, this is a leaf node.
    :type cpus: tuple(int)
    :param children: Non-empty list of child :class:`EnergyModelNode` objects
    :param name: Optional human-readable name for this node. Leaf (CPU) nodes
                 have a default name of "cpuN" where N is the cpu number.

    :ivar cpus: CPUs contained in this node. Includes those of child nodes.
    :ivar cpu: For convenience, this holds the single CPU contained by leaf
               nodes. ``None`` for non-leaf nodes.
    """
    def __init__(self, active_states, idle_states,
                 cpu=None, children=None, name=None):
        super(EnergyModelNode, self).__init__(cpu, children)

        self._log = logging.getLogger('EnergyModel')

        def is_monotonic(l, decreasing=False):
            # True iff the sequence never decreases (or, with
            # decreasing=True, never increases).
            op = operator.ge if decreasing else operator.le
            return all(op(a, b) for a, b in zip(l, l[1:]))

        if active_states:
            # Sanity check for active_states's frequencies
            # NOTE(review): relies on dict iteration order matching
            # ascending frequency - presumably callers pass an OrderedDict
            # (imported at module level); confirm for plain dicts.
            freqs = active_states.keys()
            if not is_monotonic(freqs):
                self._log.warning(
                    'Active states frequencies are expected to be '
                    'monotonically increasing. Freqs: {}'.format(freqs))

            # Sanity check for active_states's powers
            power_vals = [s.power for s in active_states.values()]
            if not is_monotonic(power_vals):
                self._log.warning(
                    'Active states powers are expected to be '
                    'monotonically increasing. Values: {}'.format(power_vals))

        # Sanity check for idle_states powers: deeper states are expected
        # to consume less.
        if idle_states:
            power_vals = idle_states.values()
            if not is_monotonic(power_vals, decreasing=True):
                self._log.warning(
                    'Idle states powers are expected to be '
                    'monotonically decreasing. Values: {}'.format(power_vals))

        # Leaf (CPU) nodes get a default name of "cpuN".
        if cpu is not None and not name:
            name = 'cpu' + str(cpu)

        self.name = name
        self.active_states = active_states
        self.idle_states = idle_states

    @property
    def max_capacity(self):
        """Compute capacity at highest frequency"""
        return max(s.capacity for s in self.active_states.values())
class EnergyModelRoot(EnergyModelNode):
    """
    Convenience class for root of an EnergyModelNode tree.

    Just like EnergyModelNode except that ``active_states`` and
    ``idle_states`` aren't required.
    """
    def __init__(self, active_states=None, idle_states=None,
                 cpu=None, children=None, name=None):
        # Only the defaults differ: delegate everything to the base class.
        return super(EnergyModelRoot, self).__init__(
            active_states, idle_states, cpu, children, name)
class PowerDomain(_CpuTree):
    """Describes the power domain hierarchy for an EnergyModel.

    Power domains are a description of the topological dependencies in
    hardware for entering idle states. "Composite" states such as
    cluster-sleep states require a set of CPUs to all be idle before that
    state can be entered. In that case those CPUs can be grouped into a
    power domain, and that composite state attached to the power domain.
    Note that cpuidle is not aware of these dependencies; they are
    typically handled by the platform firmware.

    Exactly one of ``cpu`` and ``children`` must be given. That is, leaves
    of the PowerDomain tree always contain exactly one CPU - each CPU is
    represented as being in a power domain of its own. This represents the
    assumption that all CPUs have at least one idle state (such as ARM
    WFI) that they can enter independently of other CPUs.

    :param idle_states: List of names of idle states for this power
                        domain. Does not store power data - these names
                        are used as keys into the ``idle_states`` field of
                        :class:`EnergyModelNode` objects.
    :type idle_states: list(str)
    :param cpu: The CPU this node represents. If provided, this is a leaf node.
    :type cpu: int
    :param children: Non-empty list of child :class:`PowerDomain` objects
    :type children: list(PowerDomain)

    :ivar cpus: CPUs contained in this node. Includes those of child nodes.
    :type cpus: tuple(int)
    """
    def __init__(self, idle_states, cpu=None, children=None):
        # An empty list is a valid value (a domain with no idle states of
        # its own), but None is a caller error.
        if idle_states is None:
            raise ValueError('idle_states cannot be None (but may be empty)')
        super(PowerDomain, self).__init__(cpu, children)
        self.idle_states = idle_states
class EnergyModel(object):
"""Represents hierarchical CPU topology with power and capacity data
An energy model consists of
- A CPU topology, representing the physical (cache/interconnect) topology of
the CPUs. Each node stores the energy usage of that node's hardware when
it is in each active or idle state. They also store a compute capacity at
each frequency, but this is only meaningful for leaf nodes (CPUs) and may
be None at higher levels. These capacity values are relative; the maximum
capacity would usually be 1024, the value of SCHED_CAPACITY_SCALE in the
Linux kernel scheduler. Use EnergyModelNodes to describe this.
- A power domain topology, representing the hierarchy of areas that can be
powered down (idled).
The power domains are a single tree. Leaf nodes must contain exactly one
CPU and the root node must indirectly contain every CPU. Each power domain
has a list (maybe empty) of names of idle states that that domain can
enter.
Use PowerDomains to describe this.
- A set of frequency domains, representing groups of CPUs whose clock
frequencies must be equal (probably because they share a clock). The
frequency domains must be a partition of the CPUs.
:ivar cpu_nodes: List of leaf (CPU) :class:`EnergyModelNode`
:ivar cpus: List of logical CPU numbers in the system
:param root_node: Root of :class:`EnergyModelNode` tree
:param root_power_domain: Root of :class:`PowerDomain` tree
:param freq_domains: Collection of collections of logical CPU numbers
representing frequency (clock) domains.
.. note::
The most signficant shortcomings of the model are:
1. Voltage domains are assumed to be congruent to frequency domains
2. Idle state power is assumed to be independent of voltage
3. Temperature is ignored entirely
.. _cpu-utils:
.. admonition:: ``cpu_utils``: CPU util distributions
Used throughout this module: A ``cpu_utils`` is a list ``u`` where
``u[N]`` is the sum of the frequency-invariant, capacity-invariant
utilization of tasks placed on CPU N. That is, the quantity represented
by a CPU runqueue's util_avg in the Linux kernel scheduler's
load-tracking system with EAS features enabled.
The range of utilization values is 0 -
:attr:`EnergyModel.capacity_scale`.
This represents a static utilization, assuming that tasks don't change
in size (for example representing a set of fixed periodic RT-App
workloads). For workloads that change over time, a series of
``cpu_utils`` items would be needed to describe the utilization, with a
distinct estimation for each item in the series.
"""
capacity_scale = 1024
"""The relative computational capacity of the most powerful CPU at its
highest available frequency.
"""
def __init__(self, root_node, root_power_domain, freq_domains):
    """Build an EnergyModel and validate its inputs.

    :param root_node: Root of :class:`EnergyModelNode` tree
    :param root_power_domain: Root of :class:`PowerDomain` tree
    :param freq_domains: Collection of collections of logical CPU numbers
                         representing frequency (clock) domains; must
                         form a partition of the CPUs.
    :raises ValueError: if CPU IDs are sparse, if freq_domains is not a
                        partition of the CPUs, or if a node with energy
                        data spans multiple frequency domains.
    """
    self.cpus = root_node.cpus
    if self.cpus != tuple(range(len(self.cpus))):
        raise ValueError('CPU IDs [{}] are sparse'.format(self.cpus))

    # Check that freq_domains is a partition of the CPUs.
    # BUG FIX: this used `set().intersection(*freq_domains)`, which is
    # always empty (it intersects with the empty set), so overlapping
    # domains were never detected. Accumulate pairwise overlaps instead.
    seen = set()
    fd_intersection = set()
    for dom in freq_domains:
        fd_intersection |= seen & set(dom)
        seen |= set(dom)
    if fd_intersection:
        raise ValueError('CPUs {} exist in multiple freq domains'.format(
            fd_intersection))
    fd_difference = set(self.cpus) - set().union(*freq_domains)
    if fd_difference:
        raise ValueError('CPUs {} not in any frequency domain'.format(
            fd_difference))
    self.freq_domains = freq_domains

    # Check that nodes with energy data are all within a frequency domain.
    # BUG FIX: the skip condition was `if not node.active_states or
    # node.idle_states:`, which skipped every node *with* idle-state data,
    # so the check below never ran for the nodes it was meant to validate.
    # Skip only nodes *without* energy data, mirroring the condition used
    # in _estimate_from_active_time.
    for node in root_node.iter_nodes():
        if not node.active_states or not node.idle_states:
            continue
        cpu_freq_doms = []
        for cpu in node.cpus:
            [cpu_freq_dom] = [d for d in freq_domains if cpu in d]
            cpu_freq_doms.append(cpu_freq_dom)
        if not all(d == cpu_freq_doms[0] for d in cpu_freq_doms[1:]):
            raise ValueError(
                'Node {} (CPUs {}) '
                'has energy data and overlaps freq domains'.format(
                    node.name, node.cpus))

    def sorted_leaves(root):
        # Get a list of the leaf (cpu) nodes of a _CpuTree in order of the
        # CPU ID
        ret = sorted(list(root.iter_leaves()), key=lambda n: n.cpus[0])
        assert all(len(n.cpus) == 1 for n in ret)
        return ret

    self.root = root_node
    self.cpu_nodes = sorted_leaves(root_node)
    self.cpu_pds = sorted_leaves(root_power_domain)
    assert len(self.cpu_pds) == len(self.cpu_nodes)

    self._log = logging.getLogger('EnergyModel')

    max_cap = max(n.max_capacity for n in self.cpu_nodes)
    if max_cap != self.capacity_scale:
        self._log.debug(
            'Unusual max capacity (%s), overriding capacity_scale', max_cap)
        self.capacity_scale = max_cap
def _cpus_with_capacity(self, cap):
    """
    Helper method to find the CPUs whose max capacity equals cap
    """
    return [cpu for cpu in self.cpus
            if self.cpu_nodes[cpu].max_capacity == cap]
@property
@memoized
def biggest_cpus(self):
    """
    The CPUs with the highest compute capacity at their highest frequency
    """
    # capacity_scale is the max capacity across all CPUs (set in __init__)
    return self._cpus_with_capacity(self.capacity_scale)
@property
@memoized
def littlest_cpus(self):
    """
    The CPUs with the lowest compute capacity at their highest frequency
    """
    smallest_cap = min(node.max_capacity for node in self.cpu_nodes)
    return self._cpus_with_capacity(smallest_cap)
@property
@memoized
def is_heterogeneous(self):
    """
    True iff CPUs do not all have the same efficiency and OPP range
    """
    # Compare every CPU's active states against the first CPU's.
    reference = self.cpu_nodes[0].active_states
    return any(node.active_states != reference
               for node in self.cpu_nodes[1:])
@property
@memoized
def cpu_groups(self):
    """
    List of lists of CPUs who share the same active state values
    """
    groups = []
    for node in self.cpu_nodes:
        # Join the first existing group with identical active states, or
        # start a new group for this CPU.
        for group in groups:
            if self.cpu_nodes[group[0]].active_states == node.active_states:
                group.append(node.cpu)
                break
        else:
            groups.append([node.cpu])
    return groups
def _guess_idle_states(self, cpus_active):
    # For each CPU, return the name of the deepest idle state reachable
    # given which CPUs are active, or None if its domain hierarchy offers
    # no usable state (see guess_idle_states for the fallback).
    def find_deepest(pd):
        # A domain can only idle when none of its CPUs are active.
        if not any(cpus_active[c] for c in pd.cpus):
            if pd.parent:
                # Prefer a state from the deepest fully-idle ancestor
                # domain.
                parent_state = find_deepest(pd.parent)
                if parent_state:
                    return parent_state
            # Take the last listed state (assumed ordered shallowest to
            # deepest - see the examples in guess_idle_states).
            return pd.idle_states[-1] if len(pd.idle_states) else None
        return None
    return [find_deepest(pd) for pd in self.cpu_pds]
def get_cpu_capacity(self, cpu, freq=None):
    """Convenience method to get the capacity of a CPU at a given frequency

    :param cpu: CPU to get capacity for
    :param freq: Frequency to get the CPU capacity at. Default is max
                 capacity.
    """
    node = self.cpu_nodes[cpu]
    if freq is None:
        return node.max_capacity
    return node.active_states[freq].capacity
def guess_idle_states(self, cpus_active):
    """Pessimistically guess the idle states that each CPU may enter

    If a CPU has any tasks it is estimated that it may only enter its
    shallowest idle state in between task activations. If all the CPUs
    within a power domain have no tasks, they will all be judged able to
    enter that domain's deepest idle state. If any CPU in a domain has
    work, no CPUs in that domain are assumed to enter any domain shared
    state.

    e.g. Consider a system with

    - two power domains PD0 and PD1

    - 4 CPUs, with CPUs [0, 1] in PD0 and CPUs [2, 3] in PD1

    - 4 idle states: "WFI", "cpu-sleep", "cluster-sleep-0" and
      "cluster-sleep-1", where the "cluster-sleep-*" states domain states,
      i.e. a CPU can only enter those states when both CPUs in the domain
      are idle.

    Then here are some example inputs and outputs:

    ::

        # All CPUs idle:
        [0, 0, 0, 0] -> ["cluster-sleep-1", "cluster-sleep-1",
                         "cluster-sleep-1", "cluster-sleep-1"]

        # All CPUs have work
        [1, 1, 1, 1] -> ["WFI","WFI","WFI", "WFI"]

        # One power domain active, the other idle
        [0, 0, 1, 1] -> ["cluster-sleep-1", "cluster-sleep-1", "WFI","WFI"]

        # One CPU active.
        # Note that CPU 2 has no work but is assumed to never be able to
        # enter any "cluster" state.
        [0, 0, 0, 1] -> ["cluster-sleep-1", "cluster-sleep-1",
                         "cpu-sleep","WFI"]

    :param cpus_active: list where bool(cpus_active[N]) is False iff no
                        tasks will run on CPU N.
    :returns: List ``ret`` where ``ret[N]`` is the name of the estimated
              idle state that CPU N can enter during idle periods.
    """
    states = self._guess_idle_states(cpus_active)
    # Where the power-domain search found no state, fall back to the first
    # idle state in the CPU node's energy data - presumably the
    # shallowest one. (Python 2: dict.keys() returns a subscriptable list.)
    return [s or c.idle_states.keys()[0]
            for s, c in zip(states, self.cpu_nodes)]
def _guess_freqs(self, cpu_utils):
    # Return (freqs, overutilized): the lowest per-CPU frequencies that
    # satisfy `cpu_utils` after harmonizing within each frequency domain,
    # plus a flag saying whether any CPU cannot meet its demand even at
    # max frequency.
    overutilized = False
    # Find what frequency each CPU would need if it was alone in its
    # frequency domain
    ideal_freqs = [0 for _ in self.cpus]
    for node in self.cpu_nodes:
        [cpu] = node.cpus
        required_cap = cpu_utils[cpu]

        # Frequencies whose capacity meets the demand (Python 2
        # iteritems).
        possible_freqs = [f for f, s in node.active_states.iteritems()
                          if s.capacity >= required_cap]

        if possible_freqs:
            ideal_freqs[cpu] = min(possible_freqs)
        else:
            # CPU cannot provide required capacity, use max freq
            ideal_freqs[cpu] = max(node.active_states.keys())
            overutilized = True

    # Rectify the frequencies among domains: every CPU in a domain runs at
    # the domain's highest required frequency.
    freqs = [0 for _ in ideal_freqs]
    for domain in self.freq_domains:
        domain_freq = max(ideal_freqs[c] for c in domain)
        for cpu in domain:
            freqs[cpu] = domain_freq

    return freqs, overutilized
def guess_freqs(self, cpu_utils):
    """Work out CPU frequencies required to execute a workload

    Find the lowest possible frequency for each CPU that provides enough
    capacity to satisfy the utilization, taking into account frequency
    domains.

    :param cpu_utils: Utilization distribution, see
                      :ref:`cpu_utils <cpu-utils>`
    :returns: List ``ret`` where ``ret[N]`` is the frequency that CPU N
              must run at
    """
    # Discard the over-utilization flag; callers needing it use
    # _guess_freqs directly.
    freqs, _ = self._guess_freqs(cpu_utils)
    return freqs
def _estimate_from_active_time(self, cpu_active_time, freqs, idle_states,
                               combine):
    """Helper for estimate_from_cpu_util

    Like estimate_from_cpu_util but uses active time i.e. proportion of
    time spent not-idle in the range 0.0 - 1.0.

    If combine=False, return idle and active power as separate components.
    """
    # (the dead local `power = 0` that was never read has been removed)
    ret = {}

    assert all(0.0 <= a <= 1.0 for a in cpu_active_time)

    for node in self.root.iter_nodes():
        # Some nodes might not have energy model data, they could just be
        # used to group other nodes (likely the root node, for example).
        if not node.active_states or not node.idle_states:
            continue

        cpus = tuple(node.cpus)
        # For now we assume topology nodes with energy models do not
        # overlap with frequency domains
        freq = freqs[cpus[0]]
        assert all(freqs[c] == freq for c in cpus[1:])

        # The active time of a node is estimated as the max of the active
        # times of its children.
        # This works great for the synthetic periodic workloads we use in
        # LISA (where all threads wake up at the same time) but is
        # probably no good for real workloads.
        active_time = max(cpu_active_time[c] for c in cpus)
        active_power = node.active_states[freq].power * active_time

        # Idle contribution uses the shallowest (highest-power) of the
        # CPUs' guessed idle states, weighted by the time left idle.
        _idle_power = max(node.idle_states[idle_states[c]] for c in cpus)
        idle_power = _idle_power * (1 - active_time)

        if combine:
            ret[cpus] = active_power + idle_power
        else:
            ret[cpus] = {}
            ret[cpus]["active"] = active_power
            ret[cpus]["idle"] = idle_power

    return ret
def estimate_from_cpu_util(self, cpu_utils, freqs=None, idle_states=None):
    """
    Estimate the energy usage of the system under a utilization
    distribution

    Optionally also take freqs; a list of frequencies at which each CPU is
    assumed to run, and idle_states, the idle states that each CPU can
    enter between activations. If not provided, they will be estimated
    assuming an ideal selection system (i.e. perfect cpufreq & cpuidle
    governors).

    :param cpu_utils: Utilization distribution, see
                      :ref:`cpu_utils <cpu-utils>`
    :param freqs: List of CPU frequencies. Got from :meth:`guess_freqs` by
                  default.
    :param idle_states: List of CPU frequencies. Got from
                        :meth:`guess_idle_states` by default.

    :returns: Dict with power in bogo-Watts (bW), with contributions from
              each system component keyed with a tuple of the CPUs
              comprising that component (i.e. :attr:EnergyModelNode.cpus)

              ::

                {
                    (0,)    : 10,
                    (1,)    : 10,
                    (0, 1)  : 5,
                }

              This represents CPUs 0 and 1 each using 10bW and their
              shared resources using 5bW for a total of 25bW.
    """
    if len(cpu_utils) != len(self.cpus):
        raise ValueError(
            'cpu_utils length ({}) must equal CPU count ({})'.format(
                len(cpu_utils), len(self.cpus)))

    if freqs is None:
        freqs = self.guess_freqs(cpu_utils)
    if idle_states is None:
        # cpu_utils doubles as the cpus_active list here: a CPU with
        # nonzero utilization counts as active.
        idle_states = self.guess_idle_states(cpu_utils)

    cpu_active_time = []
    for cpu, node in enumerate(self.cpu_nodes):
        assert (cpu,) == node.cpus
        cap = node.active_states[freqs[cpu]].capacity
        # Active time is the fraction of capacity in use, clamped at 100%.
        cpu_active_time.append(min(float(cpu_utils[cpu]) / cap, 1.0))

    return self._estimate_from_active_time(cpu_active_time,
                                           freqs, idle_states, combine=True)
def get_optimal_placements(self, capacities):
"""Find the optimal distribution of work for a set of tasks
Find a list of candidates which are estimated to be optimal in terms of
power consumption, but that do not result in any CPU becoming
over-utilized.
If no such candidates exist, i.e. the system being modeled cannot
satisfy the workload's throughput requirements, an
:class:`EnergyModelCapacityError` is raised. For example, if e was an
EnergyModel modeling two CPUs with capacity 1024, this error would be
raised by:
::
e.get_optimal_placements({"t1": 800, "t2": 800, "t3: "800"})
This estimation assumes an ideal system of selecting OPPs and idle
states for CPUs.
.. note::
This is a brute force search taking time exponential wrt. the number
of tasks.
:param capacities: Dict mapping tasks to expected utilization
values. These tasks are assumed not to change; they
have a single static utilization value. A set of
single-phase periodic RT-App tasks is an example of a
suitable workload for this model.
:returns: List of ``cpu_utils`` items representing distributions of work
under optimal task placements, see
:ref:`cpu_utils <cpu-utils>`. Multiple task placements
that result in the same CPU utilizations are considered
equivalent.
"""
tasks = capacities.keys()
num_candidates = len(self.cpus) ** len(tasks)
self._log.debug(
'%14s - Searching %d configurations for optimal task placement...',
'EnergyModel', num_candidates)
candidates = {}
excluded = []
for cpus in product(self.cpus, repeat=len(tasks)):
placement = {task: cpu for task, cpu in zip(tasks, cpus)}
util = [0 for _ in self.cpus]
for task, cpu in placement.items():
util[cpu] += capacities[task]
util = tuple(util)
# Filter out candidate placements that have tasks greater than max
# or that we have already determined that we cannot place.
if (any(u > self.capacity_scale for u in util) or util in excluded):
continue
if util not in candidates:
freqs, overutilized = self._guess_freqs(util)
if overutilized:
# This isn't a valid placement
excluded.append(util)
else:
power = self.estimate_from_cpu_util(util, freqs=freqs)
candidates[util] = sum(power.values())
if not candidates:
# The system can't provide full throughput to this workload.
raise EnergyModelCapacityError(
"Can't handle workload - total cap = {}".format(
sum(capacities.values())))
# Whittle down to those that give the lowest energy estimate
min_power = min(p for p in candidates.itervalues())
ret = [u for u, p in candidates.iteritems() if p == min_power]
self._log.debug('%14s - Done', 'EnergyModel')
return ret
@classmethod
def _find_core_groups(cls, target):
"""
Read the core_siblings masks for each CPU from sysfs
:param target: Devlib Target object to read masks from
:returns: A list of tuples of ints, representing the partition of core
siblings
"""
cpus = range(target.number_of_cpus)
topology_base = '/sys/devices/system/cpu/'
# We only care about core_siblings, but let's check *_siblings, so we
# can throw an error if a CPU's thread_siblings isn't just itself, or if
# there's a topology level we don't understand.
# Since we might have to read a lot of files, read everything we need in
# one go to avoid taking too long.
mask_glob = topology_base + 'cpu**/topology/*_siblings'
file_values = read_multiple_oneline_files(target, [mask_glob])
regex = re.compile(
topology_base + r'cpu([0-9]+)/topology/([a-z]+)_siblings')
ret = set()
for path, mask_str in file_values.iteritems():
match = regex.match(path)
cpu = int(match.groups()[0])
level = match.groups()[1]
# mask_to_list returns the values in descending order, so we'll sort
# them ascending. This isn't strictly necessary but it's nicer.
siblings = tuple(sorted(mask_to_list(int(mask_str, 16))))
if level == 'thread':
if siblings != (cpu,):
# SMT systems aren't supported
raise RuntimeError('CPU{} thread_siblings is {}. '
'expected {}'.format(cpu, siblings, [cpu]))
continue
if level != 'core':
# The only other levels we should expect to find are 'book' and
# 'shelf', which are not used by architectures we support.
raise RuntimeError(
'Unrecognised topology level "{}"'.format(level))
ret.add(siblings)
# Sort core groups so that the lowest-numbered cores are first
# Again, not strictly necessary, just more pleasant.
return sorted(ret, key=lambda x: x[0])
    @classmethod
    def from_target(cls, target):
        """
        Create an EnergyModel by reading a target filesystem

        This uses the sysctl added by EAS patches to expose the cap_states and
        idle_states fields for each sched_group. This feature depends on
        CONFIG_SCHED_DEBUG, and is not upstream in mainline Linux (as of v4.11),
        so this method is only tested with Android kernels.

        The kernel doesn't expose power domain data, so this method assumes
        that all CPUs are totally independent wrt. idle states - the EnergyModel
        constructed won't be aware of the topological dependencies for entering
        "cluster" idle states.

        Assumes the energy model has two levels (plus the root) - a level for
        CPUs and a level for 'clusters'.

        :param target: Devlib target object to read filesystem from. Must have
                       cpufreq and cpuidle modules enabled.
        :returns: Constructed EnergyModel object based on the parameters
                  reported by the target.
        """
        if 'cpufreq' not in target.modules:
            raise TargetError('Requires cpufreq devlib module. Please ensure '
                              '"cpufreq" is listed in your target/test modules')
        if 'cpuidle' not in target.modules:
            raise TargetError('Requires cpuidle devlib module. Please ensure '
                              '"cpuidle" is listed in your target/test modules')
        def sge_path(cpu, domain, group, field):
            # Build the procfs path of one sched_group energy attribute.
            f = '/proc/sys/kernel/sched_domain/cpu{}/domain{}/group{}/energy/{}'
            return f.format(cpu, domain, group, field)
        # Read all the files we might need in one go, otherwise this will take
        # ages.
        sge_globs = [sge_path('**', '**', '**', 'cap_states'),
                     sge_path('**', '**', '**', 'idle_states')]
        sge_file_values = read_multiple_oneline_files(target, sge_globs)
        if not sge_file_values:
            raise TargetError('Energy Model not exposed in sysfs. '
                              'Check CONFIG_SCHED_DEBUG is enabled.')
        # These functions read the cap_states and idle_states vectors for the
        # first sched_group in the sched_domain for a given CPU at a given
        # level. That first group will include the given CPU. So
        # read_active_states(0, 0) will give the CPU-level active_states for
        # CPU0 and read_active_states(0, 1) will give the "cluster"-level
        # active_states for the "cluster" that contains CPU0.
        def read_sge_file(path):
            # Convert a missing cached path into a TargetError for the caller.
            try:
                return sge_file_values[path]
            except KeyError as e:
                raise TargetError('No such file: {}'.format(e))
        def read_active_states(cpu, domain_level):
            cap_states_path = sge_path(cpu, domain_level, 0, 'cap_states')
            cap_states_strs = read_sge_file(cap_states_path).split()
            # cap_states lists the capacity of each state followed by its power,
            # in increasing order. The `zip` call does this:
            # [c0, p0, c1, p1, c2, p2] -> [(c0, p0), (c1, p1), (c2, p2)]
            cap_states = [ActiveState(capacity=int(c), power=int(p))
                          for c, p in zip(cap_states_strs[0::2],
                                          cap_states_strs[1::2])]
            freqs = target.cpufreq.list_frequencies(cpu)
            # Pair ascending frequencies with the cap_states entries, which
            # are in ascending capacity order - assumes one state per
            # available frequency (TODO confirm against the kernel interface).
            return OrderedDict(zip(sorted(freqs), cap_states))
        def read_idle_states(cpu, domain_level):
            idle_states_path = sge_path(cpu, domain_level, 0, 'idle_states')
            idle_states_strs = read_sge_file(idle_states_path).split()
            # get_states should return the state names in increasing depth order
            names = [s.name for s in target.cpuidle.get_states(cpu)]
            # idle_states is a list of power values in increasing order of
            # idle-depth/decreasing order of power.
            return OrderedDict(zip(names, [int(p) for p in idle_states_strs]))
        # Read the CPU-level data from sched_domain level 0
        cpus = range(target.number_of_cpus)
        cpu_nodes = []
        for cpu in cpus:
            node = EnergyModelNode(
                cpu=cpu,
                active_states=read_active_states(cpu, 0),
                idle_states=read_idle_states(cpu, 0))
            cpu_nodes.append(node)
        # Read the "cluster" level data from sched_domain level 1
        core_group_nodes = []
        for core_group in cls._find_core_groups(target):
            node=EnergyModelNode(
                children=[cpu_nodes[c] for c in core_group],
                active_states=read_active_states(core_group[0], 1),
                idle_states=read_idle_states(core_group[0], 1))
            core_group_nodes.append(node)
        root = EnergyModelRoot(children=core_group_nodes)
        # Use cpufreq to figure out the frequency domains
        freq_domains = []
        remaining_cpus = set(cpus)
        while remaining_cpus:
            cpu = next(iter(remaining_cpus))
            dom = target.cpufreq.get_domain_cpus(cpu)
            freq_domains.append(dom)
            remaining_cpus = remaining_cpus.difference(dom)
        # We don't have a way to read the power domains from sysfs (the kernel
        # isn't even aware of them) so we'll just have to assume each CPU is its
        # own power domain and all idle states are independent of each other.
        cpu_pds = []
        for cpu in cpus:
            names = [s.name for s in target.cpuidle.get_states(cpu)]
            cpu_pds.append(PowerDomain(cpu=cpu, idle_states=names))
        root_pd=PowerDomain(children=cpu_pds, idle_states=[])
        return cls(root_node=root,
                   root_power_domain=root_pd,
                   freq_domains=freq_domains)
| apache-2.0 |
dpaiton/OpenPV | pv-core/analysis/python/plot_amoeba_response.py | 1 | 4052 | """
Make a histogram of normally distributed random numbers and plot the
analytic PDF over it
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import matplotlib.image as mpimg
import PVReadWeights as rw
import PVReadSparse as rs
import math
"""
mi=mpimg.imread(sys.argv[3])
imgplot = plt.imshow(mi, interpolation='Nearest')
imgplot.set_cmap('hot')
plt.show()
"""
def nearby_neighbor(kzPre, zScaleLog2Pre, zScaleLog2Post):
    """Map presynaptic index *kzPre* onto the nearest postsynaptic index,
    given the pre/post layer scales as log2 values."""
    scale = 2.0 ** (zScaleLog2Pre - zScaleLog2Post)
    # Half-cell offset only applies when the pre layer is at least twice as
    # dense as the post layer.
    offset = 0 if scale < 2 else scale / 2 - 1
    if scale < 1.0 and kzPre < 0:
        # Negative indices on an up-scaled layer need re-alignment.
        pre_index = kzPre - (1.0 / scale) + 1
    else:
        pre_index = kzPre
    return offset + scale * pre_index
def zPatchHead(kzPre, nzPatch, zScaleLog2Pre, zScaleLog2Post):
    """Return the postsynaptic index of the head (first cell) of the weight
    patch of presynaptic cell ``kzPre``, for a patch of ``nzPatch`` cells and
    the given pre/post resolution scales (log2)."""
    a = math.pow(2.0, (zScaleLog2Pre - zScaleLog2Post))
    if a == 1:
        # Same resolution: patch is centred on the nearby neighbor.
        shift = -(0.5 * nzPatch)
        return shift + nearby_neighbor(kzPre, zScaleLog2Pre, zScaleLog2Post)
    shift = 1 - (0.5 * nzPatch)
    if (nzPatch % 2) == 0 and a < 1:
        # Even patch mapping onto a coarser post layer: apply an extra
        # one-cell shift depending on the parity of the pre position.
        # (A dead `kpos = (kzPre < 0)` assignment, immediately overwritten
        # below in the original, has been removed.)
        if kzPre < 0:
            kpos = -(1 + kzPre)
        else:
            kpos = kzPre
        parity = (2 * a * kpos) % 2
        # Booleans are ints in Python: these subtract 0 or 1 from shift.
        if kzPre < 0:
            shift -= parity == 1
        else:
            shift -= parity == 0
    elif (nzPatch % 2) == 1 and a < 1:
        shift = -(0.5 * nzPatch)
    neighbor = nearby_neighbor(kzPre, zScaleLog2Pre, zScaleLog2Post)
    if nzPatch == 1:
        return neighbor
    return shift + neighbor
"""
a = zPatchHead(int(sys.argv[1]), 5, -math.log(4, 2), -math.log(1, 2))
print a
print int(a)
sys.exit()
"""
# --- Load ON/OFF weight files (argv[1], argv[2]) -----------------------------
vmax = 100.0 # Hz
space = 1
extended = False
w = rw.PVReadWeights(sys.argv[1])
wOff = rw.PVReadWeights(sys.argv[2])
nx = w.nx
ny = w.ny
nxp = w.nxp
nyp = w.nyp
# Size of a tiled weight image: one (nxp x nyp) patch per neuron plus spacing.
nx_im = nx * (nxp + space) + space
ny_im = ny * (nyp + space) + space
# Flattened copies of every ON/OFF weight patch, one row per neuron.
predub = np.zeros(((nx*nx),(nxp * nxp)))
predubOff = np.zeros(((nx*nx),(nxp * nxp)))
numpat = w.numPatches
print "numpat = ", numpat
for k in range(numpat):
   p = w.next_patch()
   pOff = wOff.next_patch()
   predub[k] = p
   predubOff[k] = pOff
print "weights done"
#print "p = ", P
#if k == 500:
#   sys.exit()
#end fig loop
# --- Sparse activity file (argv[3]) and time-window parameters ---------------
activ = rs.PVReadSparse(sys.argv[3], extended)
end = int(sys.argv[4])
step = int(sys.argv[5])
begin = int(sys.argv[6])
count = 0
# NOTE(review): the loop variable reuses and clobbers `end`; the range is
# evaluated once, so iteration still covers begin+step .. end-1 in steps.
for end in range(begin+step, end, step):
   # Average per-neuron activity over the window [begin, end).
   A = activ.avg_activity(begin, end)
   # argv[7], argv[8], ... hold one image file per time window.
   this = 7 + count
   count += 1
   print "this = ", this
   print "file = ", sys.argv[this]
   print
   numrows, numcols = A.shape
   # NOTE(review): `min`/`max` shadow the builtins and are unused below.
   min = np.min(A)
   max = np.max(A)
   s = np.zeros(numcols)
   for col in range(numcols):
      s[col] = np.sum(A[:,col])
   s = s/numrows
   b = np.reshape(A, (len(A)* len(A)))
   c = np.shape(b)[0]
   mi=mpimg.imread(sys.argv[this])
   print "a w start"
   # Scale factor between the activity layer and the 64x64 reconstruction.
   # NOTE(review): integer division under Python 2 - assumes nx is a
   # multiple of 64; confirm.
   rr = nx / 64
   im = np.zeros((64, 64))
   for yi in range(len(A)):
      for xi in range(len(A)):
         # Map presynaptic (xi, yi) to the head of its 5-cell patch in the
         # reconstruction frame.
         x = int(zPatchHead(int(xi), 5, -math.log(rr, 2), -math.log(1, 2)))
         y = int(zPatchHead(int(yi), 5, -math.log(rr, 2), -math.log(1, 2)))
         if 58 > x >= 0 and 58 > y >= 0:
            if A[yi, xi] > 0:
               # Accumulate activity-weighted ON weights and subtract OFF
               # weights to reconstruct the stimulus.
               patch = predub[yi * (nx) + xi]
               patchOff = predubOff[yi * (nx) + xi]
               patch = np.reshape(patch, (nxp, nxp))
               patchOff = np.reshape(patchOff, (nxp, nxp))
               for yy in range(nyp):
                  for xx in range(nxp):
                     im[y + yy, x + xx] += patch[yy, xx] * A[yi, xi]
                     im[y + yy, x + xx] -= patchOff[yy, xx] * A[yi, xi]
   # Three panels: source image, average activity, reconstruction.
   fig = plt.figure()
   ax = fig.add_subplot(3,1,1)
   ax.imshow(mi, interpolation='Nearest', cmap='gray')
   ax = fig.add_subplot(3,1,2)
   #ax.imshow(mi, interpolation='Nearest', cmap='gray', origin="lower")
   ax.set_xlabel('activity')
   ax.imshow(A, cmap=cm.jet, interpolation='nearest', vmin = 0.0, vmax = np.max(A))
   ax = fig.add_subplot(313)
   ax.set_xlabel('image reconstruction')
   ax.imshow(im, cmap=cm.jet, interpolation='nearest', vmin = 0.0, vmax = np.max(im))
   plt.show()
#end fig loop
| epl-1.0 |
rseubert/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 248 | 6359 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P.J.Rousseuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager

from sklearn.covariance import EmpiricalCovariance, MinCovDet

# example settings
n_samples = 80
n_features = 5
repeat = 10

# Contamination levels: finely sampled up to n_samples/8, then coarser up to
# n_samples/2.  Cast to int because np.linspace yields floats, while each
# value is used below as an array length and a slice bound.
range_n_outliers = np.concatenate(
    (np.linspace(0, n_samples / 8, 5),
     np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(int)

# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))

# computation
for i, n_outliers in enumerate(range_n_outliers):
    for j in range(repeat):

        rng = np.random.RandomState(i * j)

        # generate data
        X = rng.randn(n_samples, n_features)

        # add some outliers
        outliers_index = rng.permutation(n_samples)[:n_outliers]
        # Use the seeded `rng` here too - the original drew from the global
        # np.random, defeating the per-run seeding above.
        outliers_offset = 10. * \
            (rng.randint(2, size=(n_outliers, n_features)) - 0.5)
        X[outliers_index] += outliers_offset
        inliers_mask = np.ones(n_samples).astype(bool)
        inliers_mask[outliers_index] = False

        # fit a Minimum Covariance Determinant (MCD) robust estimator to data
        mcd = MinCovDet().fit(X)
        # compare raw robust estimates with the true location and covariance
        err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
        err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))

        # compare estimators learned from the full data set with true
        # parameters
        err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
        err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
            np.eye(n_features))

        # compare with an empirical covariance learned from a pure data set
        # (i.e. "perfect" mcd)
        pure_X = X[inliers_mask]
        pure_location = pure_X.mean(0)
        pure_emp_cov = EmpiricalCovariance().fit(pure_X)
        err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
        err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))

# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
             yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
             label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
             yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
             label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
             yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
             label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)

plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
             yerr=err_cov_mcd.std(1),
             label="Robust covariance (mcd)", color='m')
# Floor division: these values are slice indices (true division would yield
# floats and fail under Python 3).
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
             err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
             yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
             label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
         err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)],
         color='green', ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
             yerr=err_cov_emp_pure.std(1),
             label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
Tong-Chen/scikit-learn | examples/mixture/plot_gmm_sin.py | 12 | 2726 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 gaussian components, finds too-small components and very
little structure. The fits by the dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import pylab as pl
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
    x = i * step - 6
    # Small Gaussian jitter on x, larger jitter on the sine value.
    X[i, 0] = x + np.random.normal(0, 0.1)
    X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
# Fit three models on the same data: classical EM, and two Dirichlet-process
# mixtures with a small and a large concentration parameter alpha.
for i, (clf, title) in enumerate([
        (mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
         "Expectation-maximization"),
        (mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
                       n_iter=100),
         "Dirichlet Process,alpha=0.01"),
        (mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
                       n_iter=100),
         "Dirichlet Process,alpha=100.")]):
    clf.fit(X)
    splot = pl.subplot(3, 1, 1 + i)
    Y_ = clf.predict(X)
    # NOTE(review): the inner `i` shadows the subplot index; safe here only
    # because the outer `i` is not used again after this point.
    for i, (mean, covar, color) in enumerate(zip(
            clf.means_, clf._get_covars(), color_iter)):
        v, w = linalg.eigh(covar)
        u = w[0] / linalg.norm(w[0])
        # as the DP will not use every component it has access to
        # unless it needs it, we shouldn't plot the redundant
        # components.
        if not np.any(Y_ == i):
            continue
        pl.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
        # Plot an ellipse to show the Gaussian component
        angle = np.arctan(u[1] / u[0])
        angle = 180 * angle / np.pi  # convert to degrees
        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(0.5)
        splot.add_artist(ell)
    pl.xlim(-6, 4 * np.pi - 6)
    pl.ylim(-5, 5)
    pl.title(title)
    pl.xticks(())
    pl.yticks(())
pl.show()
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/pyplots/pyplot_annotate.py | 1 | 1163 | """
===============
Pyplot Annotate
===============
"""
import numpy as np
import matplotlib.pyplot as plt
# nodebox section
# NOTE(review): `size`, `imagesize`, `image` and `HEIGHT` appear to be
# NodeBox builtins injected into the namespace - confirm; `HEIGHT` is not
# defined anywhere in this file.
if __name__ == '__builtin__':
    # were in nodebox
    import os
    import tempfile
    W = 800
    inset = 20
    size(W, 600)
    plt.cla()
    plt.clf()
    plt.close('all')
    def tempimage():
        # Create a closed, non-deleting temporary PNG and return its path.
        fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
        fname = fob.name
        fob.close()
        return fname
    imgx = 20
    imgy = 0
    def pltshow(plt, dpi=150):
        # Render the current figure to a temp file, draw it into the NodeBox
        # canvas, then advance the vertical cursor and grow the canvas.
        global imgx, imgy
        temppath = tempimage()
        plt.savefig(temppath, dpi=dpi)
        dx,dy = imagesize(temppath)
        w = min(W,dx)
        image(temppath,imgx,imgy,width=w)
        imgy = imgy + dy + 20
        os.remove(temppath)
        size(W, HEIGHT+dy+40)
else:
    # Plain Python: just show the matplotlib window.
    def pltshow(mplpyplot):
        mplpyplot.show()
# nodebox section end
# Plot a cosine and annotate its first local maximum with an arrow.
ax = plt.subplot(111)
t = np.arange(0.0, 5.0, 0.01)
s = np.cos(2*np.pi*t)
line, = plt.plot(t, s, lw=2)
plt.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
             arrowprops=dict(facecolor='black', shrink=0.05),
             )
plt.ylim(-2,2)
pltshow(plt)
| mit |
wxgeo/geophar | wxgeometrie/mathlib/tests/test_parsers.py | 1 | 12300 | # -*- coding: utf-8 -*-
#from tools.testlib import *
import re
from pytest import XFAIL
from wxgeometrie.mathlib import universal_functions
from wxgeometrie.mathlib.parsers import (traduire_formule, NBR, NBR_SIGNE, VAR,
VAR_NOT_ATTR, NBR_OR_VAR, _arguments_latex,
convertir_en_latex, _fast_closing_bracket_search,
_fast_opening_bracket_search, mathtext_parser,
_rechercher_numerateur, _rechercher_denominateur,
)
from tools.testlib import assertEqual
# Names the parser should accept as functions: every public name from
# universal_functions (no underscore in the name), plus a few extras handled
# specially by the parser.
liste_fonctions = [key for key in universal_functions.__dict__ if "_" not in key]
liste_fonctions.extend(["limite", "log10", "mat", "range"])
def assert_formule(x, y, OOo, LaTeX):
    """Check that translating *x* (in the given OOo/LaTeX modes) yields *y*."""
    # Commented-out debug code from the original has been removed.
    y_ = traduire_formule(x, fonctions=liste_fonctions, OOo=OOo, LaTeX=LaTeX,
                          verbose=False)
    assertEqual(y_, y)
def assert_arg_latex(x, *y):
    """Check that ``_arguments_latex(x, 2)`` splits *x* into the chunks *y*."""
    # Commented-out debug code from the original has been removed.
    assertEqual(_arguments_latex(x, 2), list(y))
def assert_all(x, y):
    """Check the translation of *x* under every enabled parser mode."""
    # The original called (OOo=True, LaTeX=False) twice; the duplicate has
    # been dropped. NOTE(review): it may have been intended as
    # (OOo=False, LaTeX=False) - confirm before adding that case.
    assert_formule(x, y, OOo=True, LaTeX=False)
    assert_formule(x, y, OOo=False, LaTeX=True)
    assert_formule(x, y, OOo=True, LaTeX=True)
def assert_OOo(x, y):
    """Check that *x* translates to *y* whenever OOo mode is enabled."""
    for latex in (False, True):
        assert_formule(x, y, OOo=True, LaTeX=latex)
def assert_latex(x, y):
    """Check that *x* translates to *y* whenever LaTeX mode is enabled."""
    for ooo in (False, True):
        assert_formule(x, y, OOo=ooo, LaTeX=True)
def assert_match(pattern, chaine):
    """Assert that the whole string matches the pattern."""
    anchored = pattern + "$"
    assert re.match(anchored, chaine)
def assert_not_match(pattern, chaine):
    """Assert that the string does NOT fully match the pattern."""
    assert re.match(pattern + "$", chaine) is None
def assert_VAR(chaine):
    """The string must be a valid variable name (hence also NBR_OR_VAR)."""
    for pattern in (VAR, NBR_OR_VAR):
        assert_match(pattern, chaine)
def assert_not_VAR(chaine):
    """The string must not be a valid variable name."""
    assert_not_match(VAR, chaine)
#def assert_NBR(chaine):
# assert_match(NBR, chaine)
# assert_match(NBR_OR_VAR, chaine)
def assert_NBR(chaine):
    """The string must match every number pattern (plain, signed, or-var)."""
    for pattern in (NBR, NBR_SIGNE, NBR_OR_VAR):
        assert_match(pattern, chaine)
def assert_NBR_SIGNE(chaine):
    """The string must be a signed number, but not a plain number-or-variable."""
    assert_match(NBR_SIGNE, chaine)
    assert_not_match(NBR_OR_VAR, chaine)
def assert_find_VAR_NOT_ATTR(chaine):
    """VAR_NOT_ATTR must be found somewhere inside the string."""
    assert re.search(VAR_NOT_ATTR, chaine) is not None
def assert_not_find_VAR_NOT_ATTR(chaine):
    """VAR_NOT_ATTR must not be found anywhere inside the string."""
    assert re.search(VAR_NOT_ATTR, chaine) is None
def assert_not_NBR(chaine):
    """The string must not match the (unsigned) number pattern."""
    assert_not_match(NBR, chaine)
def test_tous_modes():
assert_all('a z', 'a*z')
assert_all("2x+3", "2*x+3")
assert_all("2(x+3)", "2*(x+3)")
assert_all("(x+1)x(x+3)", "(x+1)*x*(x+3)")
assert_all("sin(x+1)x(x+3)", "sin(x+1)*x*(x+3)")
assert_all("(x+1)cos(x+3)", "(x+1)*cos(x+3)")
assert_all("-1.5x^(-2)+ab+3ab(2x+y)+x(y(z+1)))2(x)",
"-1.5*x**(-2)+ab+3*ab*(2*x+y)+x*(y*(z+1)))*2*(x)")
assert_all("3x³-2x²y-2x==5y", "3*x**3-2*x**2*y-2*x==5*y")
assert_all("25%*12 mod 5", "25/100*12%5")
assert_all("(25%*12)mod 5", "(25/100*12)%5")
assert_all("limite(1/x^3,x,1+)", "limite(1/x**3,x,1,'+')")
assert_all("limite(1/x^3,x, 1- )", "limite(1/x**3,x,1,'-')")
assert_all("x sin x+1", "x*sin(x)+1")
assert_all("log10 ab y sin 2x+1", "log10(ab)*y*sin(2*x)+1")
assert_all("cos 3.5x(x+1)", "cos(3.5*x)*(x+1)")
assert_all("cos 2", "cos(2)")
# Cas particulier :
assert_all("cos -3", "cos-3")
# Développement décimal infini périodique
assert_all("17.03[45]", "((1703+45/99)/100)")
assert_all("17.[045]", "((17+45/999)/1)")
assert_all("17.1[0]", "((171+0/9)/10)")
# Ne pas rajouter de * devant les parenthèses d'une méthode
assert_all("A.transpose()", "A.transpose()")
assert_all("[j for j in liste]", "[j for j in liste]")
# Caractères unicode
assert_all("\u2013x\u22123\u00D7y\u00F7z²", "-x-3*y/z**2")
# * entre un flottant et une parenthese
assert_all(".015(x-50)^2-20", ".015*(x-50)**2-20")
assert_all("-1.015 (x-50)", "-1.015*(x-50)")
assert_all('5|x+3|+1-|2x|', '5*abs(x+3)+1-abs(2*x)')
assert_all('[f for j in range(1, 11)]', '[f for j in range(1,11)]')
def test_texte():
# Texte entre guillemets "texte" ou """texte""" inchangé.
assert_all("'1.2345'", "'1.2345'")
assert_all('"ok"', '"ok"')
assert_all('"x(x+1)" x(x+1) """x(x+1) " """', '"x(x+1)"x*(x+1)"""x(x+1) " """')
assert_all(r'"\""', r'"\""')
assert_all(r'"""\"+1\" ici, et non \"+n\""""', r'"""\"+1\" ici, et non \"+n\""""')
def test_matrice():
# Rajouter mat() quand il n'y est pas.
assert_all("[[1, 2], [3, 4]]", "mat([[1,2],[3,4]])")
assert_all("[ [1,2;2,5] ; [-3,4;4,2] ]", "mat([[1.2,2.5],[-3.4,4.2]])")
# Ne pas rajouter mat() quand il y est déjà.
assert_all("mat([[1, 2], [3, 4]])", "mat([[1,2],[3,4]])")
assert_all("mat( [[1, 2], [3, 4]] )", "mat([[1,2],[3,4]])")
def test_mode_OOo():
assert_OOo("2 times 3", "2*3")
assert_OOo("2 over 3", "2/3")
assert_OOo("{2+5x} over {3-x}", "(2+5*x)/(3-x)")
assert_OOo("{2-.5x}over{3-x}", "(2-.5*x)/(3-x)")
assert_OOo("0.85 sup {1 over 7} - 1", "0.85**(1/7)-1")
def test_mode_LaTeX():
assert_latex("2\\times3", "2*3")
assert_latex("\\cos x\\sin x\\exp x", "cos(x)*sin(x)*exp(x)")
assert_latex("\\frac{2}{3}", "((2)/(3))")
assert_latex("\\frac{2+x}{3}", "((2+x)/(3))")
assert_latex("\\dfrac{2+x}{1-3}", "((2+x)/(1-3))")
assert_latex("\\tfrac{2+x}{3}", "((2+x)/(3))")
assert_latex("\\dfrac{2x^2+x-7}{6-4x}", "((2*x**2+x-7)/(6-4*x))")
assert_latex("-\\frac12-\\dfrac4{(6-4x)^2}", "-(1/2)-(4/((6-4*x)**2))")
assert_latex("\\left((1+10~\\%)(1+5~\\%)(1-7~\\%)\\right)^{\\frac{1}{3} }",
"((1+10/100)*(1+5/100)*(1-7/100))**(((1)/(3)))")
assert_latex("\\text{0.7}\\times (-50)^2-9\\times (-50)+200", "(0.7)*(-50)**2-9*(-50)+200")
assert_latex("\\ln(2)+\\exp(3)+\\log(\\pi+1)", "ln(2)+exp(3)+log(pi+1)")
assert_latex("x\ge1\le3", "x>=1<=3")
assert_latex(r"100\left(\left(1+\dfrac{50}{100}\right)^\frac{1}{10}-1\right)",
"100*((1+((50)/(100)))**((1)/(10))-1)")
assert_latex("M = \\begin{pmatrix}\n0,6 & 0,4\\\\\n0,75& 0,25\\\\\n\\end{pmatrix}",
'M=mat([[0.6,0.4],[0.75,0.25]])')
assert_latex(r"\begin{pmatrix}0.65& 0.35\end{pmatrix}\begin{pmatrix}0.55 & 0.45\\0.3 & 0.7\end{pmatrix}",
"mat([[0.65,0.35]])*mat([[0.55,0.45],[0.3,0.7]])")
def test_NBR():
assert_NBR_SIGNE("-2.56")
assert_NBR_SIGNE("-.56")
assert_NBR_SIGNE("+5.")
assert_NBR_SIGNE("+5.056")
assert_NBR("56")
assert_NBR(".46")
assert_NBR(".015")
assert_NBR("752.")
assert_NBR("740.54")
assert_not_NBR("5-6")
assert_not_NBR(".")
# Regression test for issue FS#252
assert_match('\(' + NBR_SIGNE, "(-2.3")
def test_VAR():
assert_VAR("Arertytre")
assert_VAR("a")
assert_VAR("_")
assert_VAR("_45ui")
assert_VAR("A13")
assert_not_VAR("1A")
assert_not_VAR("2")
def test_search_VAR_NOT_ATTR():
assert_find_VAR_NOT_ATTR("a")
assert_find_VAR_NOT_ATTR("1+_arrt9876")
assert_find_VAR_NOT_ATTR("5*t_566")
assert_find_VAR_NOT_ATTR("(_.t)/3")
assert_not_find_VAR_NOT_ATTR(".aert")
assert_not_find_VAR_NOT_ATTR("4.tyu+4")
assert_not_find_VAR_NOT_ATTR("89eeazt")
assert_not_find_VAR_NOT_ATTR("2-._ez")
def test_arguments_LaTeX():
assert_arg_latex('2{x+1}+4', '2', '{x+1}', '+4')
assert_arg_latex('{x+2}5+4x-17^{2+x}', '{x+2}', '5', '+4x-17^{2+x}')
# -----------------------------------------------------------
# Tests conversion chaines : calcul au format Python -> LaTeX
# -----------------------------------------------------------
def assert_conv(input, output):
    """``convertir_en_latex(input)`` must give *output* wrapped in dollars."""
    expected = '$%s$' % output
    assertEqual(convertir_en_latex(input), expected)
def test_convertir_en_LaTeX():
assert_conv('2*x', '2 x')
assert_conv('2*3', r'2\times 3')
#TODO: retourner 0.005 au lieu de .005
assert_conv('2*.005', r'2\times .005')
assert_conv('2*x+3', '2 x+3')
assert_conv('x**-3.5+x**2*y-x**(2*y)+8', 'x^{-3.5}+x^{2} y-x^{2 y}+8')
assert_conv('--x+-----3--y', 'x-3+y')
assert_conv('sqrt(x) + exp(-y)', r'\sqrt{x}+\exp(-y)')
assert_conv('+oo', r'+\infty')
def test_convertir_en_LaTeX_mode_dollars():
assertEqual(convertir_en_latex('-1', mode='$'), '$-1$')
assertEqual(convertir_en_latex('', mode='$'), '') # '' et non '$$' !
def test_convertir_en_LaTeX_fractions():
assert_conv('2/3', r'\frac{2}{3}')
assert_conv('-2/3', r'-\frac{2}{3}')
assert_conv('x**(2/3)', r'x^{\frac{2}{3}}')
assert_conv('(x+1)/(2*x)', r'\frac{x+1}{2 x}')
assert_conv('(x(x+1))/(2*x*(x+2)*(x**25+7*x+5))*(x+3)',
r'\frac{x(x+1)}{2 x (x+2) (x^{25}+7 x+5)} (x+3)')
assert_conv('2/3x', r'\frac{2}{3}x')
assert_conv('2/0.4', r'\frac{2}{0.4}')
def test_convertir_en_LaTeX_fractions_imbriquees():
assert_conv('(9*x+3/7)/(-8*x-6)', r'\frac{9 x+\frac{3}{7}}{-8 x-6}')
assert_conv('2/3/4', r'\frac{\frac{2}{3}}{4}')
assert_conv('25/(4/7)+8/pi', r'\frac{25}{\frac{4}{7}}+\frac{8}{\pi}')
assert_conv('(2/3)/(25/(4/7)+8/pi)',
r'\frac{\frac{2}{3}}{\frac{25}{\frac{4}{7}}+\frac{8}{\pi}}')
def test_convertir_en_LaTeX_bad_expression():
# XXX: Par défaut, quand l'expression n'est pas valide, la valeur
# retournée doit être la valeur entrée ??
# Pour l'instant, aucun comportement clair n'est défini lorsqu'une
# expression mathématiques invalide est entrée.
# Simplement, le parser ne doit pas planter.
assert_conv('2/', '2/')
assert_conv('/', '/')
assert_conv('-+', '-')
# Un signe plus ou un signe moins isolé peuvent être utiles
assert_conv('+', '+')
def test_parentheses_inutiles():
assert_conv('(x+1)', 'x+1')
assert_conv('(x(x+1))', 'x(x+1)')
assert_conv('(((x)))', 'x')
def test_convertir_en_LaTeX_mode_None():
assertEqual(convertir_en_latex('2*x', mode=None), '2 x')
def assert_search(input, expected):
    """The closing-bracket search must keep exactly *expected* as a prefix."""
    pos = _fast_closing_bracket_search(input)
    assertEqual(input[:pos], expected)
def test_fast_closing_bracket_search():
assert_search('(ok)', '(ok)')
assert_search('(ok)+3', '(ok)')
assert_search('(x+(x-3(x-4))+(x-7))-x(x+8)', '(x+(x-3(x-4))+(x-7))')
def assert_backsearch(input, expected):
    """The opening-bracket search must keep exactly *expected* as a suffix."""
    pos = _fast_opening_bracket_search(input)
    assertEqual(input[pos:], expected)
def test_fast_opening_bracket_search():
    """The backward search must return the last balanced group."""
    for source, suffix in [
        ('(ok)', '(ok)'),
        ('3+(ok)', '(ok)'),
        ('x(x+8)-(x+(x-3(x-4))+(x-7))', '(x+(x-3(x-4))+(x-7))'),
    ]:
        assert_backsearch(source, suffix)
def assert_numerateur(input, expected):
    """Check that the numerator search finds `expected` as the trailing
    operand of `input` (and does find something at all)."""
    start = _rechercher_numerateur(input)
    assert start is not None
    assertEqual(input[start:], expected)
def test_rechercher_numerateur():
    """The numerator is the rightmost operand of the expression."""
    cases = [
        ('2', '2'),
        ('-2', '2'),
        ('1+2', '2'),
        ('cos(x)', 'cos(x)'),
        ('x-2^cos(x)', '2^cos(x)'),
        ('3-(x+1)^(x-2^cos(x))', '(x+1)^(x-2^cos(x))'),
        ('3-@', '@'),
        ('(4/7)+8', '8'),
        ('@+8', '8'),
    ]
    for source, expected in cases:
        assert_numerateur(source, expected)
def assert_denominateur(input, expected):
    """Check that the denominator search finds `expected` as the leading
    operand of `input` (and does find something at all)."""
    end = _rechercher_denominateur(input)
    assert end is not None
    assertEqual(input[:end], expected)
def test_rechercher_denominateur():
    """The denominator is the leftmost operand of the expression."""
    cases = [
        ('2', '2'),
        ('-2', '-2'),
        ('1+2', '1'),
        ('cos(x)-x', 'cos(x)'),
        ('2^cos(x)-x', '2^cos(x)'),
        ('(x+1)^(x-2^cos(x))-3', '(x+1)^(x-2^cos(x))'),
    ]
    for source, expected in cases:
        assert_denominateur(source, expected)
def test_mathtext_parser():
    "We simply test that no error is raised."
    # Bug in matplotlib 1.1.1 (original note): the same prime-containing
    # expression is deliberately parsed twice — presumably the failure only
    # showed on the second parse; keep both calls.
    mathtext_parser("$A'$")
    mathtext_parser("$A'$")
    mathtext_parser("$f'$ est la dérivée")
    mathtext_parser("$1^{er}$ dé")
    mathtext_parser(r"$\left]-\infty;\frac{1}{3}\right]\cup\left[2;5\right[$")
| gpl-2.0 |
aflaxman/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 50 | 2007 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
# load the data
digits = load_digits()

# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)

# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)

print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))

# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_

# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
# map the samples back from the 15-dim PCA space to the 64-pixel space
new_data = pca.inverse_transform(new_data)

# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))

# plot real digits and resampled digits:
# rows 0-3 show real digits, row 4 is a hidden spacer, rows 5-8 show samples
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
    ax[4, j].set_visible(False)
    for i in range(4):
        im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
                             cmap=plt.cm.binary, interpolation='nearest')
        im.set_clim(0, 16)  # digit pixel intensities span 0..16
        im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
                                 cmap=plt.cm.binary, interpolation='nearest')
        im.set_clim(0, 16)

ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')

plt.show()
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/mpl_toolkits/axisartist/grid_helper_curvelinear.py | 1 | 25266 | """
An experimental support for curvilinear grid.
"""
from itertools import chain
from grid_finder import GridFinder
from axislines import \
AxisArtistHelper, GridHelperBase
from axis_artist import AxisArtist
from matplotlib.transforms import Affine2D, IdentityTransform
import numpy as np
from matplotlib.path import Path
class FixedAxisArtistHelper(AxisArtistHelper.Fixed):
    """
    Helper class for a fixed axis (one of the rectangle edges) whose tick
    positions come from the curvilinear grid helper.
    """

    def __init__(self, grid_helper, side, nth_coord_ticks=None):
        """
        nth_coord = along which coordinate value varies.
        nth_coord = 0 -> x axis, nth_coord = 1 -> y axis

        side : "left", "right", "bottom" or "top" — which rectangle edge
        this helper serves.
        """
        super(FixedAxisArtistHelper, self).__init__( \
            loc=side,
            )

        self.grid_helper = grid_helper
        # Default to the coordinate the base class derived from `side`.
        if nth_coord_ticks is None:
            nth_coord_ticks = self.nth_coord
        self.nth_coord_ticks = nth_coord_ticks

        self.side = side

    def update_lim(self, axes):
        # Delegate: the grid helper caches grid info for the current limits.
        self.grid_helper.update_lim(axes)

    def change_tick_coord(self, coord_number=None):
        """Switch (or toggle, when None) which coordinate supplies ticks."""
        if coord_number is None:
            self.nth_coord_ticks = 1 - self.nth_coord_ticks
        elif coord_number in [0, 1]:
            self.nth_coord_ticks = coord_number
        else:
            raise Exception("wrong coord number")

    def get_tick_transform(self, axes):
        # Tick locations produced below are in data coordinates.
        return axes.transData

    def get_tick_iterators(self, axes):
        """tick_loc, tick_angle, tick_label

        Returns (major iterator, minor iterator); the "minor" slot is kept
        empty and the other coordinate's ticks are chained (unlabeled) into
        the major iterator instead.
        """
        g = self.grid_helper
        ti1 = g.get_tick_iterator(self.nth_coord_ticks, self.side)
        ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, self.side, minor=True)
        #ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, self.side, minor=True)
        return chain(ti1, ti2), iter([])
class FloatingAxisArtistHelper(AxisArtistHelper.Floating):
    """Helper for a "floating" axis: the line where one curvilinear
    coordinate is held at a fixed `value` while the other one varies."""

    def __init__(self, grid_helper, nth_coord, value, axis_direction=None):
        """
        nth_coord = along which coordinate value varies.
        nth_coord = 0 -> x axis, nth_coord = 1 -> y axis

        NOTE(review): `axis_direction` is accepted but never used here —
        presumably handled by the caller; confirm before removing.
        """
        super(FloatingAxisArtistHelper, self).__init__(nth_coord,
                                                       value,
                                                       )
        self.value = value
        self.grid_helper = grid_helper
        # (min, max) limits along the *other* coordinate; None = unbounded.
        self._extremes = None, None

        self._get_line_path = None # a method that returns a Path.
        self._line_num_points = 100 # number of points to create a line

    def set_extremes(self, e1, e2):
        """Clip the floating axis to [e1, e2] along the other coordinate."""
        self._extremes = e1, e2

    def update_lim(self, axes):
        """Recompute self.grid_info (tick levels, labels, line points)
        for the current view limits of `axes`."""
        self.grid_helper.update_lim(axes)

        x1, x2 = axes.get_xlim()
        y1, y2 = axes.get_ylim()
        grid_finder = self.grid_helper.grid_finder
        # extremes = (lon_min, lon_max, lat_min, lat_max) of the
        # curvilinear coordinates visible in the current view.
        extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
                                              x1, y1, x2, y2)

        extremes = list(extremes)
        e1, e2 = self._extremes # ranges of other coordinates
        # Tighten the extremes of the *other* coordinate with the user clip.
        if self.nth_coord == 0:
            if e1 is not None:
                extremes[2] = max(e1, extremes[2])
            if e2 is not None:
                extremes[3] = min(e2, extremes[3])
        elif self.nth_coord == 1:
            if e1 is not None:
                extremes[0] = max(e1, extremes[0])
            if e2 is not None:
                extremes[1] = min(e2, extremes[1])

        grid_info = dict()
        lon_min, lon_max, lat_min, lat_max = extremes
        # Ask the locators for tick levels within the visible ranges.
        lon_levs, lon_n, lon_factor = \
                  grid_finder.grid_locator1(lon_min, lon_max)
        lat_levs, lat_n, lat_factor = \
                  grid_finder.grid_locator2(lat_min, lat_max)
        grid_info["extremes"] = extremes

        grid_info["lon_info"] = lon_levs, lon_n, lon_factor
        grid_info["lat_info"] = lat_levs, lat_n, lat_factor

        grid_info["lon_labels"] = grid_finder.tick_formatter1("bottom",
                                                              lon_factor,
                                                              lon_levs)

        grid_info["lat_labels"] = grid_finder.tick_formatter2("bottom",
                                                              lat_factor,
                                                              lat_levs)

        grid_finder = self.grid_helper.grid_finder
        #e1, e2 = self._extremes # ranges of other coordinates
        # Sample the floating line: constant `value` along our coordinate,
        # the other coordinate swept across its (clipped) extremes.
        if self.nth_coord == 0:
            xx0 = np.linspace(self.value, self.value, self._line_num_points)
            yy0 = np.linspace(extremes[2], extremes[3], self._line_num_points)
            xx, yy = grid_finder.transform_xy(xx0, yy0)
        elif self.nth_coord == 1:
            xx0 = np.linspace(extremes[0], extremes[1], self._line_num_points)
            yy0 = np.linspace(self.value, self.value, self._line_num_points)
            xx, yy = grid_finder.transform_xy(xx0, yy0)

        grid_info["line_xy"] = xx, yy
        self.grid_info = grid_info

    def get_axislabel_transform(self, axes):
        # Label position below is already in display coordinates.
        return Affine2D() #axes.transData

    def get_axislabel_pos_angle(self, axes):
        """Return (display position, angle in degrees) for the axis label,
        or (None, None) when the midpoint falls outside the axes."""
        extremes = self.grid_info["extremes"]
        # Midpoint of the line, plus a tiny offset to estimate its tangent.
        if self.nth_coord == 0:
            xx0 = self.value
            yy0 = (extremes[2]+extremes[3])/2.
            dxx, dyy = 0., abs(extremes[2]-extremes[3])/1000.
        elif self.nth_coord == 1:
            xx0 = (extremes[0]+extremes[1])/2.
            yy0 = self.value
            dxx, dyy = abs(extremes[0]-extremes[1])/1000., 0.

        grid_finder = self.grid_helper.grid_finder
        xx1, yy1 = grid_finder.transform_xy([xx0], [yy0])
        trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
        p = trans_passingthrough_point.transform_point([xx1[0], yy1[0]])

        # Only place the label when the point is inside the axes box.
        if (0. <= p[0] <= 1.) and (0. <= p[1] <= 1.):
            xx1c, yy1c = axes.transData.transform_point([xx1[0], yy1[0]])
            xx2, yy2 = grid_finder.transform_xy([xx0+dxx], [yy0+dyy])
            xx2c, yy2c = axes.transData.transform_point([xx2[0], yy2[0]])
            return (xx1c, yy1c), np.arctan2(yy2c-yy1c, xx2c-xx1c)/np.pi*180.
        else:
            return None, None

    def get_tick_transform(self, axes):
        # Tick positions yielded by f1() are already display coordinates.
        return IdentityTransform() #axes.transData

    def get_tick_iterators(self, axes):
        """tick_loc, tick_angle, tick_label, (optionally) tick_label"""
        grid_finder = self.grid_helper.grid_finder

        # Undo the locator's scaling factor to get real coordinate levels;
        # dx/dy are small steps used to estimate tick angles numerically.
        lat_levs, lat_n, lat_factor = self.grid_info["lat_info"]
        lat_levs = np.asarray(lat_levs)
        if lat_factor is not None:
            yy0 = lat_levs / lat_factor
            dy = 0.01 / lat_factor
        else:
            yy0 = lat_levs
            dy = 0.01

        lon_levs, lon_n, lon_factor = self.grid_info["lon_info"]
        lon_levs = np.asarray(lon_levs)
        if lon_factor is not None:
            xx0 = lon_levs / lon_factor
            dx = 0.01 / lon_factor
        else:
            xx0 = lon_levs
            dx = 0.01

        e0, e1 = sorted(self._extremes)
        if e0 is None:
            e0 = -np.inf
        if e1 is None:
            e1 = np.inf

        # Drop tick levels outside the user-set extremes.
        if self.nth_coord == 0:
            mask = (e0 <= yy0) & (yy0 <= e1)
            yy0 = yy0[mask]
        elif self.nth_coord == 1:
            mask = (e0 <= xx0) & (xx0 <= e1)
            xx0 = xx0[mask]

        def transform_xy(x, y):
            # curvilinear -> data -> display coordinates
            x1, y1 = grid_finder.transform_xy(x, y)
            x2y2 = axes.transData.transform(np.array([x1, y1]).transpose())
            x2, y2 = x2y2.transpose()
            return x2, y2

        # find angles: (xx1a,yy1a)->(xx1b,yy1b) spans along our coordinate
        # (the "normal" direction), (xx2a,yy2a)->(xx2b,yy2b) along the other
        # coordinate (the "tangent" direction).  Points at the upper clip
        # limit are nudged inwards by dx so the finite difference stays
        # inside the valid range.
        if self.nth_coord == 0:
            xx0 = np.empty_like(yy0)
            xx0.fill(self.value)

            xx1, yy1 = transform_xy(xx0, yy0)

            xx00 = xx0.copy()
            xx00[xx0+dx>e1] -= dx
            xx1a, yy1a = transform_xy(xx00, yy0)
            xx1b, yy1b = transform_xy(xx00+dx, yy0)

            xx2a, yy2a = transform_xy(xx0, yy0)
            xx2b, yy2b = transform_xy(xx0, yy0+dy)

            labels = self.grid_info["lat_labels"]
            labels = [l for l, m in zip(labels, mask) if m]

        elif self.nth_coord == 1:
            yy0 = np.empty_like(xx0)
            yy0.fill(self.value)

            xx1, yy1 = transform_xy(xx0, yy0)

            xx1a, yy1a = transform_xy(xx0, yy0)
            xx1b, yy1b = transform_xy(xx0, yy0+dy)

            xx00 = xx0.copy()
            xx00[xx0+dx>e1] -= dx
            xx2a, yy2a = transform_xy(xx00, yy0)
            xx2b, yy2b = transform_xy(xx00+dx, yy0)

            labels = self.grid_info["lon_labels"]
            labels = [l for l, m in zip(labels, mask) if m]

        def f1():
            # Generator yielding ([x, y], normal_deg, tangent_deg, label)
            # for ticks that land (within `delta`) inside the axes box.
            dd = np.arctan2(yy1b-yy1a, xx1b-xx1a) # angle normal
            dd2 = np.arctan2(yy2b-yy2a, xx2b-xx2a) # angle tangent
            mm = ((yy1b-yy1a)==0.) & ((xx1b-xx1a)==0.) # mask where dd1 is not defined
            # where the normal is degenerate, fall back to tangent + 90deg
            dd[mm] = dd2[mm]+3.14159/2.

            trans_tick = self.get_tick_transform(axes)
            tr2ax = trans_tick + axes.transAxes.inverted()
            for x, y, d, d2, lab in zip(xx1, yy1, dd, dd2, labels):
                c2 = tr2ax.transform_point((x, y))
                delta=0.00001
                if (0. -delta<= c2[0] <= 1.+delta) and \
                       (0. -delta<= c2[1] <= 1.+delta):
                    d1 = d/3.14159*180.
                    d2 = d2/3.14159*180.
                    yield [x, y], d1, d2, lab

        return f1(), iter([])

    def get_line_transform(self, axes):
        return axes.transData

    def get_line(self, axes):
        """Return the Path of the floating axis line in data coordinates."""
        self.update_lim(axes)
        x, y = self.grid_info["line_xy"]

        if self._get_line_path is None:
            # NOTE(review): Path(zip(x, y)) relies on zip() returning a
            # list (Python 2); on Python 3 this would need list(zip(x, y)).
            return Path(zip(x, y))
        else:
            return self._get_line_path(axes, x, y)
class GridHelperCurveLinear(GridHelperBase):
    """Grid helper that draws grid lines and ticks for a curvilinear
    coordinate system defined by an auxiliary transform."""

    def __init__(self, aux_trans,
                 extreme_finder=None,
                 grid_locator1=None,
                 grid_locator2=None,
                 tick_formatter1=None,
                 tick_formatter2=None):
        """
        aux_trans : a transform from the source (curved) coordinate to
        target (rectilinear) coordinate. An instance of MPL's Transform
        (inverse transform should be defined) or a tuple of two callable
        objects which defines the transform and its inverse. The callables
        need take two arguments of array of source coordinates and
        should return two target coordinates:
        e.g. x2, y2 = trans(x1, y1)
        """
        super(GridHelperCurveLinear, self).__init__()

        self.grid_info = None
        self._old_values = None
        #self._grid_params = dict()
        self._aux_trans = aux_trans

        # GridFinder does the actual grid computation; the locator and
        # formatter arguments may be None to get its defaults.
        self.grid_finder = GridFinder(aux_trans,
                                      extreme_finder,
                                      grid_locator1,
                                      grid_locator2,
                                      tick_formatter1,
                                      tick_formatter2)

    def update_grid_finder(self, aux_trans=None, **kw):
        """Swap the transform and/or locator/formatter settings, then
        force the cached grid to be recomputed."""
        if aux_trans is not None:
            self.grid_finder.update_transform(aux_trans)

        self.grid_finder.update(**kw)
        self.invalidate()

    def _update(self, x1, x2, y1, y2):
        "bbox in 0-based image coordinates"
        # update wcsgrid only when the view limits actually changed
        if self.valid() and self._old_values == (x1, x2, y1, y2):
            return

        self._update_grid(x1, y1, x2, y2)

        self._old_values = (x1, x2, y1, y2)

        self._force_update = False

    def new_fixed_axis(self, loc,
                       nth_coord=None,
                       axis_direction=None,
                       offset=None,
                       axes=None):
        """Create an AxisArtist on the rectangle edge `loc`, ticked by
        coordinate `nth_coord` of the curvilinear system."""
        if axes is None:
            axes = self.axes

        if axis_direction is None:
            axis_direction = loc
        _helper = FixedAxisArtistHelper(self, loc,
                                        #nth_coord,
                                        nth_coord_ticks=nth_coord,
                                        )

        axisline = AxisArtist(axes, _helper, axis_direction=axis_direction)

        return axisline

    def new_floating_axis(self, nth_coord,
                          value,
                          axes=None,
                          axis_direction="bottom"
                          ):
        """Create an AxisArtist along the line where coordinate
        `nth_coord` equals `value`."""
        if axes is None:
            axes = self.axes

        _helper = FloatingAxisArtistHelper( \
            self, nth_coord, value, axis_direction)

        axisline = AxisArtist(axes, _helper)

        # Clip the floating line to the axes rectangle.
        axisline.line.set_clip_on(True)
        axisline.line.set_clip_box(axisline.axes.bbox)

        return axisline

    def _update_grid(self, x1, y1, x2, y2):
        # Cache grid lines / tick info for the given data-limits rectangle.
        self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)

    def get_gridlines(self):
        """Return a flat list of grid-line vertex arrays (lon then lat)."""
        grid_lines = []
        for gl in self.grid_info["lat"]["lines"]:
            grid_lines.extend(gl)
        for gl in self.grid_info["lon"]["lines"]:
            grid_lines.extend(gl)
        return grid_lines

    def get_tick_iterator(self, nth_coord, axis_side, minor=False):
        """Yield (xy, angle_normal, angle_tangent, label) for the ticks of
        coordinate `nth_coord` on the given rectangle side.  Minor ticks
        reuse the same locations but carry empty labels."""
        #axisnr = dict(left=0, bottom=1, right=2, top=3)[axis_side]
        angle_tangent = dict(left=90, right=90, bottom=0, top=0)[axis_side]
        #angle = [0, 90, 180, 270][axisnr]
        lon_or_lat = ["lon", "lat"][nth_coord]
        if not minor: # major ticks
            def f():
                for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
                                      self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
                    angle_normal = a
                    yield xy, angle_normal, angle_tangent, l
        else:
            def f():
                for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
                                      self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
                    angle_normal = a
                    yield xy, angle_normal, angle_tangent, ""
        return f()
def test3():
    """Visual smoke test: a simple shear transform rendered through
    GridHelperCurveLinear on a host axes with one parasite axes."""

    import numpy as np
    from matplotlib.transforms import Transform
    from matplotlib.path import Path

    class MyTransform(Transform):
        input_dims = 2
        output_dims = 2
        is_separable = False

        def __init__(self, resolution):
            """
            Create a new shear transform.  Resolution is the number of steps
            to interpolate between each input line segment to approximate
            its path in the transformed space.
            """
            Transform.__init__(self)
            self._resolution = resolution

        def transform(self, ll):
            # shear: (x, y) -> (x, y - x)
            x = ll[:, 0:1]
            y = ll[:, 1:2]
            return np.concatenate((x, y-x), 1)

        transform.__doc__ = Transform.transform.__doc__

        transform_non_affine = transform
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

        def transform_path(self, path):
            # Interpolate the path first so straight segments stay accurate
            # after the (non-affine) transform.
            ipath = path.interpolated(self._resolution)
            return Path(self.transform(ipath.vertices), ipath.codes)
        transform_path.__doc__ = Transform.transform_path.__doc__

        transform_path_non_affine = transform_path
        transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__

        def inverted(self):
            return MyTransformInv(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__

    class MyTransformInv(Transform):
        input_dims = 2
        output_dims = 2
        is_separable = False

        def __init__(self, resolution):
            Transform.__init__(self)
            self._resolution = resolution

        def transform(self, ll):
            # inverse shear: (x, y) -> (x, y + x)
            x = ll[:, 0:1]
            y = ll[:, 1:2]
            return np.concatenate((x, y+x), 1)
        transform.__doc__ = Transform.transform.__doc__

        def inverted(self):
            return MyTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__

    import matplotlib.pyplot as plt
    fig = plt.figure(1)
    fig.clf()

    tr = MyTransform(1)
    grid_helper = GridHelperCurveLinear(tr)

    # BUGFIX: ParasiteAxesAuxTrans is used below but was never imported,
    # so running test3() raised NameError.
    from mpl_toolkits.axes_grid1.parasite_axes import host_subplot_class_factory, \
         ParasiteAxesAuxTrans
    from axislines import Axes

    SubplotHost = host_subplot_class_factory(Axes)

    ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)

    fig.add_subplot(ax1)

    # A parasite axes drawn through the auxiliary transform: data plotted
    # here follows the curvilinear grid of ax1.
    ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
    ax1.parasites.append(ax2)
    ax2.plot([3, 6], [5.0, 10.])

    ax1.set_aspect(1.)
    ax1.set_xlim(0, 10)
    ax1.set_ylim(0, 10)

    ax1.grid(True)
    plt.draw()
def curvelinear_test2(fig):
    """
    polar projection, but in a rectangular box.

    Demonstrates floating axes ("lat"/"lon") and a parasite axes plotted
    through the polar transform.
    """
    global ax1
    import numpy as np
    import angle_helper
    from matplotlib.projections import PolarAxes
    from matplotlib.transforms import Affine2D

    from mpl_toolkits.axes_grid.parasite_axes import SubplotHost, \
         ParasiteAxesAuxTrans
    import matplotlib.cbook as cbook

    # PolarAxes.PolarTransform takes radian. However, we want our coordinate
    # system in degree
    tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()

    # polar projection, which involves cycle, and also has limits in
    # its coordinates, needs a special method to find the extremes
    # (min, max of the coordinate within the view).

    # 20, 20 : number of sampling points along x, y direction
    extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
                                                     lon_cycle = 360,
                                                     lat_cycle = None,
                                                     lon_minmax = None,
                                                     lat_minmax = (0, np.inf),
                                                     )

    grid_locator1 = angle_helper.LocatorDMS(5)
    # Find a grid values appropriate for the coordinate (degree,
    # minute, second).

    tick_formatter1 = angle_helper.FormatterDMS()
    # And also uses an appropriate formatter.  Note that, the
    # acceptable Locator and Formatter class is a bit different than
    # that of mpl's, and you cannot directly use mpl's Locator and
    # Formatter here (but may be possible in the future).

    grid_helper = GridHelperCurveLinear(tr,
                                        extreme_finder=extreme_finder,
                                        grid_locator1=grid_locator1,
                                        tick_formatter1=tick_formatter1
                                        )

    ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)

    # make ticklabels of right and top axis visible.
    ax1.axis["right"].major_ticklabels.set_visible(True)
    ax1.axis["top"].major_ticklabels.set_visible(True)

    # let right axis shows ticklabels for 1st coordinate (angle)
    ax1.axis["right"].get_helper().nth_coord_ticks=0
    # let bottom axis shows ticklabels for 2nd coordinate (radius)
    ax1.axis["bottom"].get_helper().nth_coord_ticks=1

    fig.add_subplot(ax1)

    grid_helper = ax1.get_grid_helper()
    # floating axis along angle = 60 deg, clipped to radius in [2, 10]
    ax1.axis["lat"] = axis = grid_helper.new_floating_axis(0, 60, axes=ax1)
    axis.label.set_text("Test")
    axis.label.set_visible(True)
    axis.get_helper()._extremes=2, 10

    # floating axis along radius = 6, clipped to angle in [-180, 90] deg
    ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 6, axes=ax1)
    axis.label.set_text("Test 2")
    axis.get_helper()._extremes=-180, 90

    # A parasite axes with given transform
    ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
    # note that ax2.transData == tr + ax1.transData
    # Anything you draw in ax2 will match the ticks and grids of ax1.
    ax1.parasites.append(ax2)
    intp = cbook.simple_linear_interpolation
    ax2.plot(intp(np.array([0, 30]), 50),
             intp(np.array([10., 10.]), 50))

    ax1.set_aspect(1.)
    ax1.set_xlim(-5, 12)
    ax1.set_ylim(-5, 10)

    ax1.grid(True)
def curvelinear_test3(fig):
    """
    polar projection, but in a rectangular box.

    All fixed (rectangle-edge) axes are hidden; the plot is framed only by
    three floating axes ("lat1", "lat2", "lon").
    """
    global ax1, axis
    import numpy as np
    import angle_helper
    from matplotlib.projections import PolarAxes
    from matplotlib.transforms import Affine2D

    from mpl_toolkits.axes_grid.parasite_axes import SubplotHost

    # PolarAxes.PolarTransform takes radian. However, we want our coordinate
    # system in degree
    tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()

    # polar projection, which involves cycle, and also has limits in
    # its coordinates, needs a special method to find the extremes
    # (min, max of the coordinate within the view).

    # 20, 20 : number of sampling points along x, y direction
    extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
                                                     lon_cycle = 360,
                                                     lat_cycle = None,
                                                     lon_minmax = None,
                                                     lat_minmax = (0, np.inf),
                                                     )

    grid_locator1 = angle_helper.LocatorDMS(12)
    # Find a grid values appropriate for the coordinate (degree,
    # minute, second).

    tick_formatter1 = angle_helper.FormatterDMS()
    # And also uses an appropriate formatter.  Note that, the
    # acceptable Locator and Formatter class is a bit different than
    # that of mpl's, and you cannot directly use mpl's Locator and
    # Formatter here (but may be possible in the future).

    grid_helper = GridHelperCurveLinear(tr,
                                        extreme_finder=extreme_finder,
                                        grid_locator1=grid_locator1,
                                        tick_formatter1=tick_formatter1
                                        )

    ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)

    # NOTE(review): itervalues() is Python 2 only; on Python 3 this needs
    # .values() — confirm the targeted Python version.
    for axis in ax1.axis.itervalues():
        axis.set_visible(False)

    fig.add_subplot(ax1)

    grid_helper = ax1.get_grid_helper()

    # floating axis at angle = 130 deg, radius clipped to [0.001, 10]
    ax1.axis["lat1"] = axis = grid_helper.new_floating_axis(0, 130,
                                                            axes=ax1,
                                                            axis_direction="left"
                                                            )
    axis.label.set_text("Test")
    axis.label.set_visible(True)
    axis.get_helper()._extremes=0.001, 10

    grid_helper = ax1.get_grid_helper()

    # floating axis at angle = 50 deg, radius clipped to [0.001, 10]
    ax1.axis["lat2"] = axis = grid_helper.new_floating_axis(0, 50, axes=ax1,
                                                            axis_direction="right")
    axis.label.set_text("Test")
    axis.label.set_visible(True)
    axis.get_helper()._extremes=0.001, 10

    # floating axis at radius = 10, angle clipped to [50, 130] deg
    ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 10,
                                                           axes=ax1,
                                                           axis_direction="bottom")
    axis.label.set_text("Test 2")
    axis.get_helper()._extremes= 50, 130
    axis.major_ticklabels.set_axis_direction("top")
    axis.label.set_axis_direction("top")

    # coarser tick spacing for both coordinates
    grid_helper.grid_finder.grid_locator1.den = 5
    grid_helper.grid_finder.grid_locator2._nbins = 5

    ax1.set_aspect(1.)
    ax1.set_xlim(-5, 12)
    ax1.set_ylim(-5, 10)

    ax1.grid(True)
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    fig = plt.figure(1, figsize=(5, 5))
    fig.clf()

    # Pick one of the demos; only curvelinear_test3 runs by default.
    #test3()
    #curvelinear_test2(fig)
    curvelinear_test3(fig)

    #plt.draw()
    plt.show()
| gpl-2.0 |
jcchin/MagnePlane | src/hyperloop/Python/OldMagnePlaneCode/tube_cost.py | 4 | 4899 | # import libraries
from math import pi, sin
import matplotlib.pyplot as plt
from openmdao.core.component import Component
class TubeCost(Component):
    """OpenMDAO component estimating tube wall sizing, capital cost and
    ticket price for a submerged transit tube.

    Inputs are material/logistics unit costs, bond financing terms and
    tube geometry; outputs are the wall thickness needed to survive the
    external pressure (with a safety factor), the resulting steel mass
    and cost per meter, and the per-passenger ticket price that
    amortizes the bond over its maturity.
    """

    def __init__(self):
        super(TubeCost, self).__init__()

        # --- inputs ---
        self.add_param('cmt', 1.2, desc='cost of materials', units='1/kg')
        self.add_param('cship', 1.e11, desc='cost to ship and crew', units='')
        self.add_param('cpod', 1.e6, desc='cost of a pod', units='')
        self.add_param('bm', 20., desc='bond maturity')
        # BUGFIX: an interest rate is dimensionless; it was declared with
        # units='deg' (apparently copy-pasted from an angle parameter),
        # which could trigger a bogus unit conversion when connected.
        self.add_param('ib', 0.06, desc='interest bond rate', units='')
        self.add_param('stress', 2.0e8, desc='stress of steel', units='Pa')
        self.add_param('sf', 5.0, desc='safety factor', units='')
        self.add_param('depth', 10., desc='depth of tube', units='m')
        self.add_param('density',
                       8050.0,
                       desc='density of steel',
                       units='kg/m**3')
        self.add_param('g', 9.81, desc='gravity', units='m/s**2')
        self.add_param('radius', 4.0, desc='tube radius', units='m')
        self.add_param('pod_freq',
                       30.,
                       desc='seconds between departures',
                       units='s')
        self.add_param('range', 4.1e6, desc='length of tube', units='m')
        self.add_param('npax', 25., desc='passengers per pod', units='')
        # BUGFIX: desc said 'passengers per pod' (copy/paste from npax).
        self.add_param('speed', 270., desc='pod speed', units='m/s')

        # --- outputs ---
        self.add_output('tube_weight',
                        0.0,
                        desc='tube weight per meter',
                        units='kg/m')
        self.add_output('po_tube',
                        0.0,
                        desc='pressure on the tube',
                        units='kg/m**2')
        self.add_output('tube_thick', 0.0, desc='tube thickness', units='m')
        self.add_output('ct', 0.0, desc='tube cost per meter', units='1/m')
        self.add_output('cttot', 0.0, desc='total tube cost', units='')
        self.add_output('npod',
                        0.0,
                        desc='number of pods in the tube',
                        units='')
        self.add_output('ctick', 0.0, desc='ticket cost', units='1/m')

    def solve_nonlinear(self, p, u, r):
        """Compute wall thickness, costs and ticket price.

        p, u, r are the params/unknowns/residuals vectors passed in by
        the OpenMDAO framework.
        """
        # external pressure: 1 atm + hydrostatic head (water, ~1000 kg/m**3)
        u['po_tube'] = 101000 + p['depth'] * p['g'] * 1000.
        # thin-wall pressure-vessel hoop-stress sizing with safety factor
        u['tube_thick'] = p['sf'] * u['po_tube'] * p['radius'] / (2 *
                                                                  p['stress'])
        # annulus cross-section area times density -> steel mass per meter
        u['tube_weight'] = p['density'] * pi * u['tube_thick'] * (
            2 * p['radius'] - u['tube_thick'])
        u['ct'] = p['cmt'] * u['tube_weight']
        u['cttot'] = u['ct'] * p['range']
        # pods simultaneously en route = travel time / departure interval
        u['npod'] = (p['range'] / p['speed']) / p['pod_freq']
        # amortize (tube + pods + shipping) incl. interest over the bond
        # maturity, divided by passenger throughput per second
        u['ctick'] = ((u['ct']*p['range'] + p['cpod']*u['npod'] + p['cship'])*(1+p['ib'])) \
            / (p['npax']/p['pod_freq'])/p['bm']/365./24./3600.
if __name__ == '__main__':
    from openmdao.core.problem import Problem
    from openmdao.core.group import Group

    p = Problem(root=Group())
    p.root.add('cost', TubeCost())
    p.setup()
    p.run()

    # save variable sweeps in arrays
    cost_array = []
    cx = []
    cost_array2 = []
    cx2 = []

    print(p['cost.po_tube'])
    print(p['cost.tube_thick'])
    print(p['cost.ct'])
    # BUGFIX: 'cost.ct' was printed twice; the second line presumably
    # meant the total tube cost.
    print(p['cost.cttot'])
    print(p['cost.npod'])

    # sweep departure interval (BUGFIX: range() instead of the
    # Python-2-only xrange(); identical behaviour on Python 2)
    for i in range(1, 100, 1):
        p['cost.pod_freq'] = i
        p.run()
        cost_array.append(p['cost.ctick'])
        cx.append(i)

    # sweep steel strength at the default departure interval
    for i in range(10, 35, 1):
        p['cost.pod_freq'] = 30.
        p['cost.stress'] = i * 1.e7
        p.run()
        cost_array2.append(p['cost.ctick'])
        cx2.append(i * 1.e7)

    # plot variable sweeps
    fig = plt.figure()
    a1 = fig.add_subplot(211)
    a1.plot(cx, cost_array)
    plt.xlabel('seconds between departures')
    plt.ylabel('ticket cost')
    a2 = fig.add_subplot(212)
    a2.plot(cx2, cost_array2)
    plt.xlabel('steel strength (Pa)')
    plt.ylabel('ticket price')
    plt.show()
# Tom Gregory
# Cost of steel per ton is about USD 777 and fabrication +
# erection cost is about USD 266. But this is for industrial applications upto
# 14 meters height. This may be high for high rise buildings but fabrication
# and erection costs generally do not cross the supply cost.
# Supply AUD 2,500/tonne (~USD 1,821/ton)
# Shop Detailing AUD 500/tonne (~USD 364/ton)
# Fabrication AUD 3,000/tonne (~USD 2,185/ton)
# Transport AUD 150/tonne (~USD 109/ton)
# Erection Labour AUD 2,400/tonne (~USD 1,748/ton)
# Erection Plant AUD 1,200/tonne (~USD 874/ton)
# TOTAL AUD 9,750/tonne (~USD 5,339/ton)
# Steel:
# Another reference
# Tube cost = supply + transport + stir (friction) welding + erection
# cmtl = 700 + 150 + 140 + 200 = 1190 ~ 1200 $/(1000 kg)
# ship cost: w friction welding, handling,
| apache-2.0 |
sssllliang/BuildingMachineLearningSystemsWithPython | ch09/utils.py | 24 | 5568 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
import sys
from matplotlib import pylab
import numpy as np
# Data and chart output directories live next to this file; create them
# on first import.
DATA_DIR = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), "data")

CHART_DIR = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), "charts")

for d in [DATA_DIR, CHART_DIR]:
    if not os.path.exists(d):
        os.mkdir(d)

# Put your directory to the different music genres here
GENRE_DIR = None
GENRE_LIST = ["classical", "jazz", "country", "pop", "rock", "metal"]

# Put your directory to the test dir here
TEST_DIR = None

# Fail fast at import time until the user fills in the two paths above.
if GENRE_DIR is None or TEST_DIR is None:
    print("Please set GENRE_DIR and TEST_DIR in utils.py")
    sys.exit(1)
def plot_confusion_matrix(cm, genre_list, name, title):
    """Render a confusion matrix and save it to CHART_DIR.

    cm : square matrix; vmin/vmax assume values normalized to [0, 1]
    genre_list : tick labels for both axes
    name : suffix of the output file confusion_matrix_<name>.png
    title : figure title
    """
    pylab.clf()
    pylab.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=1.0)
    ax = pylab.axes()
    ax.set_xticks(range(len(genre_list)))
    ax.set_xticklabels(genre_list)
    ax.xaxis.set_ticks_position("bottom")
    ax.set_yticks(range(len(genre_list)))
    ax.set_yticklabels(genre_list)
    pylab.title(title)
    pylab.colorbar()
    pylab.grid(False)
    # NOTE(review): show() runs before the axis labels are set and before
    # savefig(); the interactive window therefore lacks the labels that do
    # appear in the saved PNG.  Kept as-is to preserve behaviour.
    pylab.show()
    pylab.xlabel('Predicted class')
    pylab.ylabel('True class')
    pylab.grid(False)
    pylab.savefig(
        os.path.join(CHART_DIR, "confusion_matrix_%s.png" % name), bbox_inches="tight")
def plot_pr(auc_score, name, precision, recall, label=None):
    """Plot a precision/recall curve and save it as pr_<name>.png.

    auc_score : area under the curve, shown in the title
    name : base of the output file name (spaces become underscores)
    precision, recall : curve coordinates
    label : extra text appended to the title
    """
    pylab.clf()
    pylab.figure(num=None, figsize=(5, 4))
    pylab.grid(True)
    pylab.fill_between(recall, precision, alpha=0.5)
    pylab.plot(recall, precision, lw=1)
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Recall')
    pylab.ylabel('Precision')
    pylab.title('P/R curve (AUC = %0.2f) / %s' % (auc_score, label))
    filename = name.replace(" ", "_")
    pylab.savefig(
        os.path.join(CHART_DIR, "pr_" + filename + ".png"), bbox_inches="tight")
def plot_roc(auc_score, name, tpr, fpr, label=None):
    """Plot a ROC curve (with the chance diagonal) and save it as
    roc_<name>.png.

    auc_score : area under the curve, shown in the title
    name : base of the output file name (spaces become underscores)
    tpr, fpr : true/false positive rates along the curve
    label : extra text appended to the title
    """
    pylab.clf()
    pylab.figure(num=None, figsize=(5, 4))
    pylab.grid(True)
    # dashed diagonal = random-guess baseline
    pylab.plot([0, 1], [0, 1], 'k--')
    pylab.plot(fpr, tpr)
    pylab.fill_between(fpr, tpr, alpha=0.5)
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('False Positive Rate')
    pylab.ylabel('True Positive Rate')
    pylab.title('ROC curve (AUC = %0.2f) / %s' %
                (auc_score, label), verticalalignment="bottom")
    pylab.legend(loc="lower right")
    filename = name.replace(" ", "_")
    pylab.savefig(
        os.path.join(CHART_DIR, "roc_" + filename + ".png"), bbox_inches="tight")
def show_most_informative_features(vectorizer, clf, n=20):
    """Print the n most negative and n most positive features side by side.

    Left column: strongest negative coefficients (ascending); right
    column: strongest positive coefficients (descending).
    """
    ranked = sorted(zip(clf.coef_[0], vectorizer.get_feature_names()))
    most_negative = ranked[:n]
    most_positive = ranked[:-(n + 1):-1]
    for (neg_coef, neg_feat), (pos_coef, pos_feat) in zip(most_negative,
                                                          most_positive):
        print("\t%.4f\t%-15s\t\t%.4f\t%-15s" % (neg_coef, neg_feat,
                                                pos_coef, pos_feat))
def plot_log():
    """Plot log(P) over P on (0, 1) and save it as log_probs.png.

    Illustrates why log-probabilities are used: the logarithm maps the
    (0, 1] probability range onto (-inf, 0].
    """
    pylab.clf()
    # start slightly above 0 since log(0) is undefined
    x = np.arange(0.001, 1, 0.001)
    y = np.log(x)

    pylab.title('Relationship between probabilities and their logarithm')
    pylab.plot(x, y)
    pylab.grid(True)
    pylab.xlabel('P')
    pylab.ylabel('log(P)')
    filename = 'log_probs.png'
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_feat_importance(feature_names, clf, name):
    """Bar-plot the classifier's feature coefficients, sorted by value,
    and save the chart as feat_imp_<name>.png.

    feature_names : array of names indexable by the coefficient order
    clf : fitted linear classifier exposing coef_
    name : chart title suffix and output file name base
    """
    pylab.clf()
    coef_ = clf.coef_
    # NOTE(review): the |coef| argsort is immediately re-sorted by signed
    # value below, so the final order is simply ascending by coefficient;
    # the first argsort looks redundant but is kept to preserve behaviour.
    important = np.argsort(np.absolute(coef_.ravel()))
    f_imp = feature_names[important]
    coef = coef_.ravel()[important]
    inds = np.argsort(coef)
    f_imp = f_imp[inds]
    coef = coef[inds]
    xpos = np.array(range(len(coef)))
    pylab.bar(xpos, coef, width=1)

    pylab.title('Feature importance for %s' % (name))
    ax = pylab.gca()
    ax.set_xticks(np.arange(len(coef)))
    labels = ax.set_xticklabels(f_imp)
    # rotate tick labels so long feature names stay readable
    for label in labels:
        label.set_rotation(90)
    filename = name.replace(" ", "_")
    pylab.savefig(os.path.join(
        CHART_DIR, "feat_imp_%s.png" % filename), bbox_inches="tight")
def plot_feat_hist(data_name_list, filename=None):
    """Plot density histograms for a list of features, two per row, and
    save the figure to CHART_DIR.

    data_name_list : list of (values, display_name) pairs
    filename : output file name; defaults to feat_hist_<last name>.png
    """
    pylab.clf()
    # BUGFIX: use floor division — on Python 3 the true division produced
    # a float row count, which range() and pylab.subplot() reject.
    num_rows = 1 + (len(data_name_list) - 1) // 2
    num_cols = 1 if len(data_name_list) == 1 else 2
    pylab.figure(figsize=(5 * num_cols, 4 * num_rows))

    for i in range(num_rows):
        for j in range(num_cols):
            idx = i * num_cols + j
            # BUGFIX: guard against odd-length input; the last grid cell
            # used to raise IndexError.
            if idx >= len(data_name_list):
                break
            pylab.subplot(num_rows, num_cols, 1 + idx)
            x, name = data_name_list[idx]
            pylab.title(name)
            pylab.xlabel('Value')
            pylab.ylabel('Density')
            # the histogram of the data
            max_val = np.max(x)
            if max_val <= 1.0:
                bins = 50
            elif max_val > 50:
                bins = 50
            else:
                # BUGFIX: hist() needs an integer bin count; max_val may
                # be a numpy float.
                bins = int(max_val)
            n, bins, patches = pylab.hist(
                x, bins=bins, normed=1, facecolor='green', alpha=0.75)

            pylab.grid(True)

    if not filename:
        filename = "feat_hist_%s.png" % name

    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_bias_variance(data_sizes, train_errors, test_errors, name):
    """Plot training vs. test error over data-set size (the classic
    bias/variance diagnostic) and save it as bv_<name>.png.

    data_sizes : x values (training-set sizes)
    train_errors, test_errors : y values in [0, 1]
    name : chart title suffix and output file name base
    """
    pylab.clf()
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Data set size')
    pylab.ylabel('Error')
    pylab.title("Bias-Variance for '%s'" % name)
    # solid line: training error; dashed line: test error
    pylab.plot(
        data_sizes, train_errors, "-", data_sizes, test_errors, "--", lw=1)
    pylab.legend(["train error", "test error"], loc="upper right")
    pylab.grid(True)
    pylab.savefig(os.path.join(CHART_DIR, "bv_" + name + ".png"))
| mit |
skywalkerytx/oracle | src/main/python/stats.py | 1 | 2546 | # coding=utf-8
import psycopg2
import numpy as np
import matplotlib.pyplot as plt

# Sweep the lower bound of the stochastic indicator K and, for each
# threshold, measure how often a KDJ+MACD golden-cross signal was
# followed by a positive label (label.vector[1] = 1).
con = psycopg2.connect(database = 'nova',user = 'nova',password = 'emeth')
cur = con.cursor()
# Extra SQL filter appended to both queries: K above the threshold and
# rows since 2016-03-01 only.
extra = "and k>%s and raw.date >='2016-03-01'"
X = np.arange(10,80,1)        # candidate K lower bounds to evaluate
sr = np.zeros(len(X))         # success ratio per threshold
recall = np.zeros(len(X))     # estimated number of correct picks per threshold
count = 0
for lower_bound in X:
    lb = int(lower_bound)
    # failures: golden cross on both KDJ and MACD, but label 0
    cur.execute('''
SELECT
count(1)
FROM
raw
INNER JOIN label
ON raw.code = label.code AND raw.date = label.date
WHERE
raw.kdjcross='金叉' and raw.macdcross='金叉' and label.vector[1]=0 '''+extra,(lb,))
    fail = cur.fetchone()[0]
    # successes: same condition with label 1
    cur.execute('''
SELECT
count(1)
FROM
raw
INNER JOIN label
ON raw.code = label.code AND raw.date = label.date
WHERE
raw.kdjcross='金叉' and raw.macdcross='金叉' and label.vector[1]=1 '''+extra,(lb,))
    success = cur.fetchone()[0]
    total = fail+success
    cr = 1.0*success/total    # success ratio at this threshold
    # presumably 384222 is the total row count and 2575 the number of
    # stocks, scaling signal counts to a per-day estimate -- TODO confirm
    willpick = total/384222.0*2575
    rightpick = success/384222.0*2575
    print('%d %.4f will pick:%.4f right amount: %.4f'%(lb,cr,willpick,rightpick))
    sr[count] = cr
    recall[count] = rightpick
    count = count +1
# top panel: success ratio vs threshold; bottom panel: estimated recall
plt.figure(1)
plt.subplot(211)
plt.plot(X,sr,'r')
plt.subplot(212)
plt.plot(X,recall,'b')
plt.show()
latests = []
# Flip to True to regenerate result files for the fixed historical dates below
GeneratePast = False
if GeneratePast:
    latests = ['2017-04-26', '2017-04-27', '2017-04-28']
else:
    # default: only the most recent trading date present in the table
    cur.execute('SELECT DISTINCT date FROM raw ORDER BY date DESC LIMIT 1')
    latests = [cur.fetchone()[0]]
for latest in latests:
    print(latest)
    # all double-golden-cross signals on that date, strongest K first
    cur.execute("""
SELECT
code,k
FROM
raw
WHERE
1=1
AND raw.kdjcross='金叉'
AND raw.macdcross='金叉'
AND date = %s
ORDER BY k DESC
""", (latest,))
    result = cur.fetchall()
    filename = 'data/result/' + latest + '.csv'
    print('writing %s' % filename)
    f = open(filename, 'w')
    f.write('code,prob\n')
    Sorted = []
    for line in result:
        code = line[0]
        k = line[1]
        # map this K value onto the threshold grid X to look up the
        # success rate estimated by the sweep above
        pos = len(X) - 1
        for j in range(1, len(X)):
            if X[j - 1] <= k and X[j] >= k:
                pos = j - 1
                break
        Sorted.append((sr[pos], code))
    # highest estimated success rate first
    Sorted = sorted(Sorted, reverse=True)
    for line in Sorted:
        code = line[1]
        prob = line[0]
        print(code, prob)
        if prob <= 0.52:
            continue  # keep only picks with estimated success rate above 52%
        s = code + ',>' + str(prob)[0:6] + '\n'
        f.write(s)
        print(s)
    f.close()
| mit |
jason-z-hang/airflow | setup.py | 1 | 3185 | from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
# Kept manually in sync with airflow.__version__
version = '1.5.2'
class Tox(TestCommand):
    """``python setup.py test`` command that delegates the run to tox."""

    # exposes ``--tox-args`` on the command line
    user_options = [('tox-args=', None, "Arguments to pass to tox")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.tox_args = ''

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # import here, because outside the eggs aren't loaded yet
        import tox
        errno = tox.cmdline(args=self.tox_args.split())
        # propagate tox's exit status as our own
        sys.exit(errno)
# Optional dependency groups, wired into ``extras_require`` below so users
# can install only what they need, e.g. ``pip install airflow[mysql]``.
celery = [
    'celery>=3.1.17',
    'flower>=0.7.3'
]
crypto = ['cryptography>=0.9.3']
doc = [
    'sphinx>=1.2.3',
    'sphinx-argparse>=0.1.13',
    'sphinx-rtd-theme>=0.1.6',
    'Sphinx-PyPI-upload>=0.2.1'
]
druid = ['pydruid>=0.2.1']
hdfs = ['snakebite>=2.4.13']
hive = [
    'hive-thrift-py>=0.0.1',
    'pyhive>=0.1.3',
    'pyhs2>=0.6.0',
]
jdbc = ['jaydebeapi>=0.2.0']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.13.0']
mysql = ['mysqlclient>=1.3.6']
optional = ['librabbitmq>=1.6.1']
oracle = ['cx_Oracle>=5.1.2']
postgres = ['psycopg2>=2.6']
s3 = ['boto>=2.36.0']
samba = ['pysmbclient>=0.1.3']
slack = ['slackclient>=0.15']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']

# convenience bundles: every supported database, and a full dev environment
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica
devel = all_dbs + doc + samba + s3 + ['nose'] + slack + crypto + oracle
setup(
    name='airflow',
    description='Programmatically author, schedule and monitor data pipelines',
    version=version,
    packages=find_packages(),
    package_data={'': ['airflow/alembic.ini']},
    include_package_data=True,
    zip_safe=False,
    scripts=['airflow/bin/airflow'],
    # core runtime dependencies; upper bounds pin known-compatible majors
    install_requires=[
        'alembic>=0.8.0, <0.9',
        'chartkick>=0.4.2, < 0.5',
        'dill>=0.2.2, <0.3',
        'flask>=0.10.1, <0.11',
        'flask-admin==1.2.0',
        'flask-cache>=0.13.1, <0.14',
        'flask-login==0.2.11',
        'future>=0.15.0, <0.16',
        'gunicorn>=19.3.0, <20.0',
        'jinja2>=2.7.3, <3.0',
        'markdown>=2.5.2, <3.0',
        'pandas>=0.15.2, <1.0.0',
        'pygments>=2.0.1, <3.0',
        'python-dateutil>=2.3, <3',
        'requests>=2.5.1, <3',
        'setproctitle>=1.1.8, <2',
        'sqlalchemy>=0.9.8',
        'thrift>=0.9.2, <0.10',
    ],
    # optional feature groups defined above this call
    extras_require={
        'all': devel + optional,
        'all_dbs': all_dbs,
        'celery': celery,
        'crypto': crypto,
        'devel': devel,
        'doc': doc,
        'druid': druid,
        'hdfs': hdfs,
        'hive': hive,
        'jdbc': jdbc,
        'mssql': mssql,
        'mysql': mysql,
        'oracle': oracle,
        'postgres': postgres,
        's3': s3,
        'samba': samba,
        'slack': slack,
        'statsd': statsd,
        'vertica': vertica,
    },
    author='Maxime Beauchemin',
    author_email='maximebeauchemin@gmail.com',
    url='https://github.com/airbnb/airflow',
    download_url=(
        'https://github.com/airbnb/airflow/tarball/' + version),
    # make ``python setup.py test`` run the suite through tox (class above)
    cmdclass={'test': Tox},
)
| apache-2.0 |
lbishal/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 34 | 3060 | # Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
    # Test FactorAnalysis ability to recover the data covariance structure
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 20, 5, 3

    # Some random settings for the generative model
    W = rng.randn(n_components, n_features)
    # latent variable of dim 3, 20 of it
    h = rng.randn(n_samples, n_components)
    # using gamma to model different noise variance
    # per component
    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)

    # generate observations
    # wlog, mean is 0
    X = np.dot(h, W) + noise

    # an unknown svd_method must be rejected both at construction time
    # and at fit time when the attribute is overwritten afterwards
    assert_raises(ValueError, FactorAnalysis, svd_method='foo')
    fa_fail = FactorAnalysis()
    fa_fail.svd_method = 'foo'
    assert_raises(ValueError, fa_fail.fit, X)
    fas = []
    for method in ['randomized', 'lapack']:
        fa = FactorAnalysis(n_components=n_components, svd_method=method)
        fa.fit(X)
        fas.append(fa)

        X_t = fa.transform(X)
        assert_equal(X_t.shape, (n_samples, n_components))

        # per-sample log-likelihoods must be consistent with the stored
        # training log-likelihood trace and with score()
        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))

        # EM must increase the log-likelihood monotonically
        diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood dif not increase')

        # Sample Covariance
        scov = np.cov(X, rowvar=0., bias=1.)

        # Model Covariance
        mcov = fa.get_covariance()
        diff = np.sum(np.abs(scov - mcov)) / W.size
        assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
    # noise_variance_init length must match the number of features
    fa = FactorAnalysis(n_components=n_components,
                        noise_variance_init=np.ones(n_features))
    assert_raises(ValueError, fa.fit, X[:, :2])

    f = lambda x, y: np.abs(getattr(x, y))  # sign will not be equal
    fa1, fa2 = fas
    # both SVD backends should agree up to sign
    for attr in ['loglike_', 'components_', 'noise_variance_']:
        assert_almost_equal(f(fa1, attr), f(fa2, attr))

    fa1.max_iter = 1
    fa1.verbose = True
    assert_warns(ConvergenceWarning, fa1.fit, X)

    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        fa.n_components = n_components
        fa.fit(X)
        cov = fa.get_covariance()
        precision = fa.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)
| bsd-3-clause |
belltailjp/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
    """The ground-truth target: x * sin(x) (vectorized over arrays)."""
    return np.sin(x) * x
#----------------------------------------------------------------------
#  First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)

# Observations
y = f(X).ravel()

# heteroscedastic noise: each sample gets its own random noise amplitude
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)

# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)

alpha = 0.95

# upper bound of the interval: 95th-percentile quantile regressor
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
                                n_estimators=250, max_depth=3,
                                learning_rate=.1, min_samples_leaf=9,
                                min_samples_split=9)

clf.fit(X, y)

# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)

# lower bound: refit the same model at the 5th percentile (1 - alpha)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)

# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)

# point estimate: refit with least-squares loss
clf.set_params(loss='ls')
clf.fit(X, y)

# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)

# Plot the function, the prediction and the 90% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
         np.concatenate([y_upper, y_lower[::-1]]),
         alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
msebire/intellij-community | python/helpers/pycharm_matplotlib_backend/backend_interagg.py | 4 | 3100 | import base64
import matplotlib
import os
import sys
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import FigureManagerBase, ShowBase
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
from datalore.display import display
PY3 = sys.version_info[0] >= 3
index = int(os.getenv("PYCHARM_MATPLOTLIB_INDEX", 0))
rcParams = matplotlib.rcParams
class Show(ShowBase):
    """Render every active figure through its manager; no GUI loop needed."""

    def __call__(self, **kwargs):
        all_managers = Gcf.get_all_fig_managers()
        if not all_managers:
            return
        for fig_manager in all_managers:
            fig_manager.show(**kwargs)

    def mainloop(self):
        # nothing to run: this backend pushes images out instead of blocking
        pass


show = Show()
# pyplot API hook: re-display the active figure after each pyplot call
# when interactive mode is on
def draw_if_interactive():
    if not matplotlib.is_interactive():
        return
    active_manager = Gcf.get_active()
    if active_manager is not None:
        active_manager.canvas.show()
# pyplot API hook: build a Figure and wrap it in this backend's manager
def new_figure_manager(num, *args, **kwargs):
    figure_cls = kwargs.pop('FigureClass', Figure)
    return new_figure_manager_given_figure(num, figure_cls(*args, **kwargs))
# pyplot API hook: attach the InterAgg canvas/manager pair to *figure*
def new_figure_manager_given_figure(num, figure):
    return FigureManagerInterAgg(FigureCanvasInterAgg(figure), num)
class FigureCanvasInterAgg(FigureCanvasAgg):
    """Agg canvas that ships each rendered figure to the IDE via display()."""

    def __init__(self, figure):
        FigureCanvasAgg.__init__(self, figure)

    def show(self):
        self.figure.tight_layout()
        FigureCanvasAgg.draw(self)

        # tostring_rgb lost its (x, y) arguments in matplotlib 1.2
        if matplotlib.__version__ < '1.2':
            buffer = self.tostring_rgb(0, 0)
        else:
            buffer = self.tostring_rgb()

        # a buffer with at most one distinct byte value is a blank canvas
        if len(set(buffer)) <= 1:
            # do not plot empty
            return

        render = self.get_renderer()
        width = int(render.width)

        # -1 tells the frontend this plot is not part of an interactive session
        plot_index = index if os.getenv("PYCHARM_MATPLOTLIB_INTERACTIVE", False) else -1
        display(DisplayDataObject(plot_index, width, buffer))

    def draw(self):
        FigureCanvasAgg.draw(self)
        # in interactive mode, every draw is immediately pushed to the IDE
        is_interactive = os.getenv("PYCHARM_MATPLOTLIB_INTERACTIVE", False)
        if is_interactive and matplotlib.is_interactive():
            self.show()
class FigureManagerInterAgg(FigureManagerBase):
    """Figure manager that assigns each figure a process-wide plot index."""

    def __init__(self, canvas, num):
        FigureManagerBase.__init__(self, canvas, num)
        global index
        # one shared counter across all figures created in this process
        index += 1
        self.canvas = canvas
        self._num = num
        self._shown = False

    def show(self, **kwargs):
        self.canvas.show()
        # the image has been pushed to the IDE; destroy the figure so
        # pyplot's registry does not accumulate stale managers
        Gcf.destroy(self._num)
class DisplayDataObject:
    """One rendered plot (raw RGB bytes plus metadata) ready for transport."""

    def __init__(self, plot_index, width, image_bytes):
        self.plot_index = plot_index
        self.image_width = width
        self.image_bytes = image_bytes

    def _repr_display_(self):
        # base64-encode the pixel data; on Python 3 turn the bytes result
        # into a str so it is JSON-serializable
        encoded = base64.b64encode(self.image_bytes)
        if PY3:
            encoded = encoded.decode()
        payload = {
            'plot_index': self.plot_index,
            'image_width': self.image_width,
            'image_base64': encoded
        }
        return ('pycharm-plot-image', payload)
| apache-2.0 |
pratapvardhan/pandas | pandas/tests/extension/category/test_categorical.py | 2 | 5378 | import string
import pytest
import pandas as pd
import numpy as np
from pandas.api.types import CategoricalDtype
from pandas import Categorical
from pandas.tests.extension import base
def make_data():
    """Draw 100 random ASCII letters (both cases) as categorical values."""
    alphabet = list(string.ascii_letters)
    return np.random.choice(alphabet, size=100)
@pytest.fixture
def dtype():
    """The extension dtype under test."""
    return CategoricalDtype()


@pytest.fixture
def data():
    """Length-100 Categorical for semantics tests."""
    return Categorical(make_data())


@pytest.fixture
def data_missing():
    """Length 2 array with [NA, Valid]"""
    return Categorical([np.nan, 'A'])


@pytest.fixture
def data_repeated():
    """Return different versions of data for count times"""
    def gen(count):
        for _ in range(count):
            yield Categorical(make_data())
    yield gen


@pytest.fixture
def data_for_sorting():
    # category order deliberately differs from lexical order so sorting
    # tests exercise the `ordered` semantics, not string comparison
    return Categorical(['A', 'B', 'C'], categories=['C', 'A', 'B'],
                       ordered=True)


@pytest.fixture
def data_missing_for_sorting():
    return Categorical(['A', None, 'B'], categories=['B', 'A'],
                       ordered=True)


@pytest.fixture
def na_value():
    """The scalar missing-value marker used by Categorical."""
    return np.nan


@pytest.fixture
def data_for_grouping():
    return Categorical(['a', 'a', None, None, 'b', 'b', 'a', 'c'])
class TestDtype(base.BaseDtypeTests):

    def test_array_type_with_arg(self, data, dtype):
        # construct_array_type() should hand back the Categorical class itself
        assert dtype.construct_array_type() is Categorical


class TestInterface(base.BaseInterfaceTests):

    @pytest.mark.skip(reason="Memory usage doesn't match")
    def test_memory_usage(self):
        # Is this deliberate?
        pass


class TestConstructors(base.BaseConstructorsTests):
    # base-class constructor tests apply unchanged
    pass


class TestReshaping(base.BaseReshapingTests):
    # concat/align/merge keep unobserved categories, so the base-class
    # expectations (which assume they are dropped) do not hold here

    @pytest.mark.skip(reason="Unobserved categories preseved in concat.")
    def test_concat_columns(self, data, na_value):
        pass

    @pytest.mark.skip(reason="Unobserved categories preseved in concat.")
    def test_align(self, data, na_value):
        pass

    @pytest.mark.skip(reason="Unobserved categories preseved in concat.")
    def test_align_frame(self, data, na_value):
        pass

    @pytest.mark.skip(reason="Unobserved categories preseved in concat.")
    def test_merge(self, data, na_value):
        pass
class TestGetitem(base.BaseGetitemTests):
    # Categorical.take is buggy (GH-20664): take-based tests are skipped
    # with one reusable marker until it is fixed
    skip_take = pytest.mark.skip(reason="GH-20664.")

    @pytest.mark.skip(reason="Backwards compatibility")
    def test_getitem_scalar(self):
        # CategoricalDtype.type isn't "correct" since it should
        # be a parent of the elements (object). But don't want
        # to break things by changing.
        pass

    @skip_take
    def test_take(self):
        # TODO remove this once Categorical.take is fixed
        pass

    @skip_take
    def test_take_negative(self):
        pass

    @skip_take
    def test_take_pandas_style_negative_raises(self):
        pass

    @skip_take
    def test_take_non_na_fill_value(self):
        pass

    @skip_take
    def test_take_out_of_bounds_raises(self):
        pass

    @pytest.mark.skip(reason="GH-20747. Unobserved categories.")
    def test_take_series(self):
        pass

    @skip_take
    def test_reindex_non_na_fill_value(self):
        pass

    @pytest.mark.xfail(reason="Categorical.take buggy")
    def test_take_empty(self):
        pass

    @pytest.mark.xfail(reason="test not written correctly for categorical")
    def test_reindex(self):
        pass
class TestSetitem(base.BaseSetitemTests):
    # base-class setitem tests apply unchanged
    pass


class TestMissing(base.BaseMissingTests):

    @pytest.mark.skip(reason="Not implemented")
    def test_fillna_limit_pad(self):
        pass

    @pytest.mark.skip(reason="Not implemented")
    def test_fillna_limit_backfill(self):
        pass
class TestMethods(base.BaseMethodsTests):
    # NOTE(review): stray `pass` left over from an earlier edit -- harmless,
    # could be removed
    pass

    @pytest.mark.skip(reason="Unobserved categories included")
    def test_value_counts(self, all_data, dropna):
        pass

    def test_combine_add(self, data_repeated):
        # GH 20825
        # When adding categoricals in combine, result is a string
        orig_data1, orig_data2 = data_repeated(2)
        s1 = pd.Series(orig_data1)
        s2 = pd.Series(orig_data2)
        result = s1.combine(s2, lambda x1, x2: x1 + x2)
        expected = pd.Series(([a + b for (a, b) in
                               zip(list(orig_data1), list(orig_data2))]))
        self.assert_series_equal(result, expected)

        # combining with a scalar broadcasts it against every element
        val = s1.iloc[0]
        result = s1.combine(val, lambda x1, x2: x1 + x2)
        expected = pd.Series([a + val for a in list(orig_data1)])
        self.assert_series_equal(result, expected)
class TestCasting(base.BaseCastingTests):
    # base-class casting tests apply unchanged
    pass


class TestArithmeticOps(base.BaseArithmeticOpsTests):

    def test_arith_scalar(self, data, all_arithmetic_operators):
        op_name = all_arithmetic_operators
        if op_name != '__rmod__':
            super(TestArithmeticOps, self).test_arith_scalar(data, op_name)
        else:
            pytest.skip('rmod never called when string is first argument')


class TestComparisonOps(base.BaseComparisonOpsTests):

    def _compare_other(self, s, data, op_name, other):
        op = self.get_op_from_name(op_name)
        if op_name == '__eq__':
            assert not op(data, other).all()
        elif op_name == '__ne__':
            assert op(data, other).all()
        else:
            # unordered categoricals only support equality; ordering
            # comparisons raise TypeError
            with pytest.raises(TypeError):
                op(data, other)
| bsd-3-clause |
CallaJun/hackprince | indico/matplotlib/text.py | 10 | 73585 | """
Classes for including text in a figure.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
import math
import warnings
import numpy as np
from matplotlib import cbook
from matplotlib import rcParams
import matplotlib.artist as artist
from matplotlib.artist import Artist
from matplotlib.cbook import is_string_like, maxdict
from matplotlib import docstring
from matplotlib.font_manager import FontProperties
from matplotlib.patches import bbox_artist, YAArrow, FancyBboxPatch
from matplotlib.patches import FancyArrowPatch, Rectangle
import matplotlib.transforms as mtransforms
from matplotlib.transforms import Affine2D, Bbox, Transform
from matplotlib.transforms import BboxBase, BboxTransformTo
from matplotlib.lines import Line2D
from matplotlib.artist import allow_rasterization
from matplotlib.backend_bases import RendererBase
from matplotlib.textpath import TextPath
def _process_text_args(override, fontdict=None, **kwargs):
"Return an override dict. See :func:`~pyplot.text' docstring for info"
if fontdict is not None:
override.update(fontdict)
override.update(kwargs)
return override
# Extracted from Text's method to serve as a function
def get_rotation(rotation):
    """
    Return the text angle as a float in [0, 360).

    *rotation* may be 'horizontal', 'vertical', None, or a numeric value
    in degrees (normalized modulo 360).
    """
    if rotation == 'vertical':
        angle = 90.
    elif rotation is None or rotation == 'horizontal':
        angle = 0.
    else:
        angle = float(rotation)
    return angle % 360
# these are not available for the object inspector until after the
# class is build so we define an initial set here for the init
# function and they will be overridden after object defn
docstring.interpd.update(Text="""
========================== ================================================
Property Value
========================== ================================================
alpha float or None
animated [True | False]
backgroundcolor any matplotlib color
bbox rectangle prop dict plus key 'pad' which is a
pad in points
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
color any matplotlib color
family ['serif' | 'sans-serif' | 'cursive' |
'fantasy' | 'monospace']
figure a matplotlib.figure.Figure instance
fontproperties a matplotlib.font_manager.FontProperties
instance
horizontalalignment or ha ['center' | 'right' | 'left']
label any string
linespacing float
lod [True | False]
multialignment ['left' | 'right' | 'center' ]
name or fontname string e.g.,
['Sans' | 'Courier' | 'Helvetica' ...]
position (x,y)
rotation [ angle in degrees 'vertical' | 'horizontal'
rotation_mode [ None | 'anchor']
size or fontsize [size in points | relative size e.g., 'smaller',
'x-large']
style or fontstyle [ 'normal' | 'italic' | 'oblique']
text string
transform a matplotlib.transform transformation instance
variant ['normal' | 'small-caps']
verticalalignment or va ['center' | 'top' | 'bottom' | 'baseline']
visible [True | False]
weight or fontweight ['normal' | 'bold' | 'heavy' | 'light' |
'ultrabold' | 'ultralight']
x float
y float
zorder any number
========================== ===============================================
""")
# TODO : This function may move into the Text class as a method. As a
# matter of fact, The information from the _get_textbox function
# should be available during the Text._get_layout() call, which is
# called within the _get_textbox. So, it would better to move this
# function as a method with some refactoring of _get_layout method.
def _get_textbox(text, renderer):
    """
    Calculate the bounding box of the text. Unlike
    :meth:`matplotlib.text.Text.get_extents` method, The bbox size of
    the text before the rotation is calculated.
    """
    projected_xs = []
    projected_ys = []

    theta = np.deg2rad(text.get_rotation())
    # un-rotate into the text-local frame so the box is measured unrotated
    tr = mtransforms.Affine2D().rotate(-theta)

    _, parts, d = text._get_layout(renderer)

    for t, wh, x, y in parts:
        w, h = wh

        xt1, yt1 = tr.transform_point((x, y))
        # shift down by the descent so the box includes descenders
        yt1 -= d
        xt2, yt2 = xt1 + w, yt1 + h

        projected_xs.extend([xt1, xt2])
        projected_ys.extend([yt1, yt2])

    # axis-aligned bounds in the un-rotated frame
    xt_box, yt_box = min(projected_xs), min(projected_ys)
    w_box, h_box = max(projected_xs) - xt_box, max(projected_ys) - yt_box

    # rotate the box origin back into display coordinates
    tr = mtransforms.Affine2D().rotate(theta)

    x_box, y_box = tr.transform_point((xt_box, yt_box))

    return x_box, y_box, w_box, h_box
class Text(Artist):
"""
Handle storing and drawing of text in window or data coordinates.
"""
zorder = 3
_cached = maxdict(50)
def __str__(self):
    return "Text(%g,%g,%s)" % (self._x, self._y, repr(self._text))

def __init__(self,
             x=0, y=0, text='',
             color=None,    # defaults to rc params
             verticalalignment='baseline',
             horizontalalignment='left',
             multialignment=None,
             fontproperties=None,  # defaults to FontProperties()
             rotation=None,
             linespacing=None,
             rotation_mode=None,
             **kwargs
             ):
    """
    Create a :class:`~matplotlib.text.Text` instance at *x*, *y*
    with string *text*.

    Valid kwargs are
    %(Text)s
    """
    Artist.__init__(self)
    self._x, self._y = x, y

    if color is None:
        color = rcParams['text.color']
    if fontproperties is None:
        fontproperties = FontProperties()
    elif is_string_like(fontproperties):
        # a bare string is interpreted as a fontconfig pattern
        fontproperties = FontProperties(fontproperties)

    self.set_text(text)
    self.set_color(color)
    self._verticalalignment = verticalalignment
    self._horizontalalignment = horizontalalignment
    self._multialignment = multialignment
    self._rotation = rotation
    self._fontproperties = fontproperties
    self._bbox = None
    self._bbox_patch = None  # a FancyBboxPatch instance
    self._renderer = None
    if linespacing is None:
        linespacing = 1.2   # Maybe use rcParam later.
    self._linespacing = linespacing
    self.set_rotation_mode(rotation_mode)
    # apply any remaining kwargs as artist properties
    self.update(kwargs)
    #self.set_bbox(dict(pad=0))
def __getstate__(self):
    d = super(Text, self).__getstate__()
    # remove the cached _renderer (if it exists): renderers hold backend
    # resources and are not picklable
    d['_renderer'] = None
    return d

def contains(self, mouseevent):
    """Test whether the mouse event occurred in the patch.

    In the case of text, a hit is true anywhere in the
    axis-aligned bounding-box containing the text.

    Returns True or False.
    """
    # a user-installed containment test takes precedence
    if six.callable(self._contains):
        return self._contains(self, mouseevent)

    if not self.get_visible() or self._renderer is None:
        return False, {}

    l, b, w, h = self.get_window_extent().bounds
    r, t = l + w, b + h

    x, y = mouseevent.x, mouseevent.y
    inside = (l <= x <= r and b <= y <= t)

    cattr = {}
    # if the text has a surrounding patch, also check containment for it,
    # and merge the results with the results for the text.
    if self._bbox_patch:
        patch_inside, patch_cattr = self._bbox_patch.contains(mouseevent)
        inside = inside or patch_inside
        cattr["bbox_patch"] = patch_cattr

    return inside, cattr

def _get_xy_display(self):
    'get the (possibly unit converted) transformed x, y in display coords'
    x, y = self.get_position()
    return self.get_transform().transform_point((x, y))

def _get_multialignment(self):
    # fall back to the horizontal alignment when no multiline alignment
    # was given explicitly
    if self._multialignment is not None:
        return self._multialignment
    else:
        return self._horizontalalignment

def get_rotation(self):
    'return the text angle as float in degrees'
    return get_rotation(self._rotation)  # string_or_number -> number

def set_rotation_mode(self, m):
    """
    Set the text rotation mode. If "anchor", the un-rotated text is first
    aligned according to its *ha* and *va*, and then rotated around the
    alignment reference point as the origin. If None (default), the text
    is rotated first and then aligned.
    """
    if m is None or m in ["anchor", "default"]:
        self._rotation_mode = m
    else:
        raise ValueError("Unknown rotation_mode : %s" % repr(m))

def get_rotation_mode(self):
    "get text rotation mode"
    return self._rotation_mode

def update_from(self, other):
    'Copy properties from other to self'
    Artist.update_from(self, other)
    self._color = other._color
    self._multialignment = other._multialignment
    self._verticalalignment = other._verticalalignment
    self._horizontalalignment = other._horizontalalignment
    # copy so the two Text objects don't share one FontProperties
    self._fontproperties = other._fontproperties.copy()
    self._rotation = other._rotation
    self._picker = other._picker
    self._linespacing = other._linespacing
def _get_layout(self, renderer):
    """
    return the extent (bbox) of the text together with
    multile-alignment information. Note that it returns a extent
    of a rotated text when necessary.
    """
    # layout results are memoized on the full property tuple
    key = self.get_prop_tup()
    if key in self._cached:
        return self._cached[key]

    horizLayout = []

    thisx, thisy = 0.0, 0.0
    xmin, ymin = 0.0, 0.0
    width, height = 0.0, 0.0
    lines = self.get_text().split('\n')

    whs = np.zeros((len(lines), 2))
    horizLayout = np.zeros((len(lines), 4))

    # Find full vertical extent of font,
    # including ascenders and descenders:
    tmp, lp_h, lp_bl = renderer.get_text_width_height_descent('lp',
                                                    self._fontproperties,
                                                    ismath=False)
    offsety = (lp_h - lp_bl) * self._linespacing

    baseline = 0
    # stack the lines top-to-bottom, measuring each as we go
    for i, line in enumerate(lines):
        clean_line, ismath = self.is_math_text(line)
        if clean_line:
            w, h, d = renderer.get_text_width_height_descent(clean_line,
                                                    self._fontproperties,
                                                    ismath=ismath)
        else:
            w, h, d = 0, 0, 0

        # For multiline text, increase the line spacing when the
        # text net-height(excluding baseline) is larger than that
        # of a "l" (e.g., use of superscripts), which seems
        # what TeX does.
        h = max(h, lp_h)
        d = max(d, lp_bl)

        whs[i] = w, h

        baseline = (h - d) - thisy
        thisy -= max(offsety, (h - d) * self._linespacing)
        horizLayout[i] = thisx, thisy, w, h
        thisy -= d
        width = max(width, w)
        descent = d

    # overall unrotated bounds: from the last line's bottom to the
    # first line's top
    ymin = horizLayout[-1][1]
    ymax = horizLayout[0][1] + horizLayout[0][3]
    height = ymax - ymin
    xmax = xmin + width

    # get the rotation matrix
    M = Affine2D().rotate_deg(self.get_rotation())

    offsetLayout = np.zeros((len(lines), 2))
    offsetLayout[:] = horizLayout[:, 0:2]

    # now offset the individual text lines within the box
    if len(lines) > 1:  # do the multiline aligment
        malign = self._get_multialignment()
        if malign == 'center':
            offsetLayout[:, 0] += width / 2.0 - horizLayout[:, 2] / 2.0
        elif malign == 'right':
            offsetLayout[:, 0] += width - horizLayout[:, 2]

    # the corners of the unrotated bounding box
    cornersHoriz = np.array(
        [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)],
        np.float_)
    cornersHoriz[:, 1] -= descent

    # now rotate the bbox
    cornersRotated = M.transform(cornersHoriz)

    txs = cornersRotated[:, 0]
    tys = cornersRotated[:, 1]

    # compute the bounds of the rotated box
    xmin, xmax = txs.min(), txs.max()
    ymin, ymax = tys.min(), tys.max()
    width = xmax - xmin
    height = ymax - ymin

    # Now move the box to the target position offset the display
    # bbox by alignment
    halign = self._horizontalalignment
    valign = self._verticalalignment

    rotation_mode = self.get_rotation_mode()
    if rotation_mode != "anchor":
        # compute the text location in display coords and the offsets
        # necessary to align the bbox with that location
        if halign == 'center':
            offsetx = (xmin + width / 2.0)
        elif halign == 'right':
            offsetx = (xmin + width)
        else:
            offsetx = xmin

        if valign == 'center':
            offsety = (ymin + height / 2.0)
        elif valign == 'top':
            offsety = (ymin + height)
        elif valign == 'baseline':
            offsety = (ymin + height) - baseline
        else:
            offsety = ymin
    else:
        # "anchor" mode: align the *unrotated* box, then rotate the
        # anchor point itself
        xmin1, ymin1 = cornersHoriz[0]
        xmax1, ymax1 = cornersHoriz[2]

        if halign == 'center':
            offsetx = (xmin1 + xmax1) / 2.0
        elif halign == 'right':
            offsetx = xmax1
        else:
            offsetx = xmin1

        if valign == 'center':
            offsety = (ymin1 + ymax1) / 2.0
        elif valign == 'top':
            offsety = ymax1
        elif valign == 'baseline':
            offsety = ymax1 - baseline
        else:
            offsety = ymin1

        offsetx, offsety = M.transform_point((offsetx, offsety))

    xmin -= offsetx
    ymin -= offsety

    bbox = Bbox.from_bounds(xmin, ymin, width, height)

    # now rotate the positions around the first x,y position
    xys = M.transform(offsetLayout)
    xys -= (offsetx, offsety)

    xs, ys = xys[:, 0], xys[:, 1]

    ret = bbox, list(zip(lines, whs, xs, ys)), descent
    self._cached[key] = ret
    return ret
def set_bbox(self, rectprops):
    """
    Draw a bounding box around self. rectprops are any settable
    properties for a rectangle, e.g., facecolor='red', alpha=0.5.

      t.set_bbox(dict(facecolor='red', alpha=0.5))

    If rectprops has a "boxstyle" key, a FancyBboxPatch
    is initialized with rectprops and will be drawn. The mutation
    scale of the FancyBboxPatch is set to the fontsize.

    ACCEPTS: rectangle prop dict
    """
    # The self._bbox_patch object is created only if rectprops has
    # boxstyle key. Otherwise, self._bbox will be set to the
    # rectprops and the bbox will be drawn using bbox_artist
    # function. This is to keep the backward compatibility.
    if rectprops is not None and "boxstyle" in rectprops:
        props = rectprops.copy()
        boxstyle = props.pop("boxstyle")
        bbox_transmuter = props.pop("bbox_transmuter", None)

        # placeholder geometry; the real bounds are computed at draw
        # time by update_bbox_position_size()
        self._bbox_patch = FancyBboxPatch(
                                (0., 0.),
                                1., 1.,
                                boxstyle=boxstyle,
                                bbox_transmuter=bbox_transmuter,
                                transform=mtransforms.IdentityTransform(),
                                **props)
        self._bbox = None
    else:
        self._bbox_patch = None
        self._bbox = rectprops

    self._update_clip_properties()

def get_bbox_patch(self):
    """
    Return the bbox Patch object. Returns None if the
    FancyBboxPatch was not made.
    """
    return self._bbox_patch
def update_bbox_position_size(self, renderer):
    """
    Update the location and the size of the bbox. This method
    should be used when the position and size of the bbox needs to
    be updated before actually drawing the bbox.
    """

    # only when a FancyBboxPatch is attached (see set_bbox)
    if self._bbox_patch:

        trans = self.get_transform()

        # don't use self.get_position here, which refers to text position
        # in Text, and dash position in TextWithDash:
        posx = float(self.convert_xunits(self._x))
        posy = float(self.convert_yunits(self._y))

        posx, posy = trans.transform_point((posx, posy))

        x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
        self._bbox_patch.set_bounds(0., 0., w_box, h_box)
        theta = np.deg2rad(self.get_rotation())
        tr = mtransforms.Affine2D().rotate(theta)
        tr = tr.translate(posx + x_box, posy + y_box)
        self._bbox_patch.set_transform(tr)
        fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
        self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
        #self._bbox_patch.draw(renderer)

def _draw_bbox(self, renderer, posx, posy):

    """ Update the location and the size of the bbox
    (FancyBboxPatch), and draw.

    Same geometry computation as update_bbox_position_size, but takes
    pre-transformed display coordinates and actually draws the patch.
    """
    x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
    self._bbox_patch.set_bounds(0., 0., w_box, h_box)
    theta = np.deg2rad(self.get_rotation())
    tr = mtransforms.Affine2D().rotate(theta)
    tr = tr.translate(posx + x_box, posy + y_box)
    self._bbox_patch.set_transform(tr)
    fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
    self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
    self._bbox_patch.draw(renderer)
def _update_clip_properties(self):
clipprops = dict(clip_box=self.clipbox,
clip_path=self._clippath,
clip_on=self._clipon)
if self._bbox:
bbox = self._bbox.update(clipprops)
if self._bbox_patch:
bbox = self._bbox_patch.update(clipprops)
def set_clip_box(self, clipbox):
"""
Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
"""
super(Text, self).set_clip_box(clipbox)
self._update_clip_properties()
def set_clip_path(self, path, transform=None):
    """
    Set the artist's clip path, which may be:

      * a :class:`~matplotlib.patches.Patch` (or subclass) instance

      * a :class:`~matplotlib.path.Path` instance, in which case
        an optional :class:`~matplotlib.transforms.Transform`
        instance may be provided, which will be applied to the
        path before using it for clipping.

      * *None*, to remove the clipping path

    For efficiency, if the path happens to be an axis-aligned
    rectangle, this method will set the clipping box to the
    corresponding rectangle and set the clipping path to *None*.

    The new clip settings are also pushed to the background
    bbox/patch, if any.

    ACCEPTS: [ (:class:`~matplotlib.path.Path`,
    :class:`~matplotlib.transforms.Transform`) |
    :class:`~matplotlib.patches.Patch` | None ]
    """
    super(Text, self).set_clip_path(path, transform)
    self._update_clip_properties()
def set_clip_on(self, b):
    """
    Enable or disable clipping for this artist; a disabled clip lets
    the text render outside the axes, which may be surprising. Also
    forwards the setting to the background bbox/patch.

    ACCEPTS: [True | False]
    """
    super(Text, self).set_clip_on(b)
    self._update_clip_properties()
@allow_rasterization
def draw(self, renderer):
    """
    Draws the :class:`Text` object to the given *renderer*.
    """
    # Cache the renderer so get_window_extent() works after draw().
    if renderer is not None:
        self._renderer = renderer
    if not self.get_visible():
        return
    # Nothing to draw for empty / whitespace-only text.
    if self.get_text().strip() == '':
        return

    renderer.open_group('text', self.get_gid())

    # Layout returns the bounding bbox, per-line (text, size, offset)
    # info, and the font descent.
    bbox, info, descent = self._get_layout(renderer)
    trans = self.get_transform()

    # don't use self.get_position here, which refers to text position
    # in Text, and dash position in TextWithDash:
    posx = float(self.convert_xunits(self._x))
    posy = float(self.convert_yunits(self._y))

    posx, posy = trans.transform_point((posx, posy))
    canvasw, canvash = renderer.get_canvas_width_height()

    # draw the FancyBboxPatch
    if self._bbox_patch:
        self._draw_bbox(renderer, posx, posy)

    gc = renderer.new_gc()
    gc.set_foreground(self.get_color())
    gc.set_alpha(self.get_alpha())
    gc.set_url(self._url)
    self._set_gc_clip(gc)

    # Plain dict-style bbox (set_bbox without boxstyle) is drawn via
    # bbox_artist rather than a patch.
    if self._bbox:
        bbox_artist(self, renderer, self._bbox)
    angle = self.get_rotation()

    for line, wh, x, y in info:
        # Skip lines whose layout produced non-finite positions.
        if not np.isfinite(x) or not np.isfinite(y):
            continue

        # Only pass the Text object through for single-line text, so
        # backends can't mis-position multi-line layouts.
        mtext = self if len(info) == 1 else None
        x = x + posx
        y = y + posy
        # Backends with an inverted y axis need flipping.
        if renderer.flipy():
            y = canvash - y
        clean_line, ismath = self.is_math_text(line)

        if self.get_path_effects():
            from matplotlib.patheffects import PathEffectRenderer
            renderer = PathEffectRenderer(self.get_path_effects(),
                                          renderer)

        if rcParams['text.usetex']:
            renderer.draw_tex(gc, x, y, clean_line,
                              self._fontproperties, angle, mtext=mtext)
        else:
            renderer.draw_text(gc, x, y, clean_line,
                               self._fontproperties, angle,
                               ismath=ismath, mtext=mtext)

    gc.restore()
    renderer.close_group('text')
def get_color(self):
    """Return the foreground color used to draw the text."""
    return self._color
def get_fontproperties(self):
    """Return the :class:`~font_manager.FontProperties` of the text."""
    return self._fontproperties
def get_font_properties(self):
    """Alias for :meth:`get_fontproperties`."""
    return self.get_fontproperties()
def get_family(self):
    """Return the list of font families used for font lookup."""
    return self._fontproperties.get_family()
def get_fontfamily(self):
    """Alias for :meth:`get_family`."""
    return self.get_family()
def get_name(self):
    """Return the font name as a string."""
    return self._fontproperties.get_name()
def get_style(self):
    """Return the font style as a string."""
    return self._fontproperties.get_style()
def get_size(self):
    """Return the font size in points."""
    return self._fontproperties.get_size_in_points()
def get_variant(self):
    """Return the font variant as a string."""
    return self._fontproperties.get_variant()
def get_fontvariant(self):
    """Alias for :meth:`get_variant`."""
    return self.get_variant()
def get_weight(self):
    """Return the font weight as a string or number."""
    return self._fontproperties.get_weight()
def get_fontname(self):
    """Alias for :meth:`get_name`."""
    return self.get_name()
def get_fontstyle(self):
    """Alias for :meth:`get_style`."""
    return self.get_style()
def get_fontsize(self):
    """Alias for :meth:`get_size`."""
    return self.get_size()
def get_fontweight(self):
    """Alias for :meth:`get_weight`."""
    return self.get_weight()
def get_stretch(self):
    """Return the font stretch as a string or number."""
    return self._fontproperties.get_stretch()
def get_fontstretch(self):
    """Alias for :meth:`get_stretch`."""
    return self.get_stretch()
def get_ha(self):
    """Alias for :meth:`get_horizontalalignment`."""
    return self.get_horizontalalignment()
def get_horizontalalignment(self):
    """
    Return the horizontal alignment as a string: one of 'left',
    'center' or 'right'.
    """
    return self._horizontalalignment
def get_position(self):
    """Return the text position as a ``(x, y)`` tuple of floats."""
    px = float(self.convert_xunits(self._x))
    py = float(self.convert_yunits(self._y))
    return px, py
def get_prop_tup(self):
    """
    Return a hashable tuple of the properties that affect layout.

    Not intended to be human readable, but useful for backends who
    want to cache derived information about text (e.g., layouts) and
    need to know if the text has changed.
    """
    x, y = self.get_position()
    props = [x, y, self.get_text(), self._color,
             self._verticalalignment, self._horizontalalignment,
             hash(self._fontproperties),
             self._rotation, self._rotation_mode,
             self.figure.dpi, id(self._renderer)]
    return tuple(props)
def get_text(self):
    """Return the text string."""
    return self._text
def get_va(self):
    """Alias for :meth:`get_verticalalignment`."""
    return self.get_verticalalignment()
def get_verticalalignment(self):
    """
    Return the vertical alignment as a string: one of 'top',
    'center', 'bottom' or 'baseline'.
    """
    return self._verticalalignment
def get_window_extent(self, renderer=None, dpi=None):
    '''
    Return a :class:`~matplotlib.transforms.Bbox` object bounding
    the text, in display units.

    In addition to being used internally, this is useful for
    specifying clickable regions in a png file on a web page.

    *renderer* defaults to the _renderer attribute of the text
    object.  This is not assigned until the first execution of
    :meth:`draw`, so you must use this kwarg if you want
    to call :meth:`get_window_extent` prior to the first
    :meth:`draw`.  For getting web page regions, it is
    simpler to call the method after saving the figure.

    *dpi* defaults to self.figure.dpi; the renderer dpi is
    irrelevant.  For the web application, if figure.dpi is not
    the value used when saving the figure, then the value that
    was used must be specified as the *dpi* argument.
    '''
    #return _unit_box
    if not self.get_visible():
        return Bbox.unit()
    if dpi is not None:
        # Temporarily override the figure dpi so the layout matches the
        # dpi the figure will be saved at; restored before returning.
        dpi_orig = self.figure.dpi
        self.figure.dpi = dpi
    if self.get_text().strip() == '':
        # Empty text: degenerate zero-size bbox at the anchor point.
        tx, ty = self._get_xy_display()
        return Bbox.from_bounds(tx, ty, 0, 0)

    if renderer is not None:
        self._renderer = renderer
    if self._renderer is None:
        raise RuntimeError('Cannot get window extent w/o renderer')

    bbox, info, descent = self._get_layout(self._renderer)
    x, y = self.get_position()
    x, y = self.get_transform().transform_point((x, y))
    # Layout bbox is anchor-relative; shift it to the display position.
    bbox = bbox.translated(x, y)
    if dpi is not None:
        self.figure.dpi = dpi_orig
    return bbox
def set_backgroundcolor(self, color):
    """
    Set the background color of the text by updating the bbox.

    .. seealso::

        :meth:`set_bbox`
           To change the position of the bounding box.

    ACCEPTS: any matplotlib color
    """
    if self._bbox is None:
        # First call: create the bbox dict with matching edge color.
        self._bbox = dict(facecolor=color, edgecolor=color)
    else:
        # Later calls: only the face color changes.
        self._bbox['facecolor'] = color
    self._update_clip_properties()
def set_color(self, color):
    """
    Set the foreground color of the text

    ACCEPTS: any matplotlib color
    """
    # get_prop_tup hashes this value, so unhashable sequences
    # (e.g. RGB lists) are coerced to tuples.
    try:
        hash(color)
    except TypeError:
        color = tuple(color)
    self._color = color
def set_ha(self, align):
    """Alias for :meth:`set_horizontalalignment`."""
    self.set_horizontalalignment(align)
def set_horizontalalignment(self, align):
    """
    Set the horizontal alignment.

    ACCEPTS: [ 'center' | 'right' | 'left' ]

    Raises ValueError on any other value.
    """
    valid = ('center', 'right', 'left')
    if align not in valid:
        raise ValueError('Horizontal alignment must be one of %s' %
                         str(valid))
    self._horizontalalignment = align
def set_ma(self, align):
    # Docstring fixed: this delegates to set_multialignment, not
    # set_verticalalignment as previously claimed.
    'alias for set_multialignment'
    self.set_multialignment(align)
def set_multialignment(self, align):
    """
    Set the alignment for multiple lines layout.  The layout of the
    bounding box of all the lines is determined by the
    horizontalalignment and verticalalignment properties, but the
    multiline text within that box can be aligned independently.

    ACCEPTS: ['left' | 'right' | 'center' ]

    Raises ValueError for any other value.
    """
    legal = ('center', 'right', 'left')
    if align not in legal:
        # Old message said "Horizontal alignment" — wrong setter.
        raise ValueError('Multialignment must be one of %s' %
                         str(legal))
    self._multialignment = align
def set_linespacing(self, spacing):
    """
    Set the line spacing as a multiple of the font size.
    Default is 1.2.

    ACCEPTS: float (multiple of font size)
    """
    self._linespacing = spacing
def set_family(self, fontname):
    """
    Set the font family.  May be either a single string, or a list
    of strings in decreasing priority.  Each string may be either
    a real font name or a generic font class name.  If the latter,
    the specific font names will be looked up in the
    :file:`matplotlibrc` file.

    ACCEPTS: [FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' |
              'monospace' ]
    """
    self._fontproperties.set_family(fontname)
def set_variant(self, variant):
    """
    Set the font variant, either 'normal' or 'small-caps'.

    ACCEPTS: [ 'normal' | 'small-caps' ]
    """
    self._fontproperties.set_variant(variant)
def set_fontvariant(self, variant):
    """Alias for :meth:`set_variant`."""
    return self.set_variant(variant)
def set_name(self, fontname):
    """Alias for :meth:`set_family`."""
    return self.set_family(fontname)
def set_fontname(self, fontname):
    """Alias for :meth:`set_family`."""
    self.set_family(fontname)
def set_style(self, fontstyle):
    """
    Set the font style.

    ACCEPTS: [ 'normal' | 'italic' | 'oblique']
    """
    self._fontproperties.set_style(fontstyle)
def set_fontstyle(self, fontstyle):
    """Alias for :meth:`set_style`."""
    return self.set_style(fontstyle)
def set_size(self, fontsize):
    """
    Set the font size.  May be either a size string, relative to
    the default font size, or an absolute font size in points.

    ACCEPTS: [size in points | 'xx-small' | 'x-small' | 'small' |
              'medium' | 'large' | 'x-large' | 'xx-large' ]
    """
    self._fontproperties.set_size(fontsize)
def set_fontsize(self, fontsize):
    """Alias for :meth:`set_size`."""
    return self.set_size(fontsize)
def set_weight(self, weight):
    """
    Set the font weight.

    ACCEPTS: [a numeric value in range 0-1000 | 'ultralight' | 'light' |
              'normal' | 'regular' | 'book' | 'medium' | 'roman' |
              'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' |
              'extra bold' | 'black' ]
    """
    self._fontproperties.set_weight(weight)
def set_fontweight(self, weight):
    """Alias for :meth:`set_weight`."""
    return self.set_weight(weight)
def set_stretch(self, stretch):
    """
    Set the font stretch (horizontal condensation or expansion).

    ACCEPTS: [a numeric value in range 0-1000 | 'ultra-condensed' |
              'extra-condensed' | 'condensed' | 'semi-condensed' |
              'normal' | 'semi-expanded' | 'expanded' | 'extra-expanded' |
              'ultra-expanded' ]
    """
    self._fontproperties.set_stretch(stretch)
def set_fontstretch(self, stretch):
    """Alias for :meth:`set_stretch`."""
    return self.set_stretch(stretch)
def set_position(self, xy):
    """
    Set the (*x*, *y*) position of the text.

    ACCEPTS: (x,y)
    """
    x, y = xy[0], xy[1]
    self.set_x(x)
    self.set_y(y)
def set_x(self, x):
    """
    Set the *x* position of the text.

    ACCEPTS: float
    """
    self._x = x
def set_y(self, y):
    """
    Set the *y* position of the text.

    ACCEPTS: float
    """
    self._y = y
def set_rotation(self, s):
    """
    Set the rotation of the text.

    ACCEPTS: [ angle in degrees | 'vertical' | 'horizontal' ]
    """
    self._rotation = s
def set_va(self, align):
    """Alias for :meth:`set_verticalalignment`."""
    self.set_verticalalignment(align)
def set_verticalalignment(self, align):
    """
    Set the vertical alignment.

    ACCEPTS: [ 'center' | 'top' | 'bottom' | 'baseline' ]

    Raises ValueError on any other value.
    """
    valid = ('top', 'bottom', 'center', 'baseline')
    if align not in valid:
        raise ValueError('Vertical alignment must be one of %s' %
                         str(valid))
    self._verticalalignment = align
def set_text(self, s):
    """
    Set the text string *s*.

    It may contain newlines (``\\n``) or math in LaTeX syntax.

    ACCEPTS: string or anything printable with '%s' conversion.
    """
    self._text = '%s' % (s,)
@staticmethod
def is_math_text(s):
    """
    Return ``(cleaned_string, flag)`` for the string *s*.

    The flag indicates if *s* contains any mathtext, determined by
    counting unescaped dollar signs.  If no mathtext is present, the
    cleaned string has its dollar signs unescaped.  If usetex is on,
    the flag always has the value "TeX".
    """
    if rcParams['text.usetex']:
        # Everything goes through TeX; a lone space must be escaped.
        return (r'\ ' if s == ' ' else s), 'TeX'
    # An even number of non-escaped dollar signs means mathtext.
    if cbook.is_math_text(s):
        return s, True
    return s.replace(r'\$', '$'), False
def set_fontproperties(self, fp):
    """
    Set the font properties that control the text.  *fp* must be a
    :class:`matplotlib.font_manager.FontProperties` object (a string
    is accepted and converted).

    ACCEPTS: a :class:`matplotlib.font_manager.FontProperties` instance
    """
    if is_string_like(fp):
        fp = FontProperties(fp)
    # Store a copy so later mutation of the caller's object has no effect.
    self._fontproperties = fp.copy()
def set_font_properties(self, fp):
    """Alias for :meth:`set_fontproperties`."""
    self.set_fontproperties(fp)
# Register the Text kwarg table for %(Text)s docstring interpolation and
# expand the placeholders in Text.__init__'s docstring.
docstring.interpd.update(Text=artist.kwdoc(Text))
docstring.dedent_interpd(Text.__init__)
class TextWithDash(Text):
    """
    This is basically a :class:`~matplotlib.text.Text` with a dash
    (drawn with a :class:`~matplotlib.lines.Line2D`) before/after
    it. It is intended to be a drop-in replacement for
    :class:`~matplotlib.text.Text`, and should behave identically to
    it when *dashlength* = 0.0.

    The dash always comes between the point specified by
    :meth:`~matplotlib.text.Text.set_position` and the text. When a
    dash exists, the text alignment arguments (*horizontalalignment*,
    *verticalalignment*) are ignored.

    *dashlength* is the length of the dash in canvas units.
    (default = 0.0).

    *dashdirection* is one of 0 or 1, where 0 draws the dash after the
    text and 1 before.  (default = 0).

    *dashrotation* specifies the rotation of the dash, and should
    generally stay *None*. In this case
    :meth:`~matplotlib.text.TextWithDash.get_dashrotation` returns
    :meth:`~matplotlib.text.Text.get_rotation`.  (i.e., the dash takes
    its rotation from the text's rotation). Because the text center is
    projected onto the dash, major deviations in the rotation cause
    what may be considered visually unappealing results.
    (default = *None*)

    *dashpad* is a padding length to add (or subtract) space
    between the text and the dash, in canvas units.
    (default = 3)

    *dashpush* "pushes" the dash and text away from the point
    specified by :meth:`~matplotlib.text.Text.set_position` by the
    amount in canvas units.  (default = 0)

    .. note::

        The alignment of the two objects is based on the bounding box
        of the :class:`~matplotlib.text.Text`, as obtained by
        :meth:`~matplotlib.artist.Artist.get_window_extent`.  This, in
        turn, appears to depend on the font metrics as given by the
        rendering backend. Hence the quality of the "centering" of the
        label text with respect to the dash varies depending on the
        backend used.

    .. note::

        I'm not sure that I got the
        :meth:`~matplotlib.text.TextWithDash.get_window_extent` right,
        or whether that's sufficient for providing the object bounding
        box.
    """
    __name__ = 'textwithdash'

    def __str__(self):
        return "TextWithDash(%g,%g,%s)" % (self._x, self._y, repr(self._text))

    def __init__(self,
                 x=0, y=0, text='',
                 color=None,    # defaults to rc params
                 verticalalignment='center',
                 horizontalalignment='center',
                 multialignment=None,
                 fontproperties=None,  # defaults to FontProperties()
                 rotation=None,
                 linespacing=None,
                 dashlength=0.0,
                 dashdirection=0,
                 dashrotation=None,
                 dashpad=3,
                 dashpush=0,
                 ):
        Text.__init__(self, x=x, y=y, text=text, color=color,
                      verticalalignment=verticalalignment,
                      horizontalalignment=horizontalalignment,
                      multialignment=multialignment,
                      fontproperties=fontproperties,
                      rotation=rotation,
                      linespacing=linespacing)

        # The position (x,y) values for text and dashline
        # are bogus as given in the instantiation; they will
        # be set correctly by update_coords() in draw()

        self.dashline = Line2D(xdata=(x, x),
                               ydata=(y, y),
                               color='k',
                               linestyle='-')

        # The dash anchor; Text's _x/_y are recomputed from these in
        # update_coords().
        self._dashx = float(x)
        self._dashy = float(y)
        self._dashlength = dashlength
        self._dashdirection = dashdirection
        self._dashrotation = dashrotation
        self._dashpad = dashpad
        self._dashpush = dashpush

        #self.set_bbox(dict(pad=0))

    def get_position(self):
        "Return the position of the text as a tuple (*x*, *y*)"
        # NOTE: overrides Text.get_position to report the dash anchor,
        # not the (derived) text position.
        x = float(self.convert_xunits(self._dashx))
        y = float(self.convert_yunits(self._dashy))
        return x, y

    def get_prop_tup(self):
        """
        Return a hashable tuple of properties.

        Not intended to be human readable, but useful for backends who
        want to cache derived information about text (e.g., layouts) and
        need to know if the text has changed.
        """
        # Extend the base tuple with the dash-specific state.
        props = [p for p in Text.get_prop_tup(self)]
        props.extend([self._x, self._y, self._dashlength,
                      self._dashdirection, self._dashrotation, self._dashpad,
                      self._dashpush])
        return tuple(props)

    def draw(self, renderer):
        """
        Draw the :class:`TextWithDash` object to the given *renderer*.
        """
        self.update_coords(renderer)
        Text.draw(self, renderer)
        if self.get_dashlength() > 0.0:
            self.dashline.draw(renderer)

    def update_coords(self, renderer):
        """
        Computes the actual *x*, *y* coordinates for text based on the
        input *x*, *y* and the *dashlength*. Since the rotation is
        with respect to the actual canvas's coordinates we need to map
        back and forth.
        """
        dashx, dashy = self.get_position()
        dashlength = self.get_dashlength()
        # Shortcircuit this process if we don't have a dash
        if dashlength == 0.0:
            self._x, self._y = dashx, dashy
            return

        dashrotation = self.get_dashrotation()
        dashdirection = self.get_dashdirection()
        dashpad = self.get_dashpad()
        dashpush = self.get_dashpush()

        angle = get_rotation(dashrotation)
        # dashdirection flips the dash by half a turn.
        theta = np.pi * (angle / 180.0 + dashdirection - 1)
        cos_theta, sin_theta = np.cos(theta), np.sin(theta)

        transform = self.get_transform()

        # Compute the dash end points
        # The 'c' prefix is for canvas coordinates
        cxy = transform.transform_point((dashx, dashy))
        cd = np.array([cos_theta, sin_theta])
        c1 = cxy + dashpush * cd
        c2 = cxy + (dashpush + dashlength) * cd

        inverse = transform.inverted()
        (x1, y1) = inverse.transform_point(tuple(c1))
        (x2, y2) = inverse.transform_point(tuple(c2))
        self.dashline.set_data((x1, x2), (y1, y2))

        # We now need to extend this vector out to
        # the center of the text area.
        # The basic problem here is that we're "rotating"
        # two separate objects but want it to appear as
        # if they're rotated together.
        # This is made non-trivial because of the
        # interaction between text rotation and alignment -
        # text alignment is based on the bbox after rotation.
        # We reset/force both alignments to 'center'
        # so we can do something relatively reasonable.
        # There's probably a better way to do this by
        # embedding all this in the object's transformations,
        # but I don't grok the transformation stuff
        # well enough yet.
        we = Text.get_window_extent(self, renderer=renderer)
        w, h = we.width, we.height
        # Watch for zeros
        if sin_theta == 0.0:
            dx = w
            dy = 0.0
        elif cos_theta == 0.0:
            dx = 0.0
            dy = h
        else:
            tan_theta = sin_theta / cos_theta
            dx = w
            dy = w * tan_theta
            if dy > h or dy < -h:
                dy = h
                dx = h / tan_theta
        cwd = np.array([dx, dy]) / 2
        # Push the text center out past the dash end by dashpad.
        cwd *= 1 + dashpad / np.sqrt(np.dot(cwd, cwd))
        cw = c2 + (dashdirection * 2 - 1) * cwd

        newx, newy = inverse.transform_point(tuple(cw))
        self._x, self._y = newx, newy

        # Now set the window extent
        # I'm not at all sure this is the right way to do this.
        we = Text.get_window_extent(self, renderer=renderer)
        self._twd_window_extent = we.frozen()
        self._twd_window_extent.update_from_data_xy(np.array([c1]), False)

        # Finally, make text align center
        Text.set_horizontalalignment(self, 'center')
        Text.set_verticalalignment(self, 'center')

    def get_window_extent(self, renderer=None):
        '''
        Return a :class:`~matplotlib.transforms.Bbox` object bounding
        the text, in display units.

        In addition to being used internally, this is useful for
        specifying clickable regions in a png file on a web page.

        *renderer* defaults to the _renderer attribute of the text
        object.  This is not assigned until the first execution of
        :meth:`draw`, so you must use this kwarg if you want
        to call :meth:`get_window_extent` prior to the first
        :meth:`draw`.  For getting web page regions, it is
        simpler to call the method after saving the figure.
        '''
        self.update_coords(renderer)
        if self.get_dashlength() == 0.0:
            return Text.get_window_extent(self, renderer=renderer)
        else:
            # Cached by update_coords(): text extent expanded to
            # include the dash start point.
            return self._twd_window_extent

    def get_dashlength(self):
        """
        Get the length of the dash.
        """
        return self._dashlength

    def set_dashlength(self, dl):
        """
        Set the length of the dash.

        ACCEPTS: float (canvas units)
        """
        self._dashlength = dl

    def get_dashdirection(self):
        """
        Get the direction dash.  1 is before the text and 0 is after.
        """
        return self._dashdirection

    def set_dashdirection(self, dd):
        """
        Set the direction of the dash following the text.
        1 is before the text and 0 is after. The default
        is 0, which is what you'd want for the typical
        case of ticks below and on the left of the figure.

        ACCEPTS: int (1 is before, 0 is after)
        """
        self._dashdirection = dd

    def get_dashrotation(self):
        """
        Get the rotation of the dash in degrees.
        """
        # Falls back to the text rotation when no explicit value is set.
        if self._dashrotation is None:
            return self.get_rotation()
        else:
            return self._dashrotation

    def set_dashrotation(self, dr):
        """
        Set the rotation of the dash, in degrees

        ACCEPTS: float (degrees)
        """
        self._dashrotation = dr

    def get_dashpad(self):
        """
        Get the extra spacing between the dash and the text, in canvas units.
        """
        return self._dashpad

    def set_dashpad(self, dp):
        """
        Set the "pad" of the TextWithDash, which is the extra spacing
        between the dash and the text, in canvas units.

        ACCEPTS: float (canvas units)
        """
        self._dashpad = dp

    def get_dashpush(self):
        """
        Get the extra spacing between the dash and the specified text
        position, in canvas units.
        """
        return self._dashpush

    def set_dashpush(self, dp):
        """
        Set the "push" of the TextWithDash, which
        is the extra spacing between the beginning
        of the dash and the specified position.

        ACCEPTS: float (canvas units)
        """
        self._dashpush = dp

    def set_position(self, xy):
        """
        Set the (*x*, *y*) position of the :class:`TextWithDash`.

        ACCEPTS: (x, y)
        """
        self.set_x(xy[0])
        self.set_y(xy[1])

    def set_x(self, x):
        """
        Set the *x* position of the :class:`TextWithDash`.

        ACCEPTS: float
        """
        # Stored as the dash anchor; Text._x is derived in update_coords.
        self._dashx = float(x)

    def set_y(self, y):
        """
        Set the *y* position of the :class:`TextWithDash`.

        ACCEPTS: float
        """
        self._dashy = float(y)

    def set_transform(self, t):
        """
        Set the :class:`matplotlib.transforms.Transform` instance used
        by this artist.

        ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
        """
        # Keep the dash line's transform in sync with the text's.
        Text.set_transform(self, t)
        self.dashline.set_transform(t)

    def get_figure(self):
        'return the figure instance the artist belongs to'
        return self.figure

    def set_figure(self, fig):
        """
        Set the figure instance the artist belong to.

        ACCEPTS: a :class:`matplotlib.figure.Figure` instance
        """
        # The dash line must live on the same figure as the text.
        Text.set_figure(self, fig)
        self.dashline.set_figure(fig)
docstring.interpd.update(TextWithDash=artist.kwdoc(TextWithDash))
class OffsetFrom(object):
    """
    Callable helper that, given a renderer, returns an affine transform
    mapping offset coordinates (in *unit*) to display space, anchored
    at a reference point derived from *artist*.
    """
    def __init__(self, artist, ref_coord, unit="points"):
        """
        *artist* : an Artist, BboxBase or Transform the offset is
            measured from.
        *ref_coord* : for an Artist/BboxBase, the fractional (x, y)
            position inside its bounds; for a Transform, the point to
            transform.
        *unit* : "points" or "pixels"; unit of the offset coordinates.
        """
        self._artist = artist
        self._ref_coord = ref_coord
        self.set_unit(unit)

    def set_unit(self, unit):
        """Set the offset unit; one of "points" or "pixels"."""
        # Raise instead of assert so invalid input still fails under
        # ``python -O`` (asserts are stripped).
        if unit not in ("points", "pixels"):
            raise ValueError('unit must be "points" or "pixels", '
                             'got %r' % (unit,))
        self._unit = unit

    def get_unit(self):
        """Return the offset unit ("points" or "pixels")."""
        return self._unit

    def _get_scale(self, renderer):
        # Pixels per offset-unit for the current renderer.
        unit = self.get_unit()
        if unit == "pixels":
            return 1.
        else:
            return renderer.points_to_pixels(1.)

    def __call__(self, renderer):
        """
        Return an :class:`Affine2D` that scales by the unit-to-pixel
        factor and translates to the reference point in display space.
        """
        if isinstance(self._artist, Artist):
            bbox = self._artist.get_window_extent(renderer)
            l, b, w, h = bbox.bounds
            xf, yf = self._ref_coord
            x, y = l + w * xf, b + h * yf
        elif isinstance(self._artist, BboxBase):
            l, b, w, h = self._artist.bounds
            xf, yf = self._ref_coord
            x, y = l + w * xf, b + h * yf
        elif isinstance(self._artist, Transform):
            x, y = self._artist.transform_point(self._ref_coord)
        else:
            raise RuntimeError("unknown type")

        sc = self._get_scale(renderer)
        tr = Affine2D().scale(sc, sc).translate(x, y)

        return tr
class _AnnotationBase(object):
    """
    Mixin holding the annotated-point state (*xy*, *xycoords*) and the
    machinery that resolves coordinate-system specifications to
    transforms and display positions.
    """
    def __init__(self,
                 xy,
                 xycoords='data',
                 annotation_clip=None):
        # (x, y) of the annotated point, interpreted in *xycoords*.
        self.xy = xy
        self.xycoords = xycoords
        self.set_annotation_clip(annotation_clip)

        self._draggable = None

    def _get_xy(self, renderer, x, y, s):
        # Resolve (x, y), given in coordinate spec *s*, to display
        # coordinates.  *s* may be a (sx, sy) pair with a different
        # spec per axis.
        if isinstance(s, tuple):
            s1, s2 = s
        else:
            s1, s2 = s, s

        if s1 == 'data':
            x = float(self.convert_xunits(x))
        if s2 == 'data':
            y = float(self.convert_yunits(y))

        tr = self._get_xy_transform(renderer, (x, y), s)
        x1, y1 = tr.transform_point((x, y))
        return x1, y1

    def _get_xy_transform(self, renderer, xy, s):
        # Map a coordinate spec *s* (string, tuple, callable, Artist,
        # BboxBase or Transform) to a display-space transform.

        if isinstance(s, tuple):
            # Per-axis specs: blend x from s1 with y from s2.
            s1, s2 = s
            from matplotlib.transforms import blended_transform_factory
            tr1 = self._get_xy_transform(renderer, xy, s1)
            tr2 = self._get_xy_transform(renderer, xy, s2)
            tr = blended_transform_factory(tr1, tr2)
            return tr

        if six.callable(s):
            # A callable may return either a bbox or a transform.
            tr = s(renderer)
            if isinstance(tr, BboxBase):
                return BboxTransformTo(tr)
            elif isinstance(tr, Transform):
                return tr
            else:
                raise RuntimeError("unknown return type ...")
        if isinstance(s, Artist):
            bbox = s.get_window_extent(renderer)
            return BboxTransformTo(bbox)
        elif isinstance(s, BboxBase):
            return BboxTransformTo(s)
        elif isinstance(s, Transform):
            return s
        elif not is_string_like(s):
            raise RuntimeError("unknown coordinate type : %s" % (s,))

        if s == 'data':
            return self.axes.transData
        elif s == 'polar':
            from matplotlib.projections import PolarAxes
            tr = PolarAxes.PolarTransform()
            trans = tr + self.axes.transData
            return trans

        # Remaining specs are two-word strings, e.g. "axes fraction",
        # "figure points", "offset pixels".
        s_ = s.split()
        if len(s_) != 2:
            raise ValueError("%s is not a recognized coordinate" % s)

        bbox0, xy0 = None, None

        bbox_name, unit = s_
        # if unit is offset-like
        if bbox_name == "figure":
            bbox0 = self.figure.bbox
        elif bbox_name == "axes":
            bbox0 = self.axes.bbox
        # elif bbox_name == "bbox":
        #     if bbox is None:
        #         raise RuntimeError("bbox is specified as a coordinate but "
        #                            "never set")
        #     bbox0 = self._get_bbox(renderer, bbox)

        if bbox0 is not None:
            # Negative coordinates measure from the opposite
            # (right/top) edge of the bbox.
            x, y = xy
            bounds = bbox0.extents
            if x < 0:
                x0 = bounds[2]
            else:
                x0 = bounds[0]
            if y < 0:
                y0 = bounds[3]
            else:
                y0 = bounds[1]
            xy0 = (x0, y0)
        elif bbox_name == "offset":
            xy0 = self._get_ref_xy(renderer)

        if xy0 is not None:
            # reference x, y in display coordinate
            ref_x, ref_y = xy0
            from matplotlib.transforms import Affine2D
            if unit == "points":
                # dots per points
                dpp = self.figure.get_dpi() / 72.
                tr = Affine2D().scale(dpp, dpp)
            elif unit == "pixels":
                tr = Affine2D()
            elif unit == "fontsize":
                fontsize = self.get_size()
                dpp = fontsize * self.figure.get_dpi() / 72.
                tr = Affine2D().scale(dpp,
                                      dpp)
            elif unit == "fraction":
                w, h = bbox0.bounds[2:]
                tr = Affine2D().scale(w, h)
            else:
                raise ValueError("%s is not a recognized coordinate" % s)

            return tr.translate(ref_x, ref_y)

        else:
            raise ValueError("%s is not a recognized coordinate" % s)

    def _get_ref_xy(self, renderer):
        """
        return x, y (in display coordinate) that is to be used for a reference
        of any offset coordinate
        """

        if isinstance(self.xycoords, tuple):
            s1, s2 = self.xycoords
            if ((is_string_like(s1) and s1.split()[0] == "offset") or
                    (is_string_like(s2) and s2.split()[0] == "offset")):
                raise ValueError("xycoords should not be an offset coordinate")
            x, y = self.xy
            x1, y1 = self._get_xy(renderer, x, y, s1)
            x2, y2 = self._get_xy(renderer, x, y, s2)
            # x comes from the first coordinate spec, y from the second.
            return x1, y2
        elif (is_string_like(self.xycoords) and
              self.xycoords.split()[0] == "offset"):
            raise ValueError("xycoords should not be an offset coordinate")
        else:
            x, y = self.xy
            return self._get_xy(renderer, x, y, self.xycoords)
        #raise RuntimeError("must be defined by the derived class")

    # def _get_bbox(self, renderer):
    #     if hasattr(bbox, "bounds"):
    #         return bbox
    #     elif hasattr(bbox, "get_window_extent"):
    #         bbox = bbox.get_window_extent()
    #         return bbox
    #     else:
    #         raise ValueError("A bbox instance is expected but got %s" %
    #                          str(bbox))

    def set_annotation_clip(self, b):
        """
        set *annotation_clip* attribute.

          * True: the annotation will only be drawn when self.xy is inside
                  the axes.
          * False: the annotation will always be drawn regardless of its
                   position.
          * None: the self.xy will be checked only if *xycoords* is "data"
        """
        self._annotation_clip = b

    def get_annotation_clip(self):
        """
        Return *annotation_clip* attribute.
        See :meth:`set_annotation_clip` for the meaning of return values.
        """
        return self._annotation_clip

    def _get_position_xy(self, renderer):
        "Return the pixel position of the annotated point."
        x, y = self.xy
        return self._get_xy(renderer, x, y, self.xycoords)

    def _check_xy(self, renderer, xy_pixel):
        """
        given the xy pixel coordinate, check if the annotation need to
        be drawn.
        """

        b = self.get_annotation_clip()
        if b or (b is None and self.xycoords == "data"):
            # check if self.xy is inside the axes.
            if not self.axes.contains_point(xy_pixel):
                return False

        return True

    def draggable(self, state=None, use_blit=False):
        """
        Set the draggable state -- if state is

          * None : toggle the current state

          * True : turn draggable on

          * False : turn draggable off

        If draggable is on, you can drag the annotation on the canvas with
        the mouse.  The DraggableAnnotation helper instance is returned if
        draggable is on.
        """
        from matplotlib.offsetbox import DraggableAnnotation
        is_draggable = self._draggable is not None

        # if state is None we'll toggle
        if state is None:
            state = not is_draggable

        if state:
            if self._draggable is None:
                self._draggable = DraggableAnnotation(self, use_blit)
        else:
            if self._draggable is not None:
                self._draggable.disconnect()
            self._draggable = None

        return self._draggable

    # Deprecated aliases kept for backward compatibility (pre-1.4 API).
    @property
    @cbook.deprecated('1.4', message='Use `anncoords` instead',
                      name='textcoords', alternative='anncoords')
    def textcoords(self):
        return self.anncoords

    @textcoords.setter
    @cbook.deprecated('1.4', message='Use `anncoords` instead',
                      name='textcoords', alternative='anncoords')
    def textcoords(self, val):
        self.anncoords = val

    @property
    @cbook.deprecated('1.4', message='Use `xyann` instead',
                      name='xytext', alternative='xyann')
    def xytext(self):
        return self.xyann

    @xytext.setter
    @cbook.deprecated('1.4', message='Use `xyann` instead',
                      name='xytext', alternative='xyann')
    def xytext(self, val):
        self.xyann = val
class Annotation(Text, _AnnotationBase):
"""
A :class:`~matplotlib.text.Text` class to make annotating things
in the figure, such as :class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes`,
:class:`~matplotlib.patches.Rectangle`, etc., easier.
Annotate the *x*, *y* point *xy* with text *s* at *x*, *y*
location *xytext*. (If *xytext* = *None*, defaults to *xy*,
and if *textcoords* = *None*, defaults to *xycoords*).
"""
def __str__(self):
    """Return a compact ``Annotation(x,y,text)`` summary."""
    x, y = self.xy[0], self.xy[1]
    return "Annotation(%g,%g,%s)" % (x, y, repr(self._text))
@docstring.dedent_interpd
def __init__(self, s, xy,
xytext=None,
xycoords='data',
textcoords=None,
arrowprops=None,
annotation_clip=None,
**kwargs):
"""
*arrowprops*, if not *None*, is a dictionary of line properties
(see :class:`matplotlib.lines.Line2D`) for the arrow that connects
annotation to the point.
If the dictionary has a key *arrowstyle*, a
`~matplotlib.patches.FancyArrowPatch` instance is created with
the given dictionary and is drawn. Otherwise, a
`~matplotlib.patches.YAArrow` patch instance is created and
drawn. Valid keys for `~matplotlib.patches.YAArrow` are:
========= ===========================================================
Key Description
========= ===========================================================
width the width of the arrow in points
frac the fraction of the arrow length occupied by the head
headwidth the width of the base of the arrow head in points
shrink oftentimes it is convenient to have the arrowtip
and base a bit away from the text and point being
annotated. If *d* is the distance between the text and
annotated point, shrink will shorten the arrow so the tip
and base are shink percent of the distance *d* away from
the endpoints. i.e., ``shrink=0.05 is 5%%``
? any key for :class:`matplotlib.patches.polygon`
========= ===========================================================
Valid keys for `~matplotlib.patches.FancyArrowPatch` are:
=============== ======================================================
Key Description
=============== ======================================================
arrowstyle the arrow style
connectionstyle the connection style
relpos default is (0.5, 0.5)
patchA default is bounding box of the text
patchB default is None
shrinkA default is 2 points
shrinkB default is 2 points
mutation_scale default is text size (in points)
mutation_aspect default is 1.
? any key for :class:`matplotlib.patches.PathPatch`
=============== ======================================================
*xycoords* and *textcoords* are strings that indicate the
coordinates of *xy* and *xytext*, and may be one of the
following values:
================= ===================================================
Property Description
================= ===================================================
'figure points' points from the lower left corner of the figure
'figure pixels' pixels from the lower left corner of the figure
'figure fraction' 0,0 is lower left of figure and 1,1 is upper right
'axes points' points from lower left corner of axes
'axes pixels' pixels from lower left corner of axes
'axes fraction' 0,0 is lower left of axes and 1,1 is upper right
'data' use the coordinate system of the object being
annotated (default)
'offset points' Specify an offset (in points) from the *xy* value
'polar' you can specify *theta*, *r* for the annotation,
even in cartesian plots. Note that if you
are using a polar axes, you do not need
to specify polar for the coordinate
system since that is the native "data" coordinate
system.
================= ===================================================
If a 'points' or 'pixels' option is specified, values will be
added to the bottom-left and if negative, values will be
subtracted from the top-right. e.g.::
# 10 points to the right of the left border of the axes and
# 5 points below the top border
xy=(10,-5), xycoords='axes points'
You may use an instance of
:class:`~matplotlib.transforms.Transform` or
:class:`~matplotlib.artist.Artist`. See
:ref:`plotting-guide-annotation` for more details.
The *annotation_clip* attribute controls the visibility of the
annotation when it goes outside the axes area. If `True`, the
annotation will only be drawn when the *xy* is inside the
axes. If `False`, the annotation will always be drawn
regardless of its position. The default is `None`, which
behave as `True` only if *xycoords* is "data".
Additional kwargs are `~matplotlib.text.Text` properties:
%(Text)s
"""
_AnnotationBase.__init__(self,
xy,
xycoords=xycoords,
annotation_clip=annotation_clip)
# warn about wonky input data
if (xytext is None and
textcoords is not None and
textcoords != xycoords):
warnings.warn("You have used the `textcoords` kwarg, but not "
"the `xytext` kwarg. This can lead to surprising "
"results.")
# clean up textcoords and assign default
if textcoords is None:
textcoords = self.xycoords
self._textcoords = textcoords
# cleanup xytext defaults
if xytext is None:
xytext = self.xy
x, y = xytext
Text.__init__(self, x, y, s, **kwargs)
self.arrowprops = arrowprops
self.arrow = None
if arrowprops and "arrowstyle" in arrowprops:
arrowprops = self.arrowprops.copy()
self._arrow_relpos = arrowprops.pop("relpos", (0.5, 0.5))
self.arrow_patch = FancyArrowPatch((0, 0), (1, 1),
**arrowprops)
else:
self.arrow_patch = None
def contains(self, event):
contains, tinfo = Text.contains(self, event)
if self.arrow is not None:
in_arrow, _ = self.arrow.contains(event)
contains = contains or in_arrow
# self.arrow_patch is currently not checked as this can be a line - J
return contains, tinfo
@property
def xyann(self):
return self.get_position()
@xyann.setter
def xyann(self, xytext):
self.set_position(xytext)
@property
def anncoords(self):
return self._textcoords
@anncoords.setter
def anncoords(self, coords):
self._textcoords = coords
def set_figure(self, fig):
if self.arrow is not None:
self.arrow.set_figure(fig)
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
Artist.set_figure(self, fig)
def update_positions(self, renderer):
""""Update the pixel positions of the annotated point and the
text.
"""
xy_pixel = self._get_position_xy(renderer)
self._update_position_xytext(renderer, xy_pixel)
def _update_position_xytext(self, renderer, xy_pixel):
"""Update the pixel positions of the annotation text and the arrow
patch.
"""
# generate transformation,
self.set_transform(self._get_xy_transform(
renderer, self.xy, self.anncoords))
ox0, oy0 = self._get_xy_display()
ox1, oy1 = xy_pixel
if self.arrowprops:
x0, y0 = xy_pixel
l, b, w, h = Text.get_window_extent(self, renderer).bounds
r = l + w
t = b + h
xc = 0.5 * (l + r)
yc = 0.5 * (b + t)
d = self.arrowprops.copy()
# Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.
# Otherwise, fallback to YAArrow.
#if d.has_key("arrowstyle"):
if self.arrow_patch:
# adjust the starting point of the arrow relative to
# the textbox.
# TODO : Rotation needs to be accounted.
relpos = self._arrow_relpos
bbox = Text.get_window_extent(self, renderer)
ox0 = bbox.x0 + bbox.width * relpos[0]
oy0 = bbox.y0 + bbox.height * relpos[1]
# The arrow will be drawn from (ox0, oy0) to (ox1,
# oy1). It will be first clipped by patchA and patchB.
# Then it will be shrinked by shirnkA and shrinkB
# (in points). If patch A is not set, self.bbox_patch
# is used.
self.arrow_patch.set_positions((ox0, oy0), (ox1, oy1))
mutation_scale = d.pop("mutation_scale", self.get_size())
mutation_scale = renderer.points_to_pixels(mutation_scale)
self.arrow_patch.set_mutation_scale(mutation_scale)
if "patchA" in d:
self.arrow_patch.set_patchA(d.pop("patchA"))
else:
if self._bbox_patch:
self.arrow_patch.set_patchA(self._bbox_patch)
else:
props = self._bbox
if props is None:
props = {}
# don't want to alter the pad externally
props = props.copy()
pad = props.pop('pad', 4)
pad = renderer.points_to_pixels(pad)
if self.get_text().strip() == "":
self.arrow_patch.set_patchA(None)
return
bbox = Text.get_window_extent(self, renderer)
l, b, w, h = bbox.bounds
l -= pad / 2.
b -= pad / 2.
w += pad
h += pad
r = Rectangle(xy=(l, b),
width=w,
height=h,
)
r.set_transform(mtransforms.IdentityTransform())
r.set_clip_on(False)
r.update(props)
self.arrow_patch.set_patchA(r)
else:
# pick the x,y corner of the text bbox closest to point
# annotated
dsu = [(abs(val - x0), val) for val in (l, r, xc)]
dsu.sort()
_, x = dsu[0]
dsu = [(abs(val - y0), val) for val in (b, t, yc)]
dsu.sort()
_, y = dsu[0]
shrink = d.pop('shrink', 0.0)
theta = math.atan2(y - y0, x - x0)
r = np.hypot((y - y0), (x - x0))
dx = shrink * r * math.cos(theta)
dy = shrink * r * math.sin(theta)
width = d.pop('width', 4)
headwidth = d.pop('headwidth', 12)
frac = d.pop('frac', 0.1)
self.arrow = YAArrow(self.figure,
(x0 + dx, y0 + dy), (x - dx, y - dy),
width=width, headwidth=headwidth,
frac=frac,
**d)
self.arrow.set_clip_box(self.get_clip_box())
def update_bbox_position_size(self, renderer):
"""
Update the location and the size of the bbox. This method
should be used when the position and size of the bbox needs to
be updated before actually drawing the bbox.
"""
# For arrow_patch, use textbox as patchA by default.
if not isinstance(self.arrow_patch, FancyArrowPatch):
return
if self._bbox_patch:
posx, posy = self._x, self._y
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0., w_box, h_box)
theta = np.deg2rad(self.get_rotation())
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx + x_box, posy + y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
@allow_rasterization
def draw(self, renderer):
"""
Draw the :class:`Annotation` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible():
return
xy_pixel = self._get_position_xy(renderer)
if not self._check_xy(renderer, xy_pixel):
return
self._update_position_xytext(renderer, xy_pixel)
self.update_bbox_position_size(renderer)
if self.arrow is not None:
if self.arrow.figure is None and self.figure is not None:
self.arrow.figure = self.figure
self.arrow.draw(renderer)
if self.arrow_patch is not None:
if self.arrow_patch.figure is None and self.figure is not None:
self.arrow_patch.figure = self.figure
self.arrow_patch.draw(renderer)
Text.draw(self, renderer)
def get_window_extent(self, renderer=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text and arrow annotation, in display units.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure. The
*dpi* used defaults to self.figure.dpi; the renderer dpi is
irrelevant.
'''
if not self.get_visible():
return Bbox.unit()
arrow = self.arrow
arrow_patch = self.arrow_patch
text_bbox = Text.get_window_extent(self, renderer=renderer)
bboxes = [text_bbox]
if self.arrow is not None:
bboxes.append(arrow.get_window_extent(renderer=renderer))
elif self.arrow_patch is not None:
bboxes.append(arrow_patch.get_window_extent(renderer=renderer))
return Bbox.union(bboxes)
# Register the Annotation constructor docstring for %(Annotation)s
# interpolation in other docstrings.
docstring.interpd.update(Annotation=Annotation.__init__.__doc__)
| lgpl-3.0 |
bert9bert/statsmodels | statsmodels/tsa/statespace/tools.py | 1 | 67506 | """
Statespace Tools
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from scipy.linalg import solve_sylvester
import pandas as pd
from statsmodels.tools.data import _is_using_pandas
import warnings
# Backend-selection state: `set_mode` (below) flips these flags and fills the
# prefix_* maps with the BLAS-prefix-specific Cython implementations.
compatibility_mode = False
has_trmm = True
# Map from BLAS type prefix ('s', 'd', 'c', 'z') to the numpy dtype it
# represents.
prefix_dtype_map = {
    's': np.float32, 'd': np.float64, 'c': np.complex64, 'z': np.complex128
}
# The following maps are keyed by BLAS prefix and populated by `set_mode`
# with the matching Cython class or function for each dtype.
prefix_statespace_map = {}
prefix_kalman_filter_map = {}
prefix_kalman_smoother_map = {}
prefix_simulation_smoother_map = {}
prefix_pacf_map = {}
prefix_sv_map = {}
prefix_reorder_missing_matrix_map = {}
prefix_reorder_missing_vector_map = {}
prefix_copy_missing_matrix_map = {}
prefix_copy_missing_vector_map = {}
prefix_copy_index_matrix_map = {}
prefix_copy_index_vector_map = {}
def set_mode(compatibility=None):
    """
    Select the Cython backend for the statespace routines.

    Parameters
    ----------
    compatibility : bool or None, optional
        If None (default), autodetect: use the fast backend when
        `scipy.linalg.cython_blas` is importable (SciPy >= 0.14), otherwise
        fall back to compatibility mode. If False is requested but the
        dependency is missing, a warning is issued and compatibility mode
        is used anyway.

    Notes
    -----
    Populates the module-level ``prefix_*`` maps with the per-dtype
    ('s', 'd', 'c', 'z') implementation classes/functions and sets the
    module-level ``compatibility_mode`` / ``has_trmm`` flags.
    """
    global compatibility_mode, has_trmm, prefix_statespace_map, \
        prefix_kalman_filter_map, prefix_kalman_smoother_map, \
        prefix_simulation_smoother_map, prefix_pacf_map, prefix_sv_map
    # Determine mode automatically if none given
    if compatibility is None:
        try:
            from scipy.linalg import cython_blas
            compatibility = False
        except ImportError:
            compatibility = True
    # If compatibility was False, make sure that is possible
    if not compatibility:
        try:
            from scipy.linalg import cython_blas
        except ImportError:
            warnings.warn('Minimum dependencies not met. Compatibility mode'
                          ' enabled.')
            compatibility = True
    # Initialize the appropriate mode
    if not compatibility:
        # Fast path: SciPy exposes cython_blas, so the full Cython
        # implementations can be used.
        from scipy.linalg import cython_blas
        from . import (_representation, _kalman_filter, _kalman_smoother,
                       _simulation_smoother, _tools)
        compatibility_mode = False
        prefix_statespace_map.update({
            's': _representation.sStatespace, 'd': _representation.dStatespace,
            'c': _representation.cStatespace, 'z': _representation.zStatespace
        })
        prefix_kalman_filter_map.update({
            's': _kalman_filter.sKalmanFilter,
            'd': _kalman_filter.dKalmanFilter,
            'c': _kalman_filter.cKalmanFilter,
            'z': _kalman_filter.zKalmanFilter
        })
        prefix_kalman_smoother_map.update({
            's': _kalman_smoother.sKalmanSmoother,
            'd': _kalman_smoother.dKalmanSmoother,
            'c': _kalman_smoother.cKalmanSmoother,
            'z': _kalman_smoother.zKalmanSmoother
        })
        prefix_simulation_smoother_map.update({
            's': _simulation_smoother.sSimulationSmoother,
            'd': _simulation_smoother.dSimulationSmoother,
            'c': _simulation_smoother.cSimulationSmoother,
            'z': _simulation_smoother.zSimulationSmoother
        })
        prefix_pacf_map.update({
            's': _tools._scompute_coefficients_from_multivariate_pacf,
            'd': _tools._dcompute_coefficients_from_multivariate_pacf,
            'c': _tools._ccompute_coefficients_from_multivariate_pacf,
            'z': _tools._zcompute_coefficients_from_multivariate_pacf
        })
        prefix_sv_map.update({
            's': _tools._sconstrain_sv_less_than_one,
            'd': _tools._dconstrain_sv_less_than_one,
            'c': _tools._cconstrain_sv_less_than_one,
            'z': _tools._zconstrain_sv_less_than_one
        })
        prefix_reorder_missing_matrix_map.update({
            's': _tools.sreorder_missing_matrix,
            'd': _tools.dreorder_missing_matrix,
            'c': _tools.creorder_missing_matrix,
            'z': _tools.zreorder_missing_matrix
        })
        prefix_reorder_missing_vector_map.update({
            's': _tools.sreorder_missing_vector,
            'd': _tools.dreorder_missing_vector,
            'c': _tools.creorder_missing_vector,
            'z': _tools.zreorder_missing_vector
        })
        prefix_copy_missing_matrix_map.update({
            's': _tools.scopy_missing_matrix,
            'd': _tools.dcopy_missing_matrix,
            'c': _tools.ccopy_missing_matrix,
            'z': _tools.zcopy_missing_matrix
        })
        prefix_copy_missing_vector_map.update({
            's': _tools.scopy_missing_vector,
            'd': _tools.dcopy_missing_vector,
            'c': _tools.ccopy_missing_vector,
            'z': _tools.zcopy_missing_vector
        })
        prefix_copy_index_matrix_map.update({
            's': _tools.scopy_index_matrix,
            'd': _tools.dcopy_index_matrix,
            'c': _tools.ccopy_index_matrix,
            'z': _tools.zcopy_index_matrix
        })
        prefix_copy_index_vector_map.update({
            's': _tools.scopy_index_vector,
            'd': _tools.dcopy_index_vector,
            'c': _tools.ccopy_index_vector,
            'z': _tools.zcopy_index_vector
        })
    else:
        # Compatibility path: older SciPy, use the legacy _statespace module
        # and the pure-Python Kalman smoother.
        from . import _statespace
        from ._pykalman_smoother import _KalmanSmoother
        compatibility_mode = True
        # dtrmm is only available in newer SciPy BLAS wrappers; without it
        # the pacf/sv Cython helpers cannot be used.
        try:
            from scipy.linalg.blas import dtrmm
        except ImportError:
            has_trmm = False
        prefix_statespace_map.update({
            's': _statespace.sStatespace, 'd': _statespace.dStatespace,
            'c': _statespace.cStatespace, 'z': _statespace.zStatespace
        })
        prefix_kalman_filter_map.update({
            's': _statespace.sKalmanFilter, 'd': _statespace.dKalmanFilter,
            'c': _statespace.cKalmanFilter, 'z': _statespace.zKalmanFilter
        })
        prefix_kalman_smoother_map.update({
            's': _KalmanSmoother, 'd': _KalmanSmoother,
            'c': _KalmanSmoother, 'z': _KalmanSmoother
        })
        # Simulation smoothing is unavailable in compatibility mode.
        prefix_simulation_smoother_map.update({
            's': None, 'd': None, 'c': None, 'z': None
        })
        if has_trmm:
            prefix_pacf_map.update({
                's': _statespace._scompute_coefficients_from_multivariate_pacf,
                'd': _statespace._dcompute_coefficients_from_multivariate_pacf,
                'c': _statespace._ccompute_coefficients_from_multivariate_pacf,
                'z': _statespace._zcompute_coefficients_from_multivariate_pacf
            })
            prefix_sv_map.update({
                's': _statespace._sconstrain_sv_less_than_one,
                'd': _statespace._dconstrain_sv_less_than_one,
                'c': _statespace._cconstrain_sv_less_than_one,
                'z': _statespace._zconstrain_sv_less_than_one
            })
        prefix_reorder_missing_matrix_map.update({
            's': _statespace.sreorder_missing_matrix,
            'd': _statespace.dreorder_missing_matrix,
            'c': _statespace.creorder_missing_matrix,
            'z': _statespace.zreorder_missing_matrix
        })
        prefix_reorder_missing_vector_map.update({
            's': _statespace.sreorder_missing_vector,
            'd': _statespace.dreorder_missing_vector,
            'c': _statespace.creorder_missing_vector,
            'z': _statespace.zreorder_missing_vector
        })
        prefix_copy_missing_matrix_map.update({
            's': _statespace.scopy_missing_matrix,
            'd': _statespace.dcopy_missing_matrix,
            'c': _statespace.ccopy_missing_matrix,
            'z': _statespace.zcopy_missing_matrix
        })
        prefix_copy_missing_vector_map.update({
            's': _statespace.scopy_missing_vector,
            'd': _statespace.dcopy_missing_vector,
            'c': _statespace.ccopy_missing_vector,
            'z': _statespace.zcopy_missing_vector
        })
        prefix_copy_index_matrix_map.update({
            's': _statespace.scopy_index_matrix,
            'd': _statespace.dcopy_index_matrix,
            'c': _statespace.ccopy_index_matrix,
            'z': _statespace.zcopy_index_matrix
        })
        prefix_copy_index_vector_map.update({
            's': _statespace.scopy_index_vector,
            'd': _statespace.dcopy_index_vector,
            'c': _statespace.ccopy_index_vector,
            'z': _statespace.zcopy_index_vector
        })
# Initialize the backend maps at import time using autodetection.
set_mode(compatibility=None)

try:
    from scipy.linalg.blas import find_best_blas_type
except ImportError:  # pragma: no cover
    # Shim for SciPy 0.11, derived from tag=0.11 scipy.linalg.blas
    _type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z', 'G': 'z'}

    def find_best_blas_type(arrays):
        # Pick the "widest" dtype among the arrays and map its character
        # code to a BLAS prefix (default 'd').
        dtype, index = max(
            [(ar.dtype, i) for i, ar in enumerate(arrays)])
        prefix = _type_conv.get(dtype.char, 'd')
        return prefix, dtype, None
def companion_matrix(polynomial):
    r"""
    Create a companion matrix.

    Parameters
    ----------
    polynomial : array_like, list, or int
        Coefficients of a lag polynomial in order of increasing degree; the
        coefficients may be scalars (AR(p) case) or :math:`m \times m`
        matrices (VAR(p) case). If an integer ``n`` is given, an
        :math:`n \times n` companion matrix with zero coefficients is
        returned. For a matrix polynomial, the leading coefficient may be
        given as the scalar 1 to indicate an identity matrix (this also
        speeds up construction).

    Returns
    -------
    companion_matrix : array
        The companion matrix, with :math:`\phi_i = -c_i / c_0` (scalar case)
        or :math:`\phi_i = -C_0^{-1} C_i'` (matrix case) down the first
        block column and identity blocks on the first superdiagonal.

    Notes
    -----
    For an AR(p) model :math:`y_t = a_1 y_{t-1} + \dots + a_p y_{t-p} +
    \varepsilon_t`, rewrite it as :math:`c(L) y_t = \varepsilon_t` with
    :math:`c_i = -a_i`; it is these :math:`c_i` that should be passed here.
    """
    use_identity = False

    if isinstance(polynomial, int):
        # Size-only request: an n x n matrix with zero coefficients.
        n = polynomial
        m = 1
        polynomial = None
    else:
        n = len(polynomial) - 1

        if n < 1:
            raise ValueError("Companion matrix polynomials must include at"
                             " least two terms.")

        if isinstance(polynomial, (list, tuple)):
            # Note: polynomial[1] (not [0]) is inspected because the leading
            # term has special handling (scalar 1 => identity matrix).
            try:
                m = len(polynomial[1])
            except TypeError:
                m = 1

            if m == 1:
                # Scalar polynomial given as a list/tuple.
                polynomial = np.asanyarray(polynomial)
            elif polynomial[0] == 1:
                # Matrix polynomial with C_0 = 1 meaning the identity.
                polynomial[0] = np.eye(m)
                use_identity = True
        else:
            m = 1
            polynomial = np.asanyarray(polynomial)

    dim = n * m
    matrix = np.zeros((dim, dim), dtype=np.asanyarray(polynomial).dtype)

    # Identity blocks on the first superdiagonal.
    rows, cols = np.diag_indices((n - 1) * m)
    matrix[rows, cols + m] = 1

    if polynomial is not None and n > 0:
        if m == 1:
            matrix[:, 0] = -polynomial[1:] / polynomial[0]
        elif use_identity:
            # C_0 is the identity, so no inversion is needed.
            for i in range(n):
                matrix[i * m:(i + 1) * m, :m] = -polynomial[i + 1].T
        else:
            c0_inv = np.linalg.inv(polynomial[0])
            for i in range(n):
                matrix[i * m:(i + 1) * m, :m] = -np.dot(
                    c0_inv, polynomial[i + 1]).T

    return matrix
def diff(series, k_diff=1, k_seasonal_diff=None, seasonal_periods=1):
    r"""
    Difference a series simply and/or seasonally along the zero-th axis.

    Given a series (denoted :math:`y_t`), performs the differencing operation

    .. math::

        \Delta^d \Delta_s^D y_t

    where :math:`d =` `k_diff`, :math:`s =` `seasonal_periods`,
    :math:`D =` `k_seasonal_diff`, and :math:`\Delta` is the difference
    operator.

    Parameters
    ----------
    series : array_like
        The series to be differenced.
    k_diff : int, optional
        The number of simple differences to perform. Default is 1.
    k_seasonal_diff : int or None, optional
        The number of seasonal differences to perform. Default is no seasonal
        differencing.
    seasonal_periods : int, optional
        The seasonal lag. Default is 1. Unused if there is no seasonal
        differencing.

    Returns
    -------
    differenced : array
        The differenced array.
    """
    # Preserve pandas metadata (index, columns) when a pandas object is
    # passed; otherwise work on a plain ndarray.
    pandas = _is_using_pandas(series, None)
    differenced = np.asanyarray(series) if not pandas else series

    # Seasonal differencing: y_t - y_{t-s}, applied k_seasonal_diff times.
    if k_seasonal_diff is not None:
        while k_seasonal_diff > 0:
            if not pandas:
                differenced = (differenced[seasonal_periods:] -
                               differenced[:-seasonal_periods])
            else:
                # Drop the leading NaNs that pandas' diff introduces.
                differenced = (
                    differenced.diff(seasonal_periods)[seasonal_periods:])
            k_seasonal_diff -= 1

    # Simple differencing, applied k_diff times.
    if not pandas:
        differenced = np.diff(differenced, k_diff, axis=0)
    else:
        while k_diff > 0:
            differenced = differenced.diff()[1:]
            k_diff -= 1
    return differenced
def concat(series, axis=0, allow_mix=False):
    """
    Concatenate a set of series.

    Parameters
    ----------
    series : iterable
        An iterable of series to be concatenated
    axis : int, optional
        The axis along which to concatenate. Default is 0 (rows).
    allow_mix : bool
        Whether or not to allow a mix of pandas and non-pandas objects. Default
        is False. If true, the returned object is an ndarray, and additional
        pandas metadata (e.g. column names, indices, etc) is lost.

    Returns
    -------
    concatenated : array or pd.DataFrame
        The concatenated array. Will be a DataFrame if series are pandas
        objects.

    Raises
    ------
    ValueError
        If `series` mixes pandas and non-pandas objects and `allow_mix` is
        False.
    """
    is_pandas = np.r_[[_is_using_pandas(s, None) for s in series]]

    # All-pandas inputs keep their metadata; otherwise fall back to ndarray
    # concatenation (mixing is only allowed when explicitly requested).
    if np.all(is_pandas):
        concatenated = pd.concat(series, axis=axis)
    elif np.all(~is_pandas) or allow_mix:
        concatenated = np.concatenate(series, axis=axis)
    else:
        raise ValueError('Attempted to concatenate Pandas objects with'
                         ' non-Pandas objects with `allow_mix=False`.')

    return concatenated
def is_invertible(polynomial, threshold=1.):
    r"""
    Determine if a polynomial is invertible.

    Requires all roots of the polynomial lie inside the unit circle.

    Parameters
    ----------
    polynomial : array_like or tuple, list
        Coefficients of a polynomial, in order of increasing degree.
        For example, `polynomial=[1, -0.5]` corresponds to the polynomial
        :math:`1 - 0.5x` which has root :math:`2`. If it is a matrix
        polynomial (in which case the coefficients are coefficient matrices),
        a tuple or list of matrices should be passed.
    threshold : number
        Allowed threshold for `is_invertible` to return True. Default is 1.

    Notes
    -----
    Writing :math:`C(L) = constant (1 - \lambda_1 L) \dots
    (1 - \lambda_n L)`, invertibility requires :math:`|\lambda_i| < 1` for
    every root :math:`\lambda_i`. Equivalently, the roots of the
    reversed-coefficient polynomial must lie outside the unit circle.
    Here the roots are obtained as the eigenvalues of the companion matrix
    formed from the coefficients.

    See Also
    --------
    companion_matrix
    """
    # The eigenvalues of the companion matrix are exactly the \lambda_i
    # above; all must lie strictly inside the circle of radius `threshold`.
    roots = np.linalg.eigvals(companion_matrix(polynomial))
    return np.all(np.abs(roots) < threshold)
def solve_discrete_lyapunov(a, q, complex_step=False):
    r"""
    Solves the discrete Lyapunov equation using a bilinear transformation.

    Notes
    -----
    This is a modification of the version in Scipy (see
    https://github.com/scipy/scipy/blob/master/scipy/linalg/_solvers.py)
    which allows passing through the complex numbers in the matrix a
    (usually the transition matrix) in order to allow complex step
    differentiation: with `complex_step=True` the plain transpose is used
    in place of the conjugate transpose, so imaginary perturbations are
    propagated rather than conjugated away.
    """
    eye = np.eye(a.shape[0], dtype=a.dtype)

    # Choose the appropriate "transpose" operation: conjugate transpose
    # normally, plain transpose for complex-step differentiation.
    if complex_step:
        aH = a.transpose()
    else:
        aH = a.conj().transpose()

    # Bilinear transformation mapping the discrete equation to a
    # continuous-time Sylvester equation.
    aHI_inv = np.linalg.inv(aH + eye)
    b = np.dot(aH - eye, aHI_inv)
    c = 2 * np.dot(np.dot(np.linalg.inv(a + eye), q), aHI_inv)

    bH = b.transpose() if complex_step else b.conj().transpose()
    return solve_sylvester(bH, b, -c)
def constrain_stationary_univariate(unconstrained):
    """
    Transform unconstrained parameters used by the optimizer to constrained
    parameters used in likelihood evaluation

    Parameters
    ----------
    unconstrained : array
        Unconstrained parameters used by the optimizer, to be transformed to
        stationary coefficients of, e.g., an autoregressive or moving average
        component.

    Returns
    -------
    constrained : array
        Constrained parameters of, e.g., an autoregressive or moving average
        component, to be transformed to arbitrary parameters used by the
        optimizer.

    References
    ----------
    .. [1] Monahan, John F. 1984.
       "A Note on Enforcing Stationarity in
       Autoregressive-moving Average Models."
       Biometrika 71 (2) (August 1): 403-404.
    """
    n = unconstrained.shape[0]

    # Squash each unconstrained value into (-1, 1): these play the role of
    # partial autocorrelations in Monahan's (1984) recursion.
    pacf = unconstrained / ((1 + unconstrained ** 2) ** 0.5)

    # Levinson-style recursion: row k holds the order-(k+1) coefficients.
    work = np.zeros((n, n), dtype=unconstrained.dtype)
    for k in range(n):
        if k > 0:
            # work[k - 1, k - 1::-1] is row k-1 reversed, matching the
            # elementwise update y[k, i] = y[k-1, i] + r[k] * y[k-1, k-i-1].
            work[k, :k] = (work[k - 1, :k] +
                           pacf[k] * work[k - 1, k - 1::-1])
        work[k, k] = pacf[k]

    return -work[n - 1, :]
def unconstrain_stationary_univariate(constrained):
    """
    Transform constrained parameters used in likelihood evaluation
    to unconstrained parameters used by the optimizer

    Parameters
    ----------
    constrained : array
        Constrained parameters of, e.g., an autoregressive or moving average
        component, to be transformed to arbitrary parameters used by the
        optimizer.

    Returns
    -------
    unconstrained : array
        Unconstrained parameters used by the optimizer, to be transformed to
        stationary coefficients of, e.g., an autoregressive or moving average
        component.

    References
    ----------
    .. [1] Monahan, John F. 1984.
       "A Note on Enforcing Stationarity in
       Autoregressive-moving Average Models."
       Biometrika 71 (2) (August 1): 403-404.
    """
    n = constrained.shape[0]
    work = np.zeros((n, n), dtype=constrained.dtype)

    # Seed the last row with the (sign-flipped) constrained coefficients and
    # run the Levinson recursion backwards; the diagonal then holds the
    # partial autocorrelations.
    work[n - 1:] = -constrained
    for k in range(n - 1, 0, -1):
        rho = work[k, k]
        # work[k, k - 1::-1] is row k reversed, matching the elementwise
        # update y[k-1, i] = (y[k, i] - y[k, k] * y[k, k-i-1]) / (1 - y[k,k]^2)
        work[k - 1, :k] = (work[k, :k] - rho * work[k, k - 1::-1]) / \
            (1 - rho ** 2)

    # Invert the (-1, 1) squashing map applied in the constraining step.
    pacf = work.diagonal()
    return pacf / ((1 - pacf ** 2) ** 0.5)
def _constrain_sv_less_than_one_python(unconstrained, order=None,
k_endog=None):
"""
Transform arbitrary matrices to matrices with singular values less than
one.
Parameters
----------
unconstrained : list
Arbitrary matrices. Should be a list of length `order`, where each
element is an array sized `k_endog` x `k_endog`.
order : integer, optional
The order of the autoregression.
k_endog : integer, optional
The dimension of the data vector.
Returns
-------
constrained : list
Partial autocorrelation matrices. Should be a list of length
`order`, where each element is an array sized `k_endog` x `k_endog`.
Notes
-----
Corresponds to Lemma 2.2 in Ansley and Kohn (1986). See
`constrain_stationary_multivariate` for more details.
There is a Cython implementation of this function that can be much faster,
but which requires SciPy 0.14.0 or greater. See
`constrain_stationary_multivariate` for details.
"""
from scipy import linalg
constrained = [] # P_s, s = 1, ..., p
if order is None:
order = len(unconstrained)
if k_endog is None:
k_endog = unconstrained[0].shape[0]
eye = np.eye(k_endog)
for i in range(order):
A = unconstrained[i]
B, lower = linalg.cho_factor(eye + np.dot(A, A.T), lower=True)
constrained.append(linalg.solve_triangular(B, A, lower=lower))
return constrained
def _compute_coefficients_from_multivariate_pacf_python(
        partial_autocorrelations, error_variance, transform_variance=False,
        order=None, k_endog=None):
    """
    Transform matrices with singular values less than one to matrices
    corresponding to a stationary (or invertible) process.

    Parameters
    ----------
    partial_autocorrelations : list
        Partial autocorrelation matrices. Should be a list of length `order`,
        where each element is an array sized `k_endog` x `k_endog`.
    error_variance : array
        The variance / covariance matrix of the error term. Should be sized
        `k_endog` x `k_endog`. This is used as input in the algorithm even if
        is not transformed by it (when `transform_variance` is False). The
        error term variance is required input when transformation is used
        either to force an autoregressive component to be stationary or to
        force a moving average component to be invertible.
    transform_variance : boolean, optional
        Whether or not to transform the error variance term. This option is
        not typically used, and the default is False.
    order : integer, optional
        The order of the autoregression. Default is
        `len(partial_autocorrelations)`.
    k_endog : integer, optional
        The dimension of the data vector. Default is inferred from the first
        partial autocorrelation matrix.

    Returns
    -------
    coefficient_matrices : list
        Transformed coefficient matrices leading to a stationary VAR
        representation.
    variance : array
        The final forward prediction-error variance from the recursion.
        NOTE(review): when `transform_variance` is False the coefficient
        matrices are rescaled (Lemma 2.3) to be consistent with the input
        `error_variance`, but the returned `variance` is still the
        recursion's final forward variance — confirm callers expect this.

    Notes
    -----
    Corresponds to Lemma 2.1 in Ansley and Kohn (1986). See
    `constrain_stationary_multivariate` for more details.

    There is a Cython implementation of this function that can be much faster,
    but which requires SciPy 0.14.0 or greater. See
    `constrain_stationary_multivariate` for details.
    """
    from scipy import linalg

    if order is None:
        order = len(partial_autocorrelations)
    if k_endog is None:
        k_endog = partial_autocorrelations[0].shape[0]

    # If we want to keep the provided variance but with the constrained
    # coefficient matrices, we need to make a copy here, and then after the
    # main loop we will transform the coefficients to match the passed
    # variance
    if not transform_variance:
        initial_variance = error_variance
        # Need to make the input variance large enough that the recursions
        # don't lead to zero-matrices due to roundoff error, which would cause
        # exceptions from the Cholesky decompositions.
        # Note that this will still not always ensure positive definiteness,
        # and for k_endog, order large enough an exception may still be raised
        error_variance = np.eye(k_endog) * (order + k_endog)**10

    forward_variances = [error_variance]   # \Sigma_s
    backward_variances = [error_variance]  # \Sigma_s^*, s = 0, ..., p
    autocovariances = [error_variance]     # \Gamma_s
    # \phi_{s,k}, s = 1, ..., p
    #             k = 1, ..., s+1
    forwards = []
    # \phi_{s,k}^*
    backwards = []

    # Cholesky factors of the forward / backward variances (lower triangular)
    error_variance_factor = linalg.cholesky(error_variance, lower=True)
    forward_factors = [error_variance_factor]
    backward_factors = [error_variance_factor]

    # We fill in the entries as follows:
    # [1,1]
    # [2,2], [2,1]
    # [3,3], [3,1], [3,2]
    # ...
    # [p,p], [p,1], ..., [p,p-1]
    # the last row, correctly ordered, is then used as the coefficients
    for s in range(order):  # s = 0, ..., p-1
        prev_forwards = forwards
        prev_backwards = backwards
        forwards = []
        backwards = []

        # Create the "last" (k = s+1) matrix
        # Note: this is for k = s+1. However, below we then have to fill
        # in for k = 1, ..., s in order.
        # Solve the triangular system, then multiply by the forward factor:
        # P L*^{-1} = x
        # x L* = P
        # L*' x' = P'
        forwards.append(
            linalg.solve_triangular(
                backward_factors[s], partial_autocorrelations[s].T,
                lower=True, trans='T'))
        forwards[0] = np.dot(forward_factors[s], forwards[0].T)

        # P' L^{-1} = x
        # x L = P'
        # L' x' = P
        backwards.append(
            linalg.solve_triangular(
                forward_factors[s], partial_autocorrelations[s],
                lower=True, trans='T'))
        backwards[0] = np.dot(backward_factors[s], backwards[0].T)

        # Update the variance
        # Note: if s >= 1, this will be further updated in the for loop
        # below
        # Also, this calculation will be re-used in the forward variance
        tmp = np.dot(forwards[0], backward_variances[s])
        autocovariances.append(tmp.copy().T)

        # Create the remaining k = 1, ..., s matrices,
        # only has an effect if s >= 1
        for k in range(s):
            forwards.insert(k, prev_forwards[k] - np.dot(
                forwards[-1], prev_backwards[s-(k+1)]))
            backwards.insert(k, prev_backwards[k] - np.dot(
                backwards[-1], prev_forwards[s-(k+1)]))
            autocovariances[s+1] += np.dot(autocovariances[k+1],
                                           prev_forwards[s-(k+1)].T)

        # Create forward and backwards variances
        forward_variances.append(
            forward_variances[s] - np.dot(tmp, forwards[s].T)
        )
        backward_variances.append(
            backward_variances[s] -
            np.dot(
                np.dot(backwards[s], forward_variances[s]),
                backwards[s].T
            )
        )

        # Cholesky factors for the next recursion step
        forward_factors.append(
            linalg.cholesky(forward_variances[s+1], lower=True)
        )
        backward_factors.append(
            linalg.cholesky(backward_variances[s+1], lower=True)
        )

    # If we do not want to use the transformed variance, we need to
    # adjust the constrained matrices, as presented in Lemma 2.3, see above
    variance = forward_variances[-1]
    if not transform_variance:
        # Here, we need to construct T such that:
        # variance = T * initial_variance * T'
        # To do that, consider the Cholesky of variance (L) and
        # input_variance (M) to get:
        # L L' = T M M' T' = (TM) (TM)'
        # => L = T M
        # => L M^{-1} = T
        initial_variance_factor = np.linalg.cholesky(initial_variance)
        transformed_variance_factor = np.linalg.cholesky(variance)
        transform = np.dot(initial_variance_factor,
                           np.linalg.inv(transformed_variance_factor))
        inv_transform = np.linalg.inv(transform)

        # Similarity-transform each coefficient matrix so the process is
        # consistent with the originally-passed error variance
        for i in range(order):
            forwards[i] = (
                np.dot(np.dot(transform, forwards[i]), inv_transform)
            )

    return forwards, variance
def constrain_stationary_multivariate_python(unconstrained, error_variance,
                                             transform_variance=False,
                                             prefix=None):
    r"""
    Transform unconstrained parameters used by the optimizer to constrained
    parameters used in likelihood evaluation for a vector autoregression.

    Parameters
    ----------
    unconstrained : array or list
        Arbitrary matrices to be transformed to stationary coefficient
        matrices of the VAR. If a list, should be a list of length `order`,
        where each element is an array sized `k_endog` x `k_endog`. If an
        array, should be the matrices horizontally concatenated and sized
        `k_endog` x `k_endog * order`.
    error_variance : array
        The variance / covariance matrix of the error term. Should be sized
        `k_endog` x `k_endog`. This is used as input in the algorithm even if
        it is not transformed by it (when `transform_variance` is False). The
        error term variance is required input when transformation is used
        either to force an autoregressive component to be stationary or to
        force a moving average component to be invertible.
    transform_variance : boolean, optional
        Whether or not to transform the error variance term. This option is
        not typically used, and the default is False.
    prefix : {'s','d','c','z'}, optional
        The appropriate BLAS prefix to use for the passed datatypes. Only
        use if absolutely sure that the prefix is correct or an error will
        result.

    Returns
    -------
    constrained : array or list
        Transformed coefficient matrices leading to a stationary VAR
        representation. Will match the type of the passed `unconstrained`
        variable (so if a list was passed, a list will be returned).
    variance : array
        The error variance as returned by the coefficient-transformation
        step (see `_compute_coefficients_from_multivariate_pacf_python`).

    Notes
    -----
    In the notation of [1]_, the arguments `(variance, unconstrained)` are
    written as :math:`(\Sigma, A_1, \dots, A_p)`, where :math:`p` is the order
    of the vector autoregression, and is here determined by the length of
    the `unconstrained` argument.

    There are two steps in the constraining algorithm.

    First, :math:`(A_1, \dots, A_p)` are transformed into
    :math:`(P_1, \dots, P_p)` via Lemma 2.2 of [1]_.

    Second, :math:`(\Sigma, P_1, \dots, P_p)` are transformed into
    :math:`(\Sigma, \phi_1, \dots, \phi_p)` via Lemmas 2.1 and 2.3 of [1]_.

    If `transform_variance=True`, then only Lemma 2.1 is applied in the second
    step.

    While this function can be used even in the univariate case, it is much
    slower, so in that case `constrain_stationary_univariate` is preferred.

    References
    ----------
    .. [1] Ansley, Craig F., and Robert Kohn. 1986.
       "A Note on Reparameterizing a Vector Autoregressive Moving Average
       Model to Enforce Stationarity."
       Journal of Statistical Computation and Simulation 24 (2): 99-106.
    .. [2] Ansley, Craig F, and Paul Newbold. 1979.
       "Multivariate Partial Autocorrelations."
       In Proceedings of the Business and Economic Statistics Section, 349-53.
       American Statistical Association
    """
    use_list = type(unconstrained) == list
    if not use_list:
        # Split the horizontally-concatenated array into a list of
        # `order` matrices, each k_endog x k_endog
        k_endog, order = unconstrained.shape
        order //= k_endog

        unconstrained = [
            unconstrained[:k_endog, i * k_endog:(i + 1) * k_endog]
            for i in range(order)
        ]

    order = len(unconstrained)
    k_endog = unconstrained[0].shape[0]

    # Step 1: convert from arbitrary matrices to those with singular values
    # less than one.
    sv_constrained = _constrain_sv_less_than_one_python(
        unconstrained, order, k_endog)

    # Step 2: convert matrices from our "partial autocorrelation matrix" space
    # (matrices with singular values less than one) to the space of stationary
    # coefficient matrices
    constrained, var = _compute_coefficients_from_multivariate_pacf_python(
        sv_constrained, error_variance, transform_variance, order, k_endog)

    if not use_list:
        # Concatenation along axis=1 already yields the
        # k_endog x (k_endog * order) shape, so no reshape is needed
        constrained = np.concatenate(constrained, axis=1)

    return constrained, var
# Conditionally use the Cython versions of the multivariate constraint if
# possible (i.e. if Scipy >= 0.14.0 is available.)
if has_trmm:
    def constrain_stationary_multivariate(unconstrained, variance,
                                          transform_variance=False,
                                          prefix=None):
        as_list = type(unconstrained) == list
        if as_list:
            unconstrained = np.concatenate(unconstrained, axis=1)

        # The stacked input is k_endog x (k_endog * order)
        k_endog, order = unconstrained.shape
        order //= k_endog

        if order < 1:
            raise ValueError('Must have order at least 1')
        if k_endog < 1:
            raise ValueError('Must have at least 1 endogenous variable')

        # Detect the BLAS prefix / dtype unless explicitly given
        if prefix is None:
            prefix, dtype, _ = find_best_blas_type(
                [unconstrained, variance])
        dtype = prefix_dtype_map[prefix]

        # The Cython routines operate on Fortran-ordered arrays
        unconstrained = np.asfortranarray(unconstrained, dtype=dtype)
        variance = np.asfortranarray(variance, dtype=dtype)

        # Step 1: map arbitrary matrices to matrices with singular values
        # less than one ("partial autocorrelation matrix" space)
        sv_constrained = prefix_sv_map[prefix](unconstrained, order, k_endog)

        # Step 2: map from the partial autocorrelation space to the space of
        # stationary coefficient matrices
        constrained, variance = prefix_pacf_map[prefix](
            sv_constrained, variance, transform_variance, order, k_endog)

        constrained = np.array(constrained, dtype=dtype)
        variance = np.array(variance, dtype=dtype)

        # Return the coefficients in the same form as they were passed
        if as_list:
            constrained = [
                constrained[:k_endog, i * k_endog:(i + 1) * k_endog]
                for i in range(order)
            ]

        return constrained, variance

    constrain_stationary_multivariate.__doc__ = (
        constrain_stationary_multivariate_python.__doc__)
else:
    constrain_stationary_multivariate = (
        constrain_stationary_multivariate_python)
def _unconstrain_sv_less_than_one(constrained, order=None, k_endog=None):
"""
Transform matrices with singular values less than one to arbitrary
matrices.
Parameters
----------
constrained : list
The partial autocorrelation matrices. Should be a list of length
`order`, where each element is an array sized `k_endog` x `k_endog`.
order : integer, optional
The order of the autoregression.
k_endog : integer, optional
The dimension of the data vector.
Returns
-------
unconstrained : list
Unconstrained matrices. A list of length `order`, where each element is
an array sized `k_endog` x `k_endog`.
Notes
-----
Corresponds to the inverse of Lemma 2.2 in Ansley and Kohn (1986). See
`unconstrain_stationary_multivariate` for more details.
"""
from scipy import linalg
unconstrained = [] # A_s, s = 1, ..., p
if order is None:
order = len(constrained)
if k_endog is None:
k_endog = constrained[0].shape[0]
eye = np.eye(k_endog)
for i in range(order):
P = constrained[i]
# B^{-1} B^{-1}' = I - P P'
B_inv, lower = linalg.cho_factor(eye - np.dot(P, P.T), lower=True)
# A = BP
# B^{-1} A = P
unconstrained.append(linalg.solve_triangular(B_inv, P, lower=lower))
return unconstrained
def _compute_multivariate_sample_acovf(endog, maxlag):
"""
Computer multivariate sample autocovariances
Parameters
----------
endog : array_like
Sample data on which to compute sample autocovariances. Shaped
`nobs` x `k_endog`.
Returns
-------
sample_autocovariances : list
A list of the first `maxlag` sample autocovariance matrices. Each
matrix is shaped `k_endog` x `k_endog`.
Notes
-----
This function computes the forward sample autocovariances:
.. math::
\hat \Gamma(s) = \frac{1}{n} \sum_{t=1}^{n-s}
(Z_t - \bar Z) (Z_{t+s} - \bar Z)'
See page 353 of Wei (1990). This function is primarily implemented for
checking the partial autocorrelation functions below, and so is quite slow.
References
----------
.. [1] Wei, William. 1990.
Time Series Analysis : Univariate and Multivariate Methods.
Boston: Pearson.
"""
# Get the (demeaned) data as an array
endog = np.array(endog)
if endog.ndim == 1:
endog = endog[:, np.newaxis]
endog -= np.mean(endog, axis=0)
# Dimensions
nobs, k_endog = endog.shape
sample_autocovariances = []
for s in range(maxlag + 1):
sample_autocovariances.append(np.zeros((k_endog, k_endog)))
for t in range(nobs - s):
sample_autocovariances[s] += np.outer(endog[t], endog[t+s])
sample_autocovariances[s] /= nobs
return sample_autocovariances
def _compute_multivariate_acovf_from_coefficients(
        coefficients, error_variance, maxlag=None,
        forward_autocovariances=False):
    r"""
    Compute multivariate autocovariances from vector autoregression
    coefficient matrices

    Parameters
    ----------
    coefficients : array or list
        The coefficients matrices. If a list, should be a list of length
        `order`, where each element is an array sized `k_endog` x `k_endog`.
        If an array, should be the coefficient matrices horizontally
        concatenated and sized `k_endog` x `k_endog * order`.
    error_variance : array
        The variance / covariance matrix of the error term. Should be sized
        `k_endog` x `k_endog`.
    maxlag : integer, optional
        The maximum autocovariance to compute. Default is `order`-1. Can be
        zero, in which case it returns the variance.
    forward_autocovariances : boolean, optional
        Whether or not to compute forward autocovariances
        :math:`E(y_t y_{t+j}')`. Default is False, so that backward
        autocovariances :math:`E(y_t y_{t-j}')` are returned.

    Returns
    -------
    autocovariances : list
        A list of the first `maxlag + 1` autocovariance matrices (lags 0
        through `maxlag`, where lag 0 is the variance). Each matrix is
        shaped `k_endog` x `k_endog`.

    Notes
    -----
    Computes

    .. math::

        \Gamma(j) = E(y_t y_{t-j}')

    for j = 1, ..., `maxlag`, unless `forward_autocovariances` is specified,
    in which case it computes:

    .. math::

        E(y_t y_{t+j}') = \Gamma(j)'

    Coefficients are assumed to be provided from the VAR model:

    .. math::
        y_t = A_1 y_{t-1} + \dots + A_p y_{t-p} + \varepsilon_t

    Autocovariances are calculated by solving the associated discrete Lyapunov
    equation of the state space representation of the VAR process.
    """
    from scipy import linalg

    # Convert coefficients to a list of matrices, for use in
    # `companion_matrix`; get dimensions
    if type(coefficients) == list:
        order = len(coefficients)
        k_endog = coefficients[0].shape[0]
    else:
        k_endog, order = coefficients.shape
        order //= k_endog

        coefficients = [
            coefficients[:k_endog, i*k_endog:(i+1)*k_endog]
            for i in range(order)
        ]

    if maxlag is None:
        maxlag = order-1

    # Start with VAR(p): w_{t+1} = phi_1 w_t + ... + phi_p w_{t-p+1} + u_{t+1}
    # Then stack the VAR(p) into a VAR(1) in companion matrix form:
    # z_{t+1} = F z_t + v_t
    companion = companion_matrix(
        [1] + [-coefficients[i] for i in range(order)]
    ).T

    # Compute the error variance matrix for the stacked form: E v_t v_t'
    # (only the top-left block is nonzero, since only the first block row of
    # the stacked process carries the error term)
    selected_variance = np.zeros(companion.shape)
    selected_variance[:k_endog, :k_endog] = error_variance

    # Compute the unconditional variance of z_t: E z_t z_t'
    stacked_cov = linalg.solve_discrete_lyapunov(companion, selected_variance)

    # The first (block) row of the variance of z_t gives the first p-1
    # autocovariances of w_t: \Gamma_i = E w_t w_t+i with \Gamma_0 = Var(w_t)
    # Note: these are okay, checked against ArmaProcess
    autocovariances = [
        stacked_cov[:k_endog, i*k_endog:(i+1)*k_endog]
        for i in range(min(order, maxlag+1))
    ]

    # If more autocovariances are requested than the stacked covariance
    # holds, propagate it one step at a time with the companion matrix and
    # read the next autocovariance from the last block column of the first
    # block row
    for i in range(maxlag - (order-1)):
        stacked_cov = np.dot(companion, stacked_cov)
        autocovariances += [
            stacked_cov[:k_endog, -k_endog:]
        ]

    if forward_autocovariances:
        # Transpose each backward autocovariance to get E(y_t y_{t+j}')
        for i in range(len(autocovariances)):
            autocovariances[i] = autocovariances[i].T

    return autocovariances
def _compute_multivariate_sample_pacf(endog, maxlag):
    """
    Compute multivariate sample partial autocorrelations.

    Parameters
    ----------
    endog : array_like
        Sample data on which to compute sample autocovariances. Shaped
        `nobs` x `k_endog`.
    maxlag : integer
        Maximum lag for which to calculate sample partial autocorrelations.

    Returns
    -------
    sample_pacf : list
        A list of the first `maxlag` sample partial autocorrelation matrices.
        Each matrix is shaped `k_endog` x `k_endog`.
    """
    # First compute the sample autocovariances, then invert the recursion to
    # recover the corresponding partial autocorrelation matrices.
    sample_acovf = _compute_multivariate_sample_acovf(endog, maxlag)
    return _compute_multivariate_pacf_from_autocovariances(sample_acovf)
def _compute_multivariate_pacf_from_autocovariances(autocovariances,
                                                    order=None, k_endog=None):
    """
    Compute multivariate partial autocorrelations from autocovariances.

    Parameters
    ----------
    autocovariances : list
        Autocorrelations matrices. Should be a list of length `order` + 1,
        where each element is an array sized `k_endog` x `k_endog`.
    order : integer, optional
        The order of the autoregression. Default is
        `len(autocovariances) - 1`.
    k_endog : integer, optional
        The dimension of the data vector. Default is inferred from the first
        autocovariance matrix.

    Returns
    -------
    pacf : list
        List of first `order` multivariate partial autocorrelations.

    Notes
    -----
    Note that this computes multivariate partial autocorrelations.

    Corresponds to the inverse of Lemma 2.1 in Ansley and Kohn (1986). See
    `unconstrain_stationary_multivariate` for more details.

    Computes sample partial autocorrelations if sample autocovariances are
    given.
    """
    from scipy import linalg

    if order is None:
        order = len(autocovariances)-1
    if k_endog is None:
        k_endog = autocovariances[0].shape[0]

    # Now apply the Ansley and Kohn (1986) algorithm, except that instead of
    # calculating phi_{s+1, s+1} = L_s P_{s+1} {L_s^*}^{-1} (which requires
    # the partial autocorrelation P_{s+1} which is what we're trying to
    # calculate here), we calculate it as in Ansley and Newbold (1979), using
    # the autocovariances \Gamma_s and the forwards and backwards residual
    # variances \Sigma_s, \Sigma_s^*:
    # phi_{s+1, s+1} = [ \Gamma_{s+1}' - \phi_{s,1} \Gamma_s' - ... -
    #                    \phi_{s,s} \Gamma_1' ] {\Sigma_s^*}^{-1}

    # Forward and backward variances
    forward_variances = []   # \Sigma_s
    backward_variances = []  # \Sigma_s^*, s = 0, ..., p
    # \phi_{s,k}, s = 1, ..., p
    #             k = 1, ..., s+1
    forwards = []
    # \phi_{s,k}^*
    backwards = []

    forward_factors = []   # L_s
    backward_factors = []  # L_s^*, s = 0, ..., p

    # Ultimately we want to construct the partial autocorrelation matrices
    # Note that this is "1-indexed" in the sense that it stores P_1, ... P_p
    # rather than starting with P_0.
    partial_autocorrelations = []

    # We fill in the entries of phi_{s,k} as follows:
    # [1,1]
    # [2,2], [2,1]
    # [3,3], [3,1], [3,2]
    # ...
    # [p,p], [p,1], ..., [p,p-1]
    # the last row, correctly ordered, should be the same as the coefficient
    # matrices provided in the argument `constrained`
    for s in range(order):  # s = 0, ..., p-1
        prev_forwards = list(forwards)
        prev_backwards = list(backwards)
        forwards = []
        backwards = []

        # Create forward and backwards variances Sigma_s, Sigma*_s
        forward_variance = autocovariances[0].copy()
        backward_variance = autocovariances[0].T.copy()

        for k in range(s):
            forward_variance -= np.dot(prev_forwards[k],
                                       autocovariances[k+1])
            backward_variance -= np.dot(prev_backwards[k],
                                        autocovariances[k+1].T)

        forward_variances.append(forward_variance)
        backward_variances.append(backward_variance)

        # Cholesky factors (lower triangular)
        forward_factors.append(
            linalg.cholesky(forward_variances[s], lower=True)
        )
        backward_factors.append(
            linalg.cholesky(backward_variances[s], lower=True)
        )

        # Create the intermediate sum term
        if s == 0:
            # phi_11 = \Gamma_1' \Gamma_0^{-1}
            # phi_11 \Gamma_0 = \Gamma_1'
            # \Gamma_0 phi_11' = \Gamma_1
            forwards.append(linalg.cho_solve(
                (forward_factors[0], True), autocovariances[1]).T)
            # backwards.append(forwards[-1])
            # phi_11_star = \Gamma_1 \Gamma_0^{-1}
            # phi_11_star \Gamma_0 = \Gamma_1
            # \Gamma_0 phi_11_star' = \Gamma_1'
            backwards.append(linalg.cho_solve(
                (backward_factors[0], True), autocovariances[1].T).T)
        else:
            # G := \Gamma_{s+1}' -
            #      \phi_{s,1} \Gamma_s' - .. - \phi_{s,s} \Gamma_1'
            tmp_sum = autocovariances[s+1].T.copy()

            for k in range(s):
                tmp_sum -= np.dot(prev_forwards[k], autocovariances[s-k].T)

            # Create the "last" (k = s+1) matrix
            # Note: this is for k = s+1. However, below we then have to
            # fill in for k = 1, ..., s in order.
            # phi = G Sigma*^{-1}
            # phi Sigma* = G
            # Sigma*' phi' = G'
            # Sigma* phi' = G'
            # (because Sigma* is symmetric)
            forwards.append(linalg.cho_solve(
                (backward_factors[s], True), tmp_sum.T).T)

            # phi = G' Sigma^{-1}
            # phi Sigma = G'
            # Sigma' phi' = G
            # Sigma phi' = G
            # (because Sigma is symmetric)
            backwards.append(linalg.cho_solve(
                (forward_factors[s], True), tmp_sum).T)

        # Create the remaining k = 1, ..., s matrices,
        # only has an effect if s >= 1
        for k in range(s):
            forwards.insert(k, prev_forwards[k] - np.dot(
                forwards[-1], prev_backwards[s-(k+1)]))
            backwards.insert(k, prev_backwards[k] - np.dot(
                backwards[-1], prev_forwards[s-(k+1)]))

        # Partial autocorrelation matrix: P_{s+1}
        # P = L^{-1} phi L*
        # L P = (phi L*)
        partial_autocorrelations.append(linalg.solve_triangular(
            forward_factors[s], np.dot(forwards[s], backward_factors[s]),
            lower=True))

    return partial_autocorrelations
def _compute_multivariate_pacf_from_coefficients(constrained, error_variance,
                                                 order=None, k_endog=None):
    r"""
    Transform matrices corresponding to a stationary (or invertible) process
    to matrices with singular values less than one.

    Parameters
    ----------
    constrained : array or list
        The coefficients matrices. If a list, should be a list of length
        `order`, where each element is an array sized `k_endog` x `k_endog`.
        If an array, should be the coefficient matrices horizontally
        concatenated and sized `k_endog` x `k_endog * order`.
    error_variance : array
        The variance / covariance matrix of the error term. Should be sized
        `k_endog` x `k_endog`.
    order : integer, optional
        The order of the autoregression.
    k_endog : integer, optional
        The dimension of the data vector.

    Returns
    -------
    pacf : list
        List of first `order` multivariate partial autocorrelations.

    Notes
    -----
    Note that this computes multivariate partial autocorrelations.

    Corresponds to the inverse of Lemma 2.1 in Ansley and Kohn (1986). See
    `unconstrain_stationary_multivariate` for more details.

    Coefficients are assumed to be provided from the VAR model:

    .. math::
        y_t = A_1 y_{t-1} + \dots + A_p y_{t-p} + \varepsilon_t
    """
    if type(constrained) == list:
        order = len(constrained)
        k_endog = constrained[0].shape[0]
    else:
        k_endog, order = constrained.shape
        order //= k_endog

    # Get autocovariances for the process; these are defined to be
    # E z_t z_{t-j}'
    # However, we want E z_t z_{t+j}' = (E z_t z_{t-j}')'
    _acovf = _compute_multivariate_acovf_from_coefficients

    autocovariances = [
        autocovariance.T for autocovariance in
        _acovf(constrained, error_variance, maxlag=order)]

    return _compute_multivariate_pacf_from_autocovariances(autocovariances)
def unconstrain_stationary_multivariate(constrained, error_variance):
    """
    Transform constrained parameters used in likelihood evaluation
    to unconstrained parameters used by the optimizer.

    Parameters
    ----------
    constrained : array or list
        Constrained parameters of, e.g., an autoregressive or moving average
        component, to be transformed to arbitrary parameters used by the
        optimizer. If a list, should be a list of length `order`, where each
        element is an array sized `k_endog` x `k_endog`. If an array, should
        be the coefficient matrices horizontally concatenated and sized
        `k_endog` x `k_endog * order`.
    error_variance : array
        The variance / covariance matrix of the error term. Should be sized
        `k_endog` x `k_endog`. This is used as input in the algorithm even if
        is not transformed by it (when `transform_variance` is False).

    Returns
    -------
    unconstrained : array
        Unconstrained parameters used by the optimizer, to be transformed to
        stationary coefficients of, e.g., an autoregressive or moving average
        component. Will match the type of the passed `constrained`
        variable (so if a list was passed, a list will be returned).

    Notes
    -----
    Uses the list representation internally, even if an array is passed.

    References
    ----------
    .. [1] Ansley, Craig F., and Robert Kohn. 1986.
       "A Note on Reparameterizing a Vector Autoregressive Moving Average
       Model to Enforce Stationarity."
       Journal of Statistical Computation and Simulation 24 (2): 99-106.
    """
    use_list = type(constrained) == list
    if use_list:
        order = len(constrained)
        k_endog = constrained[0].shape[0]
    else:
        # Split the stacked array into a list of k_endog x k_endog blocks
        k_endog, order = constrained.shape
        order //= k_endog
        constrained = [
            constrained[:k_endog, i * k_endog:(i + 1) * k_endog]
            for i in range(order)
        ]

    # Step 1: convert matrices from the space of stationary
    # coefficient matrices to our "partial autocorrelation matrix" space
    # (matrices with singular values less than one)
    partial_autocorrelations = _compute_multivariate_pacf_from_coefficients(
        constrained, error_variance, order, k_endog)

    # Step 2: convert from arbitrary matrices to those with singular values
    # less than one.
    unconstrained = _unconstrain_sv_less_than_one(
        partial_autocorrelations, order, k_endog)

    # Return in the same (stacked or list) form as the input
    if not use_list:
        unconstrained = np.concatenate(unconstrained, axis=1)

    return unconstrained, error_variance
def validate_matrix_shape(name, shape, nrows, ncols, nobs):
    """
    Validate the shape of a possibly time-varying matrix, or raise an
    exception.

    Parameters
    ----------
    name : str
        The name of the matrix being validated (used in exception messages)
    shape : array_like
        The shape of the matrix to be validated. May be of size 2 or (if
        the matrix is time-varying) 3.
    nrows : int
        The expected number of rows.
    ncols : int
        The expected number of columns.
    nobs : int
        The number of observations (used to validate the last dimension of a
        time-varying matrix)

    Raises
    ------
    ValueError
        If the matrix is not of the desired shape.
    """
    ndim = len(shape)

    # Enforce dimension
    if ndim not in [2, 3]:
        raise ValueError('Invalid value for %s matrix. Requires a'
                         ' 2- or 3-dimensional array, got %d dimensions' %
                         (name, ndim))

    # Enforce the shape of the matrix
    if shape[0] != nrows:
        raise ValueError('Invalid dimensions for %s matrix: requires %d'
                         ' rows, got %d' % (name, nrows, shape[0]))
    if shape[1] != ncols:
        raise ValueError('Invalid dimensions for %s matrix: requires %d'
                         ' columns, got %d' % (name, ncols, shape[1]))

    # If we don't yet know `nobs`, don't allow time-varying arrays
    if nobs is None and not (ndim == 2 or shape[-1] == 1):
        raise ValueError('Invalid dimensions for %s matrix: time-varying'
                         ' matrices cannot be given unless `nobs` is specified'
                         ' (implicitly when a dataset is bound or else set'
                         ' explicitly)' % name)

    # Enforce time-varying array size
    if ndim == 3 and nobs is not None and shape[-1] not in [1, nobs]:
        raise ValueError('Invalid dimensions for time-varying %s'
                         ' matrix. Requires shape (*,*,%d), got %s' %
                         (name, nobs, str(shape)))
def validate_vector_shape(name, shape, nrows, nobs):
    """
    Validate the shape of a possibly time-varying vector, or raise an
    exception.

    Parameters
    ----------
    name : str
        The name of the vector being validated (used in exception messages)
    shape : array_like
        The shape of the vector to be validated. May be of size 1 or (if
        the vector is time-varying) 2.
    nrows : int
        The expected number of rows (elements of the vector).
    nobs : int
        The number of observations (used to validate the last dimension of a
        time-varying vector)

    Raises
    ------
    ValueError
        If the vector is not of the desired shape.
    """
    ndim = len(shape)

    # Enforce dimension
    if ndim not in [1, 2]:
        raise ValueError('Invalid value for %s vector. Requires a'
                         ' 1- or 2-dimensional array, got %d dimensions' %
                         (name, ndim))

    # Enforce the shape of the vector
    if shape[0] != nrows:
        raise ValueError('Invalid dimensions for %s vector: requires %d'
                         ' rows, got %d' % (name, nrows, shape[0]))

    # If we don't yet know `nobs`, don't allow time-varying arrays
    if nobs is None and not (ndim == 1 or shape[-1] == 1):
        raise ValueError('Invalid dimensions for %s vector: time-varying'
                         ' vectors cannot be given unless `nobs` is specified'
                         ' (implicitly when a dataset is bound or else set'
                         ' explicitly)' % name)

    # Enforce time-varying array size; the `nobs is not None` guard (as in
    # `validate_matrix_shape`) prevents formatting None with %d in the
    # (otherwise unreachable) nobs-is-None case
    if ndim == 2 and nobs is not None and shape[1] not in [1, nobs]:
        raise ValueError('Invalid dimensions for time-varying %s'
                         ' vector. Requires shape (*,%d), got %s' %
                         (name, nobs, str(shape)))
def reorder_missing_matrix(matrix, missing, reorder_rows=False,
                           reorder_cols=False, is_diagonal=False,
                           inplace=False, prefix=None):
    """
    Reorder the rows or columns of a time-varying matrix so that all
    non-missing values sit in the upper left corner of the matrix.

    Parameters
    ----------
    matrix : array_like
        The matrix to be reordered. Must have shape (n, m, nobs).
    missing : array_like of bool
        The vector of missing indices. Must have shape (k, nobs) where `k = n`
        if `reorder_rows is True` and `k = m` if `reorder_cols is True`.
    reorder_rows : bool, optional
        Whether or not the rows of the matrix should be re-ordered. Default
        is False.
    reorder_cols : bool, optional
        Whether or not the columns of the matrix should be re-ordered. Default
        is False.
    is_diagonal : bool, optional
        Whether or not the matrix is diagonal. If this is True, must also have
        `n = m`. Default is False.
    inplace : bool, optional
        Whether or not to reorder the matrix in-place.
    prefix : {'s', 'd', 'c', 'z'}, optional
        The Fortran prefix of the vector. Default is to automatically detect
        the dtype. This parameter should only be used with caution.

    Returns
    -------
    reordered_matrix : array_like
        The reordered matrix.
    """
    # Detect the BLAS prefix from the dtype unless explicitly given
    if prefix is None:
        prefix = find_best_blas_type((matrix,))[0]
    reorder_fn = prefix_reorder_missing_matrix_map[prefix]

    # Work on a Fortran-ordered copy unless reordering in-place
    target = matrix if inplace else np.copy(matrix, order='F')
    reorder_fn(target, np.asfortranarray(missing), reorder_rows, reorder_cols,
               is_diagonal)
    return target
def reorder_missing_vector(vector, missing, inplace=False, prefix=None):
    """
    Reorder the elements of a time-varying vector so that all non-missing
    values appear in the first elements of the vector.

    Parameters
    ----------
    vector : array_like
        The vector to be reordered. Must have shape (n, nobs).
    missing : array_like of bool
        The vector of missing indices. Must have shape (n, nobs).
    inplace : bool, optional
        Whether or not to reorder the vector in-place. Default is False.
    prefix : {'s', 'd', 'c', 'z'}, optional
        The Fortran prefix of the vector. Default is to automatically detect
        the dtype. This parameter should only be used with caution.

    Returns
    -------
    reordered_vector : array_like
        The reordered vector.
    """
    # Detect the BLAS prefix from the dtype unless explicitly given
    if prefix is None:
        prefix = find_best_blas_type((vector,))[0]
    reorder_fn = prefix_reorder_missing_vector_map[prefix]

    # Work on a Fortran-ordered copy unless reordering in-place
    target = vector if inplace else np.copy(vector, order='F')
    reorder_fn(target, np.asfortranarray(missing))
    return target
def copy_missing_matrix(A, B, missing, missing_rows=False, missing_cols=False,
                        is_diagonal=False, inplace=False, prefix=None):
    """
    Copy the rows or columns of a time-varying matrix where all non-missing
    values are in the upper left corner of the matrix.

    Parameters
    ----------
    A : array_like
        The matrix from which to copy. Must have shape (n, m, nobs) or
        (n, m, 1).
    B : array_like
        The matrix to copy to. Must have shape (n, m, nobs).
    missing : array_like of bool
        The vector of missing indices. Must have shape (k, nobs) where `k = n`
        if `missing_rows is True` and `k = m` if `missing_cols is True`.
    missing_rows : bool, optional
        Whether or not the rows of the matrix are a missing dimension. Default
        is False.
    missing_cols : bool, optional
        Whether or not the columns of the matrix are a missing dimension.
        Default is False.
    is_diagonal : bool, optional
        Whether or not the matrix is diagonal. If this is True, must also have
        `n = m`. Default is False.
    inplace : bool, optional
        Whether or not to copy to B in-place. Default is False.
    prefix : {'s', 'd', 'c', 'z'}, optional
        The Fortran prefix of the vector. Default is to automatically detect
        the dtype. This parameter should only be used with caution.

    Returns
    -------
    copied_matrix : array_like
        The matrix B with the non-missing submatrix of A copied onto it.
    """
    if prefix is None:
        prefix = find_best_blas_type((A, B))[0]
    copy = prefix_copy_missing_matrix_map[prefix]

    if not inplace:
        B = np.copy(B, order='F')

    # We may have been given an F-contiguous memoryview; in that case, we
    # don't want to alter it or convert it to a numpy array. Catch only the
    # expected exceptions: AttributeError when `A` is a plain ndarray (which
    # has no `is_f_contig` method) and the ValueError raised above for a
    # non-F-contiguous memoryview. A bare `except` here would also swallow
    # KeyboardInterrupt / SystemExit.
    try:
        if not A.is_f_contig():
            raise ValueError()
    except (AttributeError, ValueError):
        A = np.asfortranarray(A)

    copy(A, B, np.asfortranarray(missing), missing_rows, missing_cols,
         is_diagonal)
    return B
def copy_missing_vector(a, b, missing, inplace=False, prefix=None):
    """
    Copy the elements of a time-varying vector where all non-missing
    values are in the first elements of the vector.

    Parameters
    ----------
    a : array_like
        The vector from which to copy. Must have shape (n, nobs) or (n, 1).
    b : array_like
        The vector to copy to. Must have shape (n, nobs).
    missing : array_like of bool
        The vector of missing indices. Must have shape (n, nobs).
    inplace : bool, optional
        Whether or not to copy to b in-place. Default is False.
    prefix : {'s', 'd', 'c', 'z'}, optional
        The Fortran prefix of the vector. Default is to automatically detect
        the dtype. This parameter should only be used with caution.

    Returns
    -------
    copied_vector : array_like
        The vector b with the non-missing subvector of a copied onto it.
    """
    if prefix is None:
        prefix = find_best_blas_type((a, b))[0]
    copy = prefix_copy_missing_vector_map[prefix]

    if not inplace:
        b = np.copy(b, order='F')

    # We may have been given an F-contiguous memoryview; in that case, we
    # don't want to alter it or convert it to a numpy array. Catch only the
    # expected exceptions: AttributeError when `a` is a plain ndarray (which
    # has no `is_f_contig` method) and the ValueError raised above for a
    # non-F-contiguous memoryview. A bare `except` here would also swallow
    # KeyboardInterrupt / SystemExit.
    try:
        if not a.is_f_contig():
            raise ValueError()
    except (AttributeError, ValueError):
        a = np.asfortranarray(a)

    copy(a, b, np.asfortranarray(missing))
    return b
def copy_index_matrix(A, B, index, index_rows=False, index_cols=False,
                      is_diagonal=False, inplace=False, prefix=None):
    """
    Copy the rows or columns of a time-varying matrix where all non-index
    values are in the upper left corner of the matrix.

    Parameters
    ----------
    A : array_like
        The matrix from which to copy. Must have shape (n, m, nobs) or
        (n, m, 1).
    B : array_like
        The matrix to copy to. Must have shape (n, m, nobs).
    index : array_like of bool
        The vector of index indices. Must have shape (k, nobs) where `k = n`
        if `reorder_rows is True` and `k = m` if `reorder_cols is True`.
    index_rows : bool, optional
        Whether or not the rows of the matrix are an index dimension.
        Default is False.
    index_cols : bool, optional
        Whether or not the columns of the matrix are an index dimension.
        Default is False.
    is_diagonal : bool, optional
        Whether or not the matrix is diagonal. If this is True, must also
        have `n = m`. Default is False.
    inplace : bool, optional
        Whether or not to copy to B in-place. Default is False.
    prefix : {'s', 'd', 'c', 'z'}, optional
        The Fortran prefix of the vector. Default is to automatically detect
        the dtype. This parameter should only be used with caution.

    Returns
    -------
    copied_matrix : array_like
        The matrix B with the non-index submatrix of A copied onto it.
    """
    if prefix is None:
        prefix = find_best_blas_type((A, B))[0]
    # Look up the dtype-specific Cython copy routine.
    copy = prefix_copy_index_matrix_map[prefix]

    if not inplace:
        B = np.copy(B, order='F')

    # We may have been given an F-contiguous memoryview; in that case, we
    # don't want to alter it or convert it to a numpy array.
    # Narrowed from a bare `except:` (which also swallowed SystemExit and
    # KeyboardInterrupt): AttributeError means `A` has no `is_f_contig`
    # (e.g. it is an ndarray); ValueError is raised above when the
    # memoryview is not F-contiguous.
    try:
        if not A.is_f_contig():
            raise ValueError()
    except (AttributeError, ValueError):
        A = np.asfortranarray(A)

    copy(A, B, np.asfortranarray(index), index_rows, index_cols,
         is_diagonal)

    return B
def copy_index_vector(a, b, index, inplace=False, prefix=None):
    """
    Reorder the elements of a time-varying vector where all non-index
    values are in the first elements of the vector.

    Parameters
    ----------
    a : array_like
        The vector from which to copy. Must have shape (n, nobs) or (n, 1).
    b : array_like
        The vector to copy to. Must have shape (n, nobs).
    index : array_like of bool
        The vector of index indices. Must have shape (n, nobs).
    inplace : bool, optional
        Whether or not to copy to b in-place. Default is False.
    prefix : {'s', 'd', 'c', 'z'}, optional
        The Fortran prefix of the vector. Default is to automatically detect
        the dtype. This parameter should only be used with caution.

    Returns
    -------
    copied_vector : array_like
        The vector b with the non-index subvector of a copied onto it.
    """
    if prefix is None:
        prefix = find_best_blas_type((a, b))[0]
    # Look up the dtype-specific Cython copy routine.
    copy = prefix_copy_index_vector_map[prefix]

    if not inplace:
        b = np.copy(b, order='F')

    # We may have been given an F-contiguous memoryview; in that case, we
    # don't want to alter it or convert it to a numpy array.
    # Narrowed from a bare `except:` (which also swallowed SystemExit and
    # KeyboardInterrupt): AttributeError means `a` has no `is_f_contig`
    # (e.g. it is an ndarray); ValueError is raised above when the
    # memoryview is not F-contiguous.
    try:
        if not a.is_f_contig():
            raise ValueError()
    except (AttributeError, ValueError):
        a = np.asfortranarray(a)

    copy(a, b, np.asfortranarray(index))

    return b
| bsd-3-clause |
pompiduskus/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). LSHForest index
have sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundreds of dimensions. Higher dimensional datasets tends to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5

# Initialize the range of `n_samples`: log-spaced index sizes.
# Use the builtin `int` here: the `np.int` alias was deprecated in
# NumPy 1.20 and removed in NumPy 1.24.
n_samples_values = np.logspace(np.log10(n_samples_min),
                               np.log10(n_samples_max),
                               n_steps).astype(int)

# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
                         n_features=n_features, centers=n_centers, shuffle=True,
                         random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]

# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []

# Calculate the average query time
for n_samples in n_samples_values:
    X = index_data[:n_samples]

    # Initialize LSHForest for queries of a single neighbor
    lshf = LSHForest(n_estimators=20, n_candidates=200,
                     n_neighbors=10).fit(X)
    nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
                            n_neighbors=10).fit(X)
    time_approx = []
    time_exact = []
    accuracy = []

    for i in range(n_iter):
        # pick one query at random to study query time variability in LSHForest
        # Reshape to (1, n_features): `kneighbors` expects a 2D array of
        # samples (passing a bare 1D vector is deprecated in scikit-learn).
        query = queries[rng.randint(0, n_queries)].reshape(1, -1)

        t0 = time.time()
        exact_neighbors = nbrs.kneighbors(query, return_distance=False)
        time_exact.append(time.time() - t0)

        t0 = time.time()
        approx_neighbors = lshf.kneighbors(query, return_distance=False)
        time_approx.append(time.time() - t0)

        # Fraction of the approximate neighbors that are true 10-NN.
        accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())

    average_time_exact = np.mean(time_exact)
    average_time_approx = np.mean(time_approx)
    speedup = np.array(time_exact) / np.array(time_approx)
    average_speedup = np.mean(speedup)
    mean_accuracy = np.mean(accuracy)
    std_accuracy = np.std(accuracy)
    print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
          "accuracy: %0.2f +/-%0.2f" %
          (n_samples, average_time_exact, average_time_approx, average_speedup,
           mean_accuracy, std_accuracy))

    accuracies.append(mean_accuracy)
    std_accuracies.append(std_accuracy)
    average_times_exact.append(average_time_exact)
    average_times_approx.append(average_time_approx)
    std_times_approx.append(np.std(time_approx))
    average_speedups.append(average_speedup)
    std_speedups.append(np.std(speedup))

# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
             fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
         label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
          "nearest neighbors queries")

# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
             fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")

# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")

plt.show()
| bsd-3-clause |
r-mart/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be casted as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess wether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
    # NOTE: we put the following in a 'if __name__ == "__main__"' protected
    # block to be able to use a multi-core grid search that also works under
    # Windows, see: http://docs.python.org/library/multiprocessing.html#windows
    # The multiprocessing module is used as the backend of joblib.Parallel
    # that is used when n_jobs != 1 in GridSearchCV

    # the training data folder must be passed as first argument
    # (raises IndexError if the script is run without an argument)
    movie_reviews_data_folder = sys.argv[1]
    dataset = load_files(movie_reviews_data_folder, shuffle=False)
    print("n_samples: %d" % len(dataset.data))

    # split the dataset in training and test set:
    docs_train, docs_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, test_size=0.25, random_state=None)

    # TASK: Build a vectorizer / classifier pipeline that filters out tokens
    # that are too rare or too frequent

    # TASK: Build a grid search to find out whether unigrams or bigrams are
    # more useful.
    # Fit the pipeline on the training set using grid search for the parameters

    # TASK: print the cross-validated scores for the each parameters set
    # explored by the grid search

    # TASK: Predict the outcome on the testing set and store it in a variable
    # named y_predicted
    # NOTE(review): this file is an exercise skeleton -- `y_predicted` is
    # intentionally undefined until the TASK above is completed, so running
    # the script as-is raises NameError below.

    # Print the classification report
    print(metrics.classification_report(y_test, y_predicted,
                                        target_names=dataset.target_names))

    # Print and plot the confusion matrix
    cm = metrics.confusion_matrix(y_test, y_predicted)
    print(cm)

    # import matplotlib.pyplot as plt
    # plt.matshow(cm)
    # plt.show()
| bsd-3-clause |
rohit21122012/DCASE2013 | runs/2013/xgboost100/src/evaluation.py | 38 | 42838 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import numpy
import math
from sklearn import metrics
class DCASE2016_SceneClassification_Metrics():
    """DCASE 2016 scene classification metrics.

    Accumulates class-wise accuracies and per-class system/reference counts
    over one or more folds (one `evaluate()` call per fold) and reports them
    through `results()`.  Can also be used as a context manager; `__exit__`
    returns the results dict.

    Typical usage::

        metric = DCASE2016_SceneClassification_Metrics(class_list=scene_labels)
        metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
        results = metric.results()
    """
    def __init__(self, class_list):
        """Initialize the metric accumulator.

        Parameters
        ----------
        class_list : list
            Evaluated scene labels in the list.
        """
        # Accumulators are None until the first evaluate() call; afterwards a
        # 1-D array (single fold) or a 2-D array stacked row-per-fold.
        self.accuracies_per_class = None
        self.Nsys = None   # per-class counts of system output labels, per fold
        self.Nref = None   # per-class counts of reference labels, per fold
        self.class_list = class_list
        self.eps = numpy.spacing(1)  # guard against division by zero
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        # Context-manager exit: finalize and return the aggregated results.
        return self.results()
    def accuracies(self, y_true, y_pred, labels):
        """Calculate per-class accuracy from the confusion matrix.

        Parameters
        ----------
        y_true : numpy.array
            Ground truth array, list of scene labels.
        y_pred : numpy.array
            System output array, list of scene labels.
        labels : list
            List of scene labels.

        Returns
        -------
        numpy.array [shape=(number of scene labels,)]
            Accuracy per scene label class (confusion-matrix diagonal
            divided by the row sum, i.e. per-class recall).
        """
        confusion_matrix = metrics.confusion_matrix(y_true=y_true, y_pred=y_pred, labels=labels).astype(float)
        # Diagonal = correct decisions; row sum = reference count per class.
        temp = numpy.divide(numpy.diag(confusion_matrix), numpy.sum(confusion_matrix, 1)+self.eps)
        return temp
    def evaluate(self, annotated_ground_truth, system_output):
        """Accumulate one fold of system output vs. annotated ground truth.

        Use the `results` method to get results.

        Parameters
        ----------
        annotated_ground_truth : numpy.array
            Ground truth array, list of scene labels.
        system_output : numpy.array
            System output array, list of scene labels.

        Returns
        -------
        nothing
        """
        accuracies_per_class = self.accuracies(y_pred=system_output, y_true=annotated_ground_truth, labels=self.class_list)
        # First fold initializes the accumulator; later folds are stacked
        # row-wise, giving the 2-D shapes handled in results().
        if self.accuracies_per_class is None:
            self.accuracies_per_class = accuracies_per_class
        else:
            self.accuracies_per_class = numpy.vstack((self.accuracies_per_class, accuracies_per_class))
        # Count per-class label occurrences in output and reference lists.
        Nref = numpy.zeros(len(self.class_list))
        Nsys = numpy.zeros(len(self.class_list))
        for class_id, class_label in enumerate(self.class_list):
            for item in system_output:
                if item == class_label:
                    Nsys[class_id] += 1
            for item in annotated_ground_truth:
                if item == class_label:
                    Nref[class_id] += 1
        if self.Nref is None:
            self.Nref = Nref
        else:
            self.Nref = numpy.vstack((self.Nref, Nref))
        if self.Nsys is None:
            self.Nsys = Nsys
        else:
            self.Nsys = numpy.vstack((self.Nsys, Nsys))
    def results(self):
        """Get the accumulated results.

        Returns a dict of the form::

            {
                'class_wise_data': {'office': {'Nsys': 10, 'Nref': 7}, ...},
                'class_wise_accuracy': {'office': 0.6, 'home': 0.4, ...},
                'overall_accuracy': <mean of per-class accuracies>,
                'Nsys': 100,
                'Nref': 100,
            }

        NOTE(review): calling this before any `evaluate()` call fails
        because the accumulators are still None -- confirm callers always
        evaluate at least one fold first.

        Returns
        -------
        results : dict
            Results dict.
        """
        results = {
            'class_wise_data': {},
            'class_wise_accuracy': {},
            'overall_accuracy': numpy.mean(self.accuracies_per_class)
        }
        # Accumulators are 2-D after multiple folds, 1-D after a single fold.
        if len(self.Nsys.shape) == 2:
            results['Nsys'] = int(sum(sum(self.Nsys)))
            results['Nref'] = int(sum(sum(self.Nref)))
        else:
            results['Nsys'] = int(sum(self.Nsys))
            results['Nref'] = int(sum(self.Nref))
        for class_id, class_label in enumerate(self.class_list):
            if len(self.accuracies_per_class.shape) == 2:
                # Multi-fold: average the per-fold accuracy column.
                results['class_wise_accuracy'][class_label] = numpy.mean(self.accuracies_per_class[:, class_id])
                results['class_wise_data'][class_label] = {
                    'Nsys': int(sum(self.Nsys[:, class_id])),
                    'Nref': int(sum(self.Nref[:, class_id])),
                }
            else:
                results['class_wise_accuracy'][class_label] = numpy.mean(self.accuracies_per_class[class_id])
                results['class_wise_data'][class_label] = {
                    'Nsys': int(self.Nsys[class_id]),
                    'Nref': int(self.Nref[class_id]),
                }
        return results
class EventDetectionMetrics(object):
    """Base class for sound event detection metric classes.

    Provides shared helpers to convert an event list (a list of dicts with
    'event_label', 'event_onset' and 'event_offset' fields, in seconds)
    into a binary event-roll matrix.
    """

    def __init__(self, class_list):
        """Initialize the metric base class.

        Parameters
        ----------
        class_list : list
            List of class labels to be evaluated.
        """
        self.class_list = class_list
        self.eps = numpy.spacing(1)  # guard against division by zero

    def max_event_offset(self, data):
        """Get maximum event offset from an event list.

        Parameters
        ----------
        data : list
            Event list, list of event dicts.

        Returns
        -------
        max_offset : float >= 0
            Maximum event offset (0 for an empty list).
        """
        # Fixed: the original accumulated into a local named `max`, shadowing
        # the builtin.  Appending 0 preserves the original behavior exactly:
        # an empty list (or all-negative offsets) yields 0.
        return max([event['event_offset'] for event in data] + [0])

    def list_to_roll(self, data, time_resolution=0.01):
        """Convert an event list into an event roll.

        The event roll is a binary matrix indicating event activity within
        time segments defined by `time_resolution`.

        Parameters
        ----------
        data : list
            Event list, list of event dicts.
        time_resolution : float > 0
            Time resolution used when converting events into the event roll.

        Returns
        -------
        event_roll : numpy.ndarray [shape=(math.ceil(data_length * 1 / time_resolution) + 1, amount of classes)]
            Event roll.
        """
        # Size the roll to cover the latest offset in the list.
        data_length = self.max_event_offset(data)
        event_roll = numpy.zeros((math.ceil(data_length * 1 / time_resolution) + 1, len(self.class_list)))

        # Fill-in event_roll: mark the [onset, offset] segment range of each
        # event as active for its class column.
        for event in data:
            pos = self.class_list.index(event['event_label'].rstrip())
            onset = math.floor(event['event_onset'] * 1 / time_resolution)
            offset = math.ceil(event['event_offset'] * 1 / time_resolution) + 1
            event_roll[onset:offset, pos] = 1

        return event_roll
class DCASE2016_EventDetection_SegmentBasedMetrics(EventDetectionMetrics):
    """DCASE2016 segment-based metrics for sound event detection.

    Ground truth and system output are compared on fixed-length time
    segments (`time_resolution` seconds each).

    Supported metrics:

    - Overall: error rate (ER), substitutions (S), insertions (I),
      deletions (D), F-score (F1)
    - Class-wise: error rate (ER), insertions (I), deletions (D),
      F-score (F1)

    Call `evaluate()` once per system-output / ground-truth pair to
    accumulate intermediate statistics and `results()` to obtain the
    aggregated metrics.  Works as a context manager; `__exit__` returns
    the results dict.
    """
    def __init__(self, class_list, time_resolution=1.0):
        """Initialize the accumulator.

        Parameters
        ----------
        class_list : list
            List of class labels to be evaluated.
        time_resolution : float > 0
            Time resolution used when converting event into event roll.
            (Default value = 1.0)
        """
        self.time_resolution = time_resolution
        # Running totals of segment-level counts over all evaluated pairs.
        self.overall = {
            'Ntp': 0.0,
            'Ntn': 0.0,
            'Nfp': 0.0,
            'Nfn': 0.0,
            'Nref': 0.0,
            'Nsys': 0.0,
            'ER': 0.0,
            'S': 0.0,
            'D': 0.0,
            'I': 0.0,
        }
        # Per-class running totals, keyed by class label.
        self.class_wise = {}
        for class_label in class_list:
            self.class_wise[class_label] = {
                'Ntp': 0.0,
                'Ntn': 0.0,
                'Nfp': 0.0,
                'Nfn': 0.0,
                'Nref': 0.0,
                'Nsys': 0.0,
            }
        EventDetectionMetrics.__init__(self, class_list=class_list)
    def __enter__(self):
        # Initialize class and return it
        return self
    def __exit__(self, type, value, traceback):
        # Finalize evaluation and return results
        return self.results()
    def evaluate(self, annotated_ground_truth, system_output):
        """Accumulate statistics for one system output / ground truth pair.

        Use the `results` method to get results.

        Parameters
        ----------
        annotated_ground_truth : list
            Ground truth event list (dicts with label/onset/offset).
        system_output : list
            System output event list (dicts with label/onset/offset).

        Returns
        -------
        self
        """
        # Convert event list into frame-based representation
        system_event_roll = self.list_to_roll(data=system_output, time_resolution=self.time_resolution)
        annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=self.time_resolution)
        # Fix durations of both event_rolls to be equal (zero-pad the shorter)
        if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
            padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
            system_event_roll = numpy.vstack((system_event_roll, padding))
        if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
            padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
            annotated_event_roll = numpy.vstack((annotated_event_roll, padding))
        # Compute segment-based overall metrics, one segment (row) at a time.
        # Rolls are 0/1, so sums and differences classify each class slot as
        # true positive / true negative / false positive / false negative.
        for segment_id in range(0, annotated_event_roll.shape[0]):
            annotated_segment = annotated_event_roll[segment_id, :]
            system_segment = system_event_roll[segment_id, :]
            Ntp = sum(system_segment + annotated_segment > 1)
            Ntn = sum(system_segment + annotated_segment == 0)
            Nfp = sum(system_segment - annotated_segment > 0)
            Nfn = sum(annotated_segment - system_segment > 0)
            Nref = sum(annotated_segment)
            Nsys = sum(system_segment)
            # Segment-level substitutions, deletions, insertions, error count.
            S = min(Nref, Nsys) - Ntp
            D = max(0, Nref - Nsys)
            I = max(0, Nsys - Nref)
            ER = max(Nref, Nsys) - Ntp
            self.overall['Ntp'] += Ntp
            self.overall['Ntn'] += Ntn
            self.overall['Nfp'] += Nfp
            self.overall['Nfn'] += Nfn
            self.overall['Nref'] += Nref
            self.overall['Nsys'] += Nsys
            self.overall['S'] += S
            self.overall['D'] += D
            self.overall['I'] += I
            self.overall['ER'] += ER
        # Class-wise counts, one class (column) at a time.
        for class_id, class_label in enumerate(self.class_list):
            annotated_segment = annotated_event_roll[:, class_id]
            system_segment = system_event_roll[:, class_id]
            Ntp = sum(system_segment + annotated_segment > 1)
            Ntn = sum(system_segment + annotated_segment == 0)
            Nfp = sum(system_segment - annotated_segment > 0)
            Nfn = sum(annotated_segment - system_segment > 0)
            Nref = sum(annotated_segment)
            Nsys = sum(system_segment)
            self.class_wise[class_label]['Ntp'] += Ntp
            self.class_wise[class_label]['Ntn'] += Ntn
            self.class_wise[class_label]['Nfp'] += Nfp
            self.class_wise[class_label]['Nfn'] += Nfn
            self.class_wise[class_label]['Nref'] += Nref
            self.class_wise[class_label]['Nsys'] += Nsys
        return self
    def results(self):
        """Get accumulated results.

        Returns a dict with three sections::

            {
                'overall': {'Pre', 'Rec', 'F', 'ER', 'S', 'D', 'I'},
                'class_wise': {<label>: {'Pre', 'Rec', 'F', 'ER', 'D', 'I',
                                         'Nref', 'Nsys', 'Ntp', 'Nfn', 'Nfp'},
                               ...},
                'class_wise_average': {'F', 'ER'},
            }

        Returns
        -------
        results : dict
            Results dict.
        """
        results = {'overall': {},
                   'class_wise': {},
                   'class_wise_average': {},
                   }
        # Overall metrics (micro-averaged over all segments).
        results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
        results['overall']['Rec'] = self.overall['Ntp'] / self.overall['Nref']
        results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
        results['overall']['ER'] = self.overall['ER'] / self.overall['Nref']
        results['overall']['S'] = self.overall['S'] / self.overall['Nref']
        results['overall']['D'] = self.overall['D'] / self.overall['Nref']
        results['overall']['I'] = self.overall['I'] / self.overall['Nref']
        # Class-wise metrics
        class_wise_F = []
        class_wise_ER = []
        for class_id, class_label in enumerate(self.class_list):
            if class_label not in results['class_wise']:
                results['class_wise'][class_label] = {}
            results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (self.class_wise[class_label]['Nsys'] + self.eps)
            results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (self.class_wise[class_label]['Nref'] + self.eps)
            results['class_wise'][class_label]['F'] = 2 * ((results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
            results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn'] + self.class_wise[class_label]['Nfp']) / (self.class_wise[class_label]['Nref'] + self.eps)
            results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (self.class_wise[class_label]['Nref'] + self.eps)
            results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (self.class_wise[class_label]['Nref'] + self.eps)
            results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
            results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
            results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
            results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
            results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']
            class_wise_F.append(results['class_wise'][class_label]['F'])
            class_wise_ER.append(results['class_wise'][class_label]['ER'])
        # Macro averages over classes.
        results['class_wise_average']['F'] = numpy.mean(class_wise_F)
        results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)
        return results
class DCASE2016_EventDetection_EventBasedMetrics(EventDetectionMetrics):
    """DCASE2016 event-based metrics for sound event detection.

    Events are matched as whole units: a system event counts as correct
    when its label matches and both its onset and offset fall within a
    `t_collar` tolerance of a reference event.

    Supported metrics:

    - Overall: error rate (ER), substitutions (S), insertions (I),
      deletions (D), F-score (F1)
    - Class-wise: error rate (ER), insertions (I), deletions (D),
      F-score (F1)

    Call `evaluate()` once per system-output / ground-truth pair to
    accumulate intermediate statistics and `results()` to obtain the
    aggregated metrics.  Works as a context manager; `__exit__` returns
    the results dict.
    """
    def __init__(self, class_list, time_resolution=1.0, t_collar=0.2):
        """Initialize the accumulator.

        Parameters
        ----------
        class_list : list
            List of class labels to be evaluated.
        time_resolution : float > 0
            Time resolution used when converting event into event roll.
            (Default value = 1.0)
        t_collar : float > 0
            Time collar for event onset and offset condition, in seconds.
            (Default value = 0.2)
        """
        self.time_resolution = time_resolution
        self.t_collar = t_collar
        # Running totals of event-level counts over all evaluated pairs.
        self.overall = {
            'Nref': 0.0,
            'Nsys': 0.0,
            'Nsubs': 0.0,
            'Ntp': 0.0,
            'Nfp': 0.0,
            'Nfn': 0.0,
        }
        # Per-class running totals, keyed by class label.
        self.class_wise = {}
        for class_label in class_list:
            self.class_wise[class_label] = {
                'Nref': 0.0,
                'Nsys': 0.0,
                'Ntp': 0.0,
                'Ntn': 0.0,
                'Nfp': 0.0,
                'Nfn': 0.0,
            }
        EventDetectionMetrics.__init__(self, class_list=class_list)

    def __enter__(self):
        # Initialize class and return it
        return self

    def __exit__(self, type, value, traceback):
        # Finalize evaluation and return results
        return self.results()

    def evaluate(self, annotated_ground_truth, system_output):
        """Accumulate statistics for one system output / ground truth pair.

        Use the `results` method to get results.

        Parameters
        ----------
        annotated_ground_truth : list
            Ground truth event list (dicts with label/onset/offset).
        system_output : list
            System output event list (dicts with label/onset/offset).

        Returns
        -------
        nothing
        """
        # Overall metrics
        # Total number of detected and reference events
        Nsys = len(system_output)
        Nref = len(annotated_ground_truth)
        sys_correct = numpy.zeros(Nsys, dtype=bool)
        ref_correct = numpy.zeros(Nref, dtype=bool)
        # Number of correctly transcribed events: label match plus
        # onset/offset within a t_collar range.  Each system event is
        # matched at most once (break on first match).
        for j in range(0, len(annotated_ground_truth)):
            for i in range(0, len(system_output)):
                label_condition = annotated_ground_truth[j]['event_label'] == system_output[i]['event_label']
                onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
                                                       system_event=system_output[i],
                                                       t_collar=self.t_collar)
                offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
                                                         system_event=system_output[i],
                                                         t_collar=self.t_collar)
                if label_condition and onset_condition and offset_condition:
                    ref_correct[j] = True
                    sys_correct[i] = True
                    break
        Ntp = numpy.sum(sys_correct)
        # Fixed: the original used numpy.negative() on boolean arrays, which
        # NumPy >= 1.13 rejects with a TypeError; logical_not is the correct
        # boolean inversion and is equivalent here.
        sys_leftover = numpy.nonzero(numpy.logical_not(sys_correct))[0]
        ref_leftover = numpy.nonzero(numpy.logical_not(ref_correct))[0]
        # Substitutions: unmatched pairs whose times align but labels differ.
        Nsubs = 0
        for j in ref_leftover:
            for i in sys_leftover:
                onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
                                                       system_event=system_output[i],
                                                       t_collar=self.t_collar)
                offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
                                                         system_event=system_output[i],
                                                         t_collar=self.t_collar)
                if onset_condition and offset_condition:
                    Nsubs += 1
                    break
        Nfp = Nsys - Ntp - Nsubs
        Nfn = Nref - Ntp - Nsubs
        self.overall['Nref'] += Nref
        self.overall['Nsys'] += Nsys
        self.overall['Ntp'] += Ntp
        self.overall['Nsubs'] += Nsubs
        self.overall['Nfp'] += Nfp
        self.overall['Nfn'] += Nfn
        # Class-wise metrics
        for class_id, class_label in enumerate(self.class_list):
            Nref = 0.0
            Nsys = 0.0
            Ntp = 0.0
            # Count event frequencies in the ground truth
            for i in range(0, len(annotated_ground_truth)):
                if annotated_ground_truth[i]['event_label'] == class_label:
                    Nref += 1
            # Count event frequencies in the system output
            for i in range(0, len(system_output)):
                if system_output[i]['event_label'] == class_label:
                    Nsys += 1
            # Count correctly transcribed events within this class.
            for j in range(0, len(annotated_ground_truth)):
                for i in range(0, len(system_output)):
                    if annotated_ground_truth[j]['event_label'] == class_label and system_output[i]['event_label'] == class_label:
                        onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
                                                               system_event=system_output[i],
                                                               t_collar=self.t_collar)
                        offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
                                                                 system_event=system_output[i],
                                                                 t_collar=self.t_collar)
                        if onset_condition and offset_condition:
                            Ntp += 1
                            break
            Nfp = Nsys - Ntp
            Nfn = Nref - Ntp
            self.class_wise[class_label]['Nref'] += Nref
            self.class_wise[class_label]['Nsys'] += Nsys
            self.class_wise[class_label]['Ntp'] += Ntp
            self.class_wise[class_label]['Nfp'] += Nfp
            self.class_wise[class_label]['Nfn'] += Nfn

    def onset_condition(self, annotated_event, system_event, t_collar=0.200):
        """Onset condition: does the event pair fulfill the condition?

        Condition:

        - event onsets are within t_collar of each other

        Parameters
        ----------
        annotated_event : dict
            Event dict.
        system_event : dict
            Event dict.
        t_collar : float > 0
            Defines how close event onsets have to be in order to be
            considered a match. In seconds. (Default value = 0.2)

        Returns
        -------
        result : bool
            Condition result.
        """
        return math.fabs(annotated_event['event_onset'] - system_event['event_onset']) <= t_collar

    def offset_condition(self, annotated_event, system_event, t_collar=0.200, percentage_of_length=0.5):
        """Offset condition: does the event pair fulfill the condition?

        Condition:

        - event offsets are within t_collar of each other, or
        - the system event offset is within
          percentage_of_length * annotated event length

        Parameters
        ----------
        annotated_event : dict
            Event dict.
        system_event : dict
            Event dict.
        t_collar : float > 0
            Defines how close event offsets have to be in order to be
            considered a match. In seconds. (Default value = 0.2)
        percentage_of_length : float [0-1]
            Fraction of the annotated event length used as an alternative,
            length-dependent tolerance.

        Returns
        -------
        result : bool
            Condition result.
        """
        annotated_length = annotated_event['event_offset'] - annotated_event['event_onset']
        return math.fabs(annotated_event['event_offset'] - system_event['event_offset']) <= max(t_collar, percentage_of_length * annotated_length)

    def results(self):
        """Get accumulated results.

        Returns a dict with three sections::

            {
                'overall': {'Pre', 'Rec', 'F', 'ER', 'S', 'D', 'I'},
                'class_wise': {<label>: {'Pre', 'Rec', 'F', 'ER', 'D', 'I',
                                         'Nref', 'Nsys', 'Ntp', 'Nfn', 'Nfp'},
                               ...},
                'class_wise_average': {'F', 'ER'},
            }

        Returns
        -------
        results : dict
            Results dict.
        """
        results = {
            'overall': {},
            'class_wise': {},
            'class_wise_average': {},
        }
        # Overall metrics (micro-averaged over all events).
        results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
        results['overall']['Rec'] = self.overall['Ntp'] / self.overall['Nref']
        results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
        results['overall']['ER'] = (self.overall['Nfn'] + self.overall['Nfp'] + self.overall['Nsubs']) / self.overall['Nref']
        results['overall']['S'] = self.overall['Nsubs'] / self.overall['Nref']
        results['overall']['D'] = self.overall['Nfn'] / self.overall['Nref']
        results['overall']['I'] = self.overall['Nfp'] / self.overall['Nref']
        # Class-wise metrics
        class_wise_F = []
        class_wise_ER = []
        for class_label in self.class_list:
            if class_label not in results['class_wise']:
                results['class_wise'][class_label] = {}
            results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (self.class_wise[class_label]['Nsys'] + self.eps)
            results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (self.class_wise[class_label]['Nref'] + self.eps)
            results['class_wise'][class_label]['F'] = 2 * ((results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
            results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn']+self.class_wise[class_label]['Nfp']) / (self.class_wise[class_label]['Nref'] + self.eps)
            results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (self.class_wise[class_label]['Nref'] + self.eps)
            results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (self.class_wise[class_label]['Nref'] + self.eps)
            results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
            results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
            results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
            results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
            results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']
            class_wise_F.append(results['class_wise'][class_label]['F'])
            class_wise_ER.append(results['class_wise'][class_label]['ER'])
        # Class-wise average (macro averages over classes).
        results['class_wise_average']['F'] = numpy.mean(class_wise_F)
        results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)
        return results
class DCASE2013_EventDetection_Metrics(EventDetectionMetrics):
    """Legacy DCASE2013 metrics, converted from the provided Matlab implementation

    Supported metrics:
    - Frame based
      - F-score (F)
      - AEER
    - Event based
      - Onset
        - F-Score (F)
        - AEER
      - Onset-offset
        - F-Score (F)
        - AEER
    - Class based
      - Onset
        - F-Score (F)
        - AEER
      - Onset-offset
        - F-Score (F)
        - AEER
    """

    def frame_based(self, annotated_ground_truth, system_output, resolution=0.01):
        """Compute frame-based recall/precision/F-score and AEER.

        Both event lists are rasterized onto a fixed time grid of
        ``resolution`` seconds per frame before counting.

        Args:
            annotated_ground_truth: list of event dicts (``event_label``,
                ``event_onset``, ``event_offset``) -- the reference.
            system_output: list of event dicts -- the system detections.
            resolution: frame length in seconds used for rasterization.

        Returns:
            dict with keys 'Rec', 'Pre', 'F' and 'AEER'.
        """
        # Convert event list into frame-based representation
        system_event_roll = self.list_to_roll(data=system_output, time_resolution=resolution)
        annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=resolution)

        # Fix durations of both event_rolls to be equal by zero-padding the
        # shorter roll at the end.
        if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
            padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
            system_event_roll = numpy.vstack((system_event_roll, padding))

        if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
            padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
            annotated_event_roll = numpy.vstack((annotated_event_roll, padding))

        # Compute frame-based counts over all classes and frames.
        # Ntp: both rolls active; Nfp: system only; Nfn: reference only.
        Nref = sum(sum(annotated_event_roll))
        Ntot = sum(sum(system_event_roll))
        Ntp = sum(sum(system_event_roll + annotated_event_roll > 1))
        Nfp = sum(sum(system_event_roll - annotated_event_roll > 0))
        Nfn = sum(sum(annotated_event_roll - system_event_roll > 0))
        # Substitutions are counted as min(FP, FN), per the original metric.
        Nsubs = min(Nfp, Nfn)

        # eps guards against division by zero for empty references/outputs.
        eps = numpy.spacing(1)

        results = dict()
        results['Rec'] = Ntp / (Nref + eps)
        results['Pre'] = Ntp / (Ntot + eps)
        results['F'] = 2 * ((results['Pre'] * results['Rec']) / (results['Pre'] + results['Rec'] + eps))
        results['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)

        return results

    def event_based(self, annotated_ground_truth, system_output):
        """Compute event-based metrics (onset-only and onset-offset).

        A system event is a correct onset match when it shares the label of a
        reference event and its onset lies within +/-100 ms of it; it is
        additionally a correct onset-offset match when its offset lies within
        +/-100 ms or 50% of the reference event's duration.

        Args:
            annotated_ground_truth: the ground truth list of events.
            system_output: the output of the event detection system.

        Returns:
            dict with 'onset' and 'onset-offset' sub-dicts, each holding
            'Rec', 'Pre', 'F' and 'AEER'.
        """
        # Total number of detected and reference events
        Ntot = len(system_output)
        Nref = len(annotated_ground_truth)

        # Number of correctly transcribed events, onset within a +/-100 ms range
        Ncorr = 0
        NcorrOff = 0

        for j in range(0, len(annotated_ground_truth)):
            for i in range(0, len(system_output)):
                if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (math.fabs(annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
                    Ncorr += 1

                    # If offset within a +/-100 ms range or within 50% of ground-truth event's duration
                    if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(0.1, 0.5 * (annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j]['event_onset'])):
                        NcorrOff += 1

                    break  # In order to not evaluate duplicates

        # Compute onset-only event-based metrics
        eps = numpy.spacing(1)
        results = {
            'onset': {},
            'onset-offset': {},
        }
        Nfp = Ntot - Ncorr
        Nfn = Nref - Ncorr
        Nsubs = min(Nfp, Nfn)
        results['onset']['Rec'] = Ncorr / (Nref + eps)
        results['onset']['Pre'] = Ncorr / (Ntot + eps)
        results['onset']['F'] = 2 * (
            (results['onset']['Pre'] * results['onset']['Rec']) / (
                results['onset']['Pre'] + results['onset']['Rec'] + eps))

        results['onset']['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)

        # Compute onset-offset event-based metrics
        NfpOff = Ntot - NcorrOff
        NfnOff = Nref - NcorrOff
        NsubsOff = min(NfpOff, NfnOff)
        results['onset-offset']['Rec'] = NcorrOff / (Nref + eps)
        results['onset-offset']['Pre'] = NcorrOff / (Ntot + eps)
        results['onset-offset']['F'] = 2 * ((results['onset-offset']['Pre'] * results['onset-offset']['Rec']) / (
            results['onset-offset']['Pre'] + results['onset-offset']['Rec'] + eps))
        results['onset-offset']['AEER'] = (NfnOff + NfpOff + NsubsOff) / (Nref + eps)

        return results

    def class_based(self, annotated_ground_truth, system_output):
        """Compute class-wise event-based metrics (onset-only and onset-offset).

        Counts are accumulated per class; reported values are means over the
        classes that actually occur in the ground truth. Matching rules are
        the same as in :meth:`event_based`.

        Args:
            annotated_ground_truth: the ground truth list of events.
            system_output: the output of the event detection system.

        Returns:
            dict with 'onset' and 'onset-offset' sub-dicts, each holding
            class-averaged 'Rec', 'Pre', 'F' and 'AEER'.
        """
        # Total number of detected and reference events per class
        Ntot = numpy.zeros((len(self.class_list), 1))
        for event in system_output:
            pos = self.class_list.index(event['event_label'])
            Ntot[pos] += 1

        Nref = numpy.zeros((len(self.class_list), 1))
        for event in annotated_ground_truth:
            pos = self.class_list.index(event['event_label'])
            Nref[pos] += 1

        I = (Nref > 0).nonzero()[0]  # index for classes present in ground-truth

        # Number of correctly transcribed events per class, onset within a +/-100 ms range
        Ncorr = numpy.zeros((len(self.class_list), 1))
        NcorrOff = numpy.zeros((len(self.class_list), 1))
        for j in range(0, len(annotated_ground_truth)):
            for i in range(0, len(system_output)):
                if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (
                        math.fabs(
                            annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
                    pos = self.class_list.index(system_output[i]['event_label'])
                    Ncorr[pos] += 1

                    # If offset within a +/-100 ms range or within 50% of ground-truth event's duration
                    if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(
                            0.1, 0.5 * (
                                annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j][
                                    'event_onset'])):
                        pos = self.class_list.index(system_output[i]['event_label'])
                        NcorrOff[pos] += 1

                    break  # In order to not evaluate duplicates

        # Compute onset-only class-wise event-based metrics
        eps = numpy.spacing(1)
        results = {
            'onset': {},
            'onset-offset': {},
        }
        Nfp = Ntot - Ncorr
        Nfn = Nref - Ncorr
        Nsubs = numpy.minimum(Nfp, Nfn)
        tempRec = Ncorr[I] / (Nref[I] + eps)
        tempPre = Ncorr[I] / (Ntot[I] + eps)
        results['onset']['Rec'] = numpy.mean(tempRec)
        results['onset']['Pre'] = numpy.mean(tempPre)
        tempF = 2 * ((tempPre * tempRec) / (tempPre + tempRec + eps))
        results['onset']['F'] = numpy.mean(tempF)
        tempAEER = (Nfn[I] + Nfp[I] + Nsubs[I]) / (Nref[I] + eps)
        results['onset']['AEER'] = numpy.mean(tempAEER)

        # Compute onset-offset class-wise event-based metrics
        NfpOff = Ntot - NcorrOff
        NfnOff = Nref - NcorrOff
        NsubsOff = numpy.minimum(NfpOff, NfnOff)
        tempRecOff = NcorrOff[I] / (Nref[I] + eps)
        tempPreOff = NcorrOff[I] / (Ntot[I] + eps)
        results['onset-offset']['Rec'] = numpy.mean(tempRecOff)
        results['onset-offset']['Pre'] = numpy.mean(tempPreOff)
        tempFOff = 2 * ((tempPreOff * tempRecOff) / (tempPreOff + tempRecOff + eps))
        results['onset-offset']['F'] = numpy.mean(tempFOff)
        tempAEEROff = (NfnOff[I] + NfpOff[I] + NsubsOff[I]) / (Nref[I] + eps)
        results['onset-offset']['AEER'] = numpy.mean(tempAEEROff)

        return results
def main(argv):
    """Run a small usage example of the event-detection metric classes.

    Builds a three-event toy system-output / ground-truth pair and prints
    the legacy DCASE2013 frame/event/class based metrics and the DCASE2016
    segment-based metrics.

    NOTE(review): this function uses Python 2 ``print`` statements, so the
    example (not the metric classes themselves) only runs under Python 2.
    """
    # Examples to show usage and required data structures
    class_list = ['class1', 'class2', 'class3']
    system_output = [
        {
            'event_label': 'class1',
            'event_onset': 0.1,
            'event_offset': 1.0
        },
        {
            'event_label': 'class2',
            'event_onset': 4.1,
            'event_offset': 4.7
        },
        {
            'event_label': 'class3',
            'event_onset': 5.5,
            'event_offset': 6.7
        }
    ]
    annotated_groundtruth = [
        {
            'event_label': 'class1',
            'event_onset': 0.1,
            'event_offset': 1.0
        },
        {
            'event_label': 'class2',
            'event_onset': 4.2,
            'event_offset': 5.4
        },
        {
            'event_label': 'class3',
            'event_onset': 5.5,
            'event_offset': 6.7
        }
    ]
    dcase2013metric = DCASE2013_EventDetection_Metrics(class_list=class_list)

    print 'DCASE2013'
    print 'Frame-based:', dcase2013metric.frame_based(system_output=system_output,
                                                      annotated_ground_truth=annotated_groundtruth)
    print 'Event-based:', dcase2013metric.event_based(system_output=system_output,
                                                      annotated_ground_truth=annotated_groundtruth)
    print 'Class-based:', dcase2013metric.class_based(system_output=system_output,
                                                      annotated_ground_truth=annotated_groundtruth)

    dcase2016_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=class_list)

    print 'DCASE2016'
    print dcase2016_metric.evaluate(system_output=system_output, annotated_ground_truth=annotated_groundtruth).results()


if __name__ == "__main__":
    sys.exit(main(sys.argv))
| mit |
eadgarchen/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_functions.py | 10 | 18972 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import types as tp
import numpy as np
import six
from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
# Pandas support is optional. Pandas writes a temporary file during import,
# so both a failed import and a failed temp-file write (IOError) disable
# the pandas code paths.
try:
  import pandas as pd  # pylint: disable=g-import-not-at-top
  HAS_PANDAS = True
except (IOError, ImportError):
  HAS_PANDAS = False
def _fill_array(arr, seq, fillvalue=0):
"""
Recursively fills padded arr with elements from seq.
If length of seq is less than arr padded length, fillvalue used.
Args:
arr: Padded tensor of shape [batch_size, ..., max_padded_dim_len].
seq: Non-padded list of data sampels of shape
[batch_size, ..., padded_dim(None)]
fillvalue: Default fillvalue to use.
"""
if arr.ndim == 1:
try:
len_ = len(seq)
except TypeError:
len_ = 0
arr[:len_] = seq
arr[len_:] = fillvalue
else:
for subarr, subseq in six.moves.zip_longest(arr, seq, fillvalue=()):
_fill_array(subarr, subseq, fillvalue)
def _pad_if_needed(batch_key_item, fillvalue=0):
""" Returns padded batch.
Args:
batch_key_item: List of data samples of any type with shape
[batch_size, ..., padded_dim(None)].
fillvalue: Default fillvalue to use.
Returns:
Padded with zeros tensor of same type and shape
[batch_size, ..., max_padded_dim_len].
Raises:
ValueError if data samples have different shapes (except last padded dim).
"""
shapes = [seq.shape[:-1] if len(seq.shape) > 0 else -1
for seq in batch_key_item]
if not all(shapes[0] == x for x in shapes):
raise ValueError("Array shapes must match.")
last_length = [seq.shape[-1] if len(seq.shape) > 0 else 0
for seq in batch_key_item]
if all([x == last_length[0] for x in last_length]):
return batch_key_item
batch_size = len(batch_key_item)
max_sequence_length = max(last_length)
result_batch = np.zeros(
shape=[batch_size] + list(shapes[0]) + [max_sequence_length],
dtype=batch_key_item[0].dtype)
_fill_array(result_batch, batch_key_item, fillvalue)
return result_batch
def _get_integer_indices_for_next_batch(
batch_indices_start, batch_size, epoch_end, array_length,
current_epoch, total_epochs):
"""Returns the integer indices for next batch.
If total epochs is not None and current epoch is the final epoch, the end
index of the next batch should not exceed the `epoch_end` (i.e., the final
batch might not have size `batch_size` to avoid overshooting the last epoch).
Args:
batch_indices_start: Integer, the index to start next batch.
batch_size: Integer, size of batches to return.
epoch_end: Integer, the end index of the epoch. The epoch could start from a
random position, so `epoch_end` provides the end index for that.
array_length: Integer, the length of the array.
current_epoch: Integer, the epoch number has been emitted.
total_epochs: Integer or `None`, the total number of epochs to emit. If
`None` will run forever.
Returns:
A tuple of a list with integer indices for next batch and `current_epoch`
value after the next batch.
Raises:
OutOfRangeError if `current_epoch` is not less than `total_epochs`.
"""
if total_epochs is not None and current_epoch >= total_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % current_epoch)
batch_indices_end = batch_indices_start + batch_size
batch_indices = [j % array_length for j in
range(batch_indices_start, batch_indices_end)]
epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end]
current_epoch += len(epoch_end_indices)
if total_epochs is None or current_epoch < total_epochs:
return (batch_indices, current_epoch)
# Now we might have emitted more data for expected epochs. Need to trim.
final_epoch_end_inclusive = epoch_end_indices[
-(current_epoch - total_epochs + 1)]
batch_indices = batch_indices[:final_epoch_end_inclusive + 1]
return (batch_indices, total_epochs)
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
class _GeneratorFeedFn(object):
"""Creates feed dictionaries from `Generator` of `dicts` of numpy arrays."""
def __init__(self,
placeholders,
generator,
batch_size,
random_start=False,
seed=None,
num_epochs=None,
pad_value=None):
first_sample = next(generator())
if len(placeholders) != len(first_sample):
raise ValueError("Expected {} placeholders; got {}.".format(
len(first_sample), len(placeholders)))
self._keys = sorted(list(first_sample.keys()))
self._col_placeholders = placeholders
self._generator_function = generator
self._iterator = generator()
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
self._pad_value = pad_value
random.seed(seed)
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
list_dict = {}
list_dict_size = 0
while list_dict_size < self._batch_size:
try:
data_row = next(self._iterator)
except StopIteration:
self._epoch += 1
self._iterator = self._generator_function()
data_row = next(self._iterator)
for index, key in enumerate(self._keys):
if key not in data_row.keys():
raise KeyError("key mismatch between dicts emitted by GenFun "
"Expected {} keys; got {}".format(
self._keys, data_row.keys()))
list_dict.setdefault(self._col_placeholders[index],
list()).append(data_row[key])
list_dict_size += 1
if self._pad_value is not None:
feed_dict = {key: np.asarray(_pad_if_needed(item, self._pad_value))
for key, item in list(list_dict.items())}
else:
feed_dict = {key: np.asarray(item)
for key, item in list(list_dict.items())}
return feed_dict
def _enqueue_data(data,
                  capacity,
                  shuffle=False,
                  min_after_dequeue=None,
                  num_threads=1,
                  seed=None,
                  name="enqueue_input",
                  enqueue_size=1,
                  num_epochs=None,
                  pad_value=None):
  """Creates a queue filled from a numpy array or pandas `DataFrame`.

  Returns a queue filled with the rows of the given (`OrderedDict` of) array
  or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
  `Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
  numpy arrays, the first enqueued `Tensor` contains the row number.

  Args:
    data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or a generator
      yielding `dict`s of numpy arrays or pandas `DataFrame` that will be read
      into the queue.
    capacity: the capacity of the queue.
    shuffle: whether or not to shuffle the rows of the array.
    min_after_dequeue: minimum number of elements that can remain in the queue
      after a dequeue operation. Only used when `shuffle` is true. If not set,
      defaults to `capacity` / 4.
    num_threads: number of threads used for reading and enqueueing.
    seed: used to seed shuffling and reader starting points.
    name: a scope name identifying the data.
    enqueue_size: the number of rows to enqueue per step.
    num_epochs: limit enqueuing to a specified number of epochs, if provided.
    pad_value: default value for dynamic padding of data samples, if provided.

  Returns:
    A queue filled with the rows of the given (`OrderedDict` of) array or
    `DataFrame`.

  Raises:
    TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
      arrays, a numpy `ndarray`, or a generator producing these.
    NotImplementedError: padding and shuffling data at the same time.
    NotImplementedError: padding usage with non generator data type.
  """
  with ops.name_scope(name):
    # Dispatch on the concrete input type: each branch fixes the queue
    # component dtypes/shapes and selects the matching feed-function class.
    if isinstance(data, np.ndarray):
      types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
      queue_shapes = [(), data.shape[1:]]
      get_feed_fn = _ArrayFeedFn
    elif isinstance(data, collections.OrderedDict):
      types = [dtypes.int64] + [
          dtypes.as_dtype(col.dtype) for col in data.values()
      ]
      queue_shapes = [()] + [col.shape[1:] for col in data.values()]
      get_feed_fn = _OrderedDictNumpyFeedFn
    elif isinstance(data, tp.FunctionType):
      # Generator case: peek at the first emitted dict to derive component
      # dtypes and shapes (keys are used in sorted order).
      x_first_el = six.next(data())
      x_first_keys = sorted(x_first_el.keys())
      x_first_values = [x_first_el[key] for key in x_first_keys]
      types = [dtypes.as_dtype(col.dtype) for col in x_first_values]
      queue_shapes = [col.shape for col in x_first_values]
      get_feed_fn = _GeneratorFeedFn
    elif HAS_PANDAS and isinstance(data, pd.DataFrame):
      types = [
          dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
      ]
      queue_shapes = [() for _ in types]
      get_feed_fn = _PandasFeedFn
    else:
      # NOTE(review): this message omits the OrderedDict and generator inputs
      # that the dispatch above also accepts.
      raise TypeError(
          "data must be either a numpy array or pandas DataFrame if pandas is "
          "installed; got {}".format(type(data).__name__))

    pad_data = pad_value is not None
    if pad_data and get_feed_fn is not _GeneratorFeedFn:
      raise NotImplementedError(
          "padding is only available with generator usage")
    if shuffle and pad_data:
      raise NotImplementedError(
          "padding and shuffling data at the same time is not implemented")

    # TODO(jamieas): TensorBoard warnings for all warnings below once available.

    if num_threads > 1 and num_epochs is not None:
      logging.warning(
          "enqueue_data was called with num_epochs and num_threads > 1. "
          "num_epochs is applied per thread, so this will produce more "
          "epochs than you probably intend. "
          "If you want to limit epochs, use one thread.")
    if shuffle and num_threads > 1 and num_epochs is not None:
      logging.warning(
          "enqueue_data was called with shuffle=True, num_threads > 1, and "
          "num_epochs. This will create multiple threads, all reading the "
          "array/dataframe in order adding to the same shuffling queue; the "
          "results will likely not be sufficiently shuffled.")
    if not shuffle and num_threads > 1:
      logging.warning(
          "enqueue_data was called with shuffle=False and num_threads > 1. "
          "This will create multiple threads, all reading the "
          "array/dataframe in order. If you want examples read in order, use"
          " one thread; if you want multiple threads, enable shuffling.")

    # Select the queue implementation matching the requested behavior.
    if shuffle:
      min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
                              min_after_dequeue)
      queue = data_flow_ops.RandomShuffleQueue(
          capacity,
          min_after_dequeue,
          dtypes=types,
          shapes=queue_shapes,
          seed=seed)
    elif pad_data:
      min_after_dequeue = 0  # just for the summary text
      # Mark the last (padded) dimension as unknown so PaddingFIFOQueue pads
      # each dequeued batch dynamically.
      queue_shapes = list(map(
          lambda x: tuple(list(x[:-1]) + [None]) if len(x) > 0 else x,
          queue_shapes))
      queue = data_flow_ops.PaddingFIFOQueue(
          capacity, dtypes=types, shapes=queue_shapes)
    else:
      min_after_dequeue = 0  # just for the summary text
      queue = data_flow_ops.FIFOQueue(
          capacity, dtypes=types, shapes=queue_shapes)

    # One enqueue op plus one feed function per reader thread; each thread
    # gets its own derived seed so random starts differ.
    enqueue_ops = []
    feed_fns = []

    for i in range(num_threads):
      # Note the placeholders have no shapes, so they will accept any
      # enqueue_size. enqueue_many below will break them up.
      placeholders = [array_ops.placeholder(t) for t in types]

      enqueue_ops.append(queue.enqueue_many(placeholders))
      seed_i = None if seed is None else (i + 1) * seed
      if not pad_data:
        feed_fns.append(
            get_feed_fn(
                placeholders,
                data,
                enqueue_size,
                random_start=shuffle,
                seed=seed_i,
                num_epochs=num_epochs))
      else:
        feed_fns.append(
            get_feed_fn(
                placeholders,
                data,
                enqueue_size,
                random_start=shuffle,
                seed=seed_i,
                num_epochs=num_epochs,
                pad_value=pad_value))

    runner = fqr._FeedingQueueRunner(  # pylint: disable=protected-access
        queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
    queue_runner.add_queue_runner(runner)

    # Export a summary of queue fullness, normalized over the usable range
    # [min_after_dequeue, capacity].
    full = (math_ops.cast(
        math_ops.maximum(0, queue.size() - min_after_dequeue),
        dtypes.float32) * (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
                    (queue.name, min_after_dequeue,
                     capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    return queue
| apache-2.0 |
fbagirov/scikit-learn | examples/neighbors/plot_regression.py | 349 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)

# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#         Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause (C) INRIA


###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors

np.random.seed(0)
# 40 training points drawn uniformly from [0, 5), sorted for clean plotting.
X = np.sort(5 * np.random.rand(40, 1), axis=0)
# 500 evenly spaced query points used to draw the prediction curve.
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()

# Add noise to targets: every 5th of the 40 targets (8 values) gets uniform
# noise in (-0.5, 0.5].
y[::5] += 1 * (0.5 - np.random.rand(8))

###############################################################################
# Fit regression model
n_neighbors = 5

# One subplot per weighting scheme: 'uniform' averages the k neighbors,
# 'distance' weights them by inverse distance.
for i, weights in enumerate(['uniform', 'distance']):
    knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
    y_ = knn.fit(X, y).predict(T)

    plt.subplot(2, 1, i + 1)
    plt.scatter(X, y, c='k', label='data')
    plt.plot(T, y_, c='g', label='prediction')
    plt.axis('tight')
    plt.legend()
    plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
                                                                weights))

plt.show()
| bsd-3-clause |
acdh-oeaw/dig_ed_cat | charts/ml_views.py | 1 | 1520 | import collections
import json
import pandas as pd
import numpy as np
from django.shortcuts import render
from django.http import JsonResponse
from collections import Counter
from sklearn.cluster import KMeans
from django.contrib.contenttypes.models import ContentType
from browsing.views import serialize
from editions.models import Edition
def hashed(string):
    """Map a value to a deterministic non-negative integer feature.

    The previous implementation used the builtin ``hash``, whose result for
    strings is salted per interpreter process (PYTHONHASHSEED), which made
    the downstream KMeans features -- and therefore the reported clusters --
    non-reproducible across server restarts. An MD5-based hash keeps the
    mapping stable everywhere.

    Args:
        string: any value; it is stringified before hashing.

    Returns:
        A deterministic integer in [0, 2**32).
    """
    import hashlib  # local import: keeps the module's import block untouched
    digest = hashlib.md5(str(string).encode("utf-8")).hexdigest()
    return int(digest, 16) % (2 ** 32)
def kmeans_json(request):
    """Cluster all catalogued editions with k-means and return chart JSON.

    Serializes every Edition, hashes a fixed feature subset into integers,
    runs scikit-learn KMeans on the result and reports cluster sizes in the
    payload format the chart frontend expects.
    """
    edition_type = ContentType.objects.get(app_label="editions", model="edition")
    rows = [serialize(instance)
            for instance in edition_type.get_all_objects_for_this_type()]
    headers = [f.name for f in Edition._meta.get_fields()]
    df = pd.DataFrame(rows, columns=headers)
    df.index = pd.DataFrame(df[df.columns[2]].tolist())
    # Reduce to the clustering features and turn each cell into an integer.
    df = df[['scholarly', 'digital', 'edition', 'api', 'language']]
    df = df.applymap(hashed)
    kmeans = KMeans(random_state=0).fit(np.array(df))
    cluster_sizes = dict(collections.Counter(kmeans.labels_))
    payload = [[int(label), int(count)]
               for label, count in cluster_sizes.items()]
    data = {
        "items": len(Edition.objects.all()),
        "title": "Kmeans of Editions (experimental)",
        "subtitle": "Vectorizes the cataloge entries and clusters them by 'k-means'.",
        "legendx": "Cluster",
        "legendy": "# of Editions",
        "measuredObject": "Editions",
        "ymin": 0,
        "payload": payload
    }
    return JsonResponse(data)
| mit |
ngoix/OCRF | examples/semi_supervised/plot_label_propagation_structure.py | 45 | 2433 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)

# Authors: Clay Woolam <clay@woolam.org>
#          Andreas Mueller <amueller@ais.uni-bonn.de>
# Licence: BSD

import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles

# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
# Only two samples are labeled: the first (on the outer circle) and the
# last (on the inner circle); all others are marked -1 (unlabeled).
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner

###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)

###############################################################################
# Plot output labels
# transduction_ holds the label inferred for every training sample.
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
# Left panel: the raw data with the two seed labels.
plt.subplot(1, 2, 1)
plt.scatter(X[labels == outer, 0], X[labels == outer, 1], color='navy',
            marker='s', lw=0, label="outer labeled", s=10)
plt.scatter(X[labels == inner, 0], X[labels == inner, 1], color='c',
            marker='s', lw=0, label='inner labeled', s=10)
plt.scatter(X[labels == -1, 0], X[labels == -1, 1], color='darkorange',
            marker='.', label='unlabeled')
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Raw data (2 classes=outer and inner)")

# Right panel: the labels propagated to every sample.
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plt.scatter(X[outer_numbers, 0], X[outer_numbers, 1], color='navy',
            marker='s', lw=0, s=10, label="outer learned")
plt.scatter(X[inner_numbers, 0], X[inner_numbers, 1], color='c',
            marker='s', lw=0, s=10, label="inner learned")
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Labels learned with Label Spreading (KNN)")

plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
mfergie/human-hive | humanhive/swarm.py | 1 | 12847 | """Module for managing swarm positions"""
import math
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel
class CosSinSwarm:
    """
    A toy module for doing 2 channel volumes using cos/sin functions.
    """

    def __init__(self, sample_rate):
        self.sample_rate = sample_rate
        # Phase carried over between calls so successive batches are
        # continuous.
        self.start_theta = 0
        self.period = 20

    def sample_swarm_volumes(self, frame_count):
        """Return (frame_count, 2) per-sample volumes: cos left, sin right."""
        # Phase advanced by this request: frame_count samples of one
        # `period`-second rotation.
        delta = (frame_count / (self.sample_rate * self.period)) * 2 * math.pi
        end_theta = self.start_theta + delta
        theta = np.linspace(self.start_theta, end_theta, frame_count)
        self.start_theta = end_theta
        return np.stack((np.cos(theta), np.sin(theta)), axis=1)
class SwarmLinear:
    """
    A swarm that moves in straight lines between hive positions.

    The swarm travels at constant speed from hive to hive, lingers for a
    few seconds on arrival, then heads for a neighbouring hive in the
    hive list -- occasionally reversing direction or jumping to a random
    hive.
    """
    def __init__(self,
                 hives,
                 swarm_speed,
                 sample_rate,
                 p_change_direction=0.2,
                 p_jump_hives=0.1):
        """
        Initialise a swarm.

        Parameters
        ----------
        hives: array_like, (H, 2)
            An array containing the 2D positions of each hive.
            H denotes the number of hives.
        swarm_speed: float
            The speed that the swarm will travel. This is given in units/second
            where the units are the same as those used for the hive positions.
        sample_rate: int
            The sample rate.
        p_change_direction: float, optional
            Probability, evaluated on each arrival, of reversing the
            direction of travel through the hive list.
        p_jump_hives: float, optional
            Probability, evaluated on each arrival, of picking a uniformly
            random destination hive instead of a neighbour.
        """
        self.hives = np.asarray(hives)
        self.n_hives = len(hives)
        self.swarm_speed = swarm_speed
        # Compute how far the swarm should travel per frame (units/frame).
        self.swarm_speed_upf = self.swarm_speed / sample_rate
        self.sample_rate = sample_rate
        self.p_change_direction = p_change_direction
        self.p_jump_hives = p_jump_hives
        # Initialise movement. Set initial position to hive 0, and set them off
        # towards hive 1
        self.swarm_position = self.hives[0]
        self.destination_hive = 1
        # n_linger_frames stores the number of frames that the swarm has remaining
        # to linger at the current position.
        self.n_linger_frames = 0
        # Stores the direction of the swarm movement as +1 or -1. This is
        # randomly sampled from p_change_direction
        self.swarm_direction = 1

    def sample_swarm_positions(self, n_samples):
        """
        Samples the position of a swarm as it moves between hives.

        Updates the internal state of the object with the new
        swarm position such that subsequent calls will progress
        the swarm on its path.

        Parameters
        ----------
        n_samples: int
            The number of samples (frames) to generate.

        Returns
        -------
        positions: array_like, (n_samples, 2)
            The 2D position of the swarm at each frame.
        """
        # Compute the frame index that movement will start at (non-zero
        # while the swarm is still lingering at a hive).
        if self.n_linger_frames > n_samples:
            # Still lingering for this whole buffer.
            movement_start_frame = n_samples
        else:
            movement_start_frame = self.n_linger_frames

        # Vector from the current position to the destination hive.
        swarm_destination_position = self.hives[self.destination_hive]
        destination_vector = (
            swarm_destination_position - self.swarm_position)
        distance_to_destination = np.linalg.norm(destination_vector)

        # movement_per_frame_vector gives the offset for each frame of
        # movement (unit direction scaled by per-frame speed).
        movement_per_frame_vector = (
            (destination_vector / distance_to_destination) *
            self.swarm_speed_upf)

        # Number of movement frames needed to reach the destination.
        # BUGFIX: this used to be derived from the x component alone
        # (dest_x / step_x), which divided by zero whenever the
        # destination was vertically aligned with the current position.
        # Using the Euclidean distance is equivalent in all other cases.
        destination_frame = int(np.ceil(
            distance_to_destination / self.swarm_speed_upf))

        n_remaining_frames = n_samples - movement_start_frame
        if destination_frame > n_remaining_frames:
            movement_end_frame = n_samples
        else:
            movement_end_frame = movement_start_frame + destination_frame

        # Now set the positions
        positions = np.zeros((n_samples, 2), dtype=np.float32)
        # Lingering prefix: hold the current position.
        positions[:movement_start_frame] = self.swarm_position[np.newaxis,:]
        # Movement middle: step along the per-frame offset.
        frame_indices = np.arange(1, (movement_end_frame - movement_start_frame) + 1)
        relative_positions = (
            movement_per_frame_vector[np.newaxis,:] * frame_indices[:,np.newaxis])
        positions[movement_start_frame:movement_end_frame] = (
            relative_positions + self.swarm_position)
        # Movement end, copy last movement position to finish
        positions[movement_end_frame:] = positions[movement_end_frame-1]

        ###
        # Now update internal state
        ###
        self.swarm_position = positions[-1]
        if movement_end_frame < n_samples:
            # We reached destination, sample new destination hive
            # Change direction?
            if np.random.rand(1)[0] < self.p_change_direction:
                # Yes, change direction
                self.swarm_direction *= -1
                print("Swapping direction: {}".format(self.swarm_direction))
            self.destination_hive = (self.destination_hive + self.swarm_direction) % self.n_hives
            # Jump hives?
            if np.random.rand(1)[0] < self.p_jump_hives:
                # Yes, generate a random destination hive
                self.destination_hive = np.random.randint(self.n_hives)
                print("Jumping hives, new destination: {}".format(
                    self.destination_hive))
            # Sample a linger time: 3 seconds, minus the part of it that
            # already elapsed at the end of this buffer.
            self.n_linger_frames = (
                3 * self.sample_rate - (n_samples - movement_end_frame))
            print("swarm_position: {}, destination_hive: {}, n_linger_frames: {}".format(
                self.swarm_position, self.destination_hive, self.n_linger_frames))

        if movement_start_frame > 0:
            # NOTE(review): when an arrival happened in this same buffer,
            # this also shortens the freshly sampled linger time by the
            # frames lingered at the start of the buffer -- confirm that
            # is intended.
            self.n_linger_frames -= movement_start_frame

        return positions

    def sample_swarm_volumes(self, n_samples):
        """Return per-hive volumes, shape (n_samples, H), for the next
        n_samples frames of swarm movement."""
        swarm_positions = self.sample_swarm_positions(n_samples)
        return hive_volumes(self.hives, swarm_positions)
class Swarm:
    """
    A swarm that moves around a circle of hives.

    The swarm sits at one hive for a random linger time, then walks at
    constant angular speed (clockwise or anticlockwise) to another
    randomly chosen hive on the circle.
    """
    def __init__(self, hive_radius, hives, swarm_speed, sample_rate):
        """
        Initialise a swarm.

        Parameters
        ----------
        hive_radius: float
            If the hives are in a circular arrangement, this
            is the radius of the circle in m.
        hives: array_like, (H, 2)
            An array containing the 2D positions of each hive.
            H denotes the number of hives.
        swarm_speed: float
            The speed that the swarm will travel (along the circle).
        sample_rate: int
            The sample rate.
        """
        self.hive_radius = hive_radius
        self.hives = hives
        self.n_hives = len(hives)
        self.swarm_speed = swarm_speed
        print(swarm_speed)
        # Angular speed in radians per second.
        self.swarm_speed_rad = swarm_speed/hive_radius
        self.sample_rate = sample_rate
        # The swarm position is stored as an angle (radians) from a
        # reference point on the circle.
        self.swarm_position = 0

    def sample_swarm_positions(self, n_samples):
        """
        Samples the position of a swarm as it moves around the
        circle at random.

        Updates the internal state of the object with the new
        swarm position such that subsequent calls will progress
        the swarm on its path.

        Parameters
        ----------
        n_samples: int
            The number of samples to generate.

        Returns
        -------
        positions: array_like, (N, 2)
            N = n_samples 2D positions of the swarm on the circle.
        """
        # Give options for how long the hive can linger for
        min_linger_time = 3  # s
        max_linger_time = 30  # s
        linger_options = range(min_linger_time, max_linger_time)
        # Choose a random hive to begin at and update position
        hive_no = np.random.randint(self.n_hives)
        self.swarm_position = np.pi/12 + np.pi*hive_no/6
        total_samples = n_samples
        s_counter = 0  # Sample counter
        positions = np.empty((total_samples, 2))
        # Angular step per frame; also used as the arrival tolerance.
        increment = self.swarm_speed_rad/self.sample_rate
        tolerance = increment
        while s_counter < total_samples:
            # Allocate positions while hive is stationary
            t_stay = linger_options[np.random.randint(len(linger_options))]
            sample_num = t_stay * self.sample_rate
            new_s_counter = s_counter + sample_num
            current_position = np.array(
                [[self.hive_radius * np.cos(self.swarm_position),
                  self.hive_radius * np.sin(self.swarm_position)]])
            if s_counter + sample_num < total_samples:
                positions[s_counter:new_s_counter, :] = \
                    np.full([sample_num,2], current_position)
            else:
                positions[s_counter:,:] = \
                    np.full([total_samples-s_counter,2], current_position)
            # Choose the next hive at random.
            # BUGFIX: this was hard-coded as np.random.randint(7), which is
            # inconsistent with the actual number of hives (self.n_hives).
            hive_no = np.random.randint(self.n_hives)
            destination_angle = np.pi/12 + np.pi*hive_no/6
            # Swarm direction,
            # Go either anticlockwise (+1) or clockwise (-1)
            s_dir = 2 * np.random.randint(2) - 1
            # Allocate positions while moving
            while (new_s_counter < total_samples and
                   self.swarm_position != destination_angle):
                # Increment swarm position
                self.swarm_position = self.swarm_position + s_dir*increment
                current_position = (
                    np.array([[self.hive_radius * np.cos(self.swarm_position),
                               self.hive_radius * np.sin(self.swarm_position)]]))
                # Update positions array
                positions[new_s_counter, :] = current_position
                new_s_counter += 1
                # Snap to the destination once within one step of it,
                # comparing angles modulo 2*pi.
                pos = (
                    self.swarm_position -
                    np.floor(self.swarm_position/(2*np.pi)) * 2 * np.pi)
                if abs(destination_angle - pos) < tolerance:
                    self.swarm_position = destination_angle
            s_counter = new_s_counter
        return positions

    def sample_swarm_volumes(self, n_samples):
        """Return per-hive volumes, shape (n_samples, H), for the next
        n_samples frames of swarm movement."""
        swarm_positions = self.sample_swarm_positions(n_samples)
        return hive_volumes(self.hives, swarm_positions)
class SwarmBuffer:
    """
    Wraps Swarm and precomputes volume samples for a large sequence.
    The precomputed buffer is then replayed cyclically, chunk by chunk,
    through sample_swarm_volumes.
    """
    def __init__(self, *args, **kwargs):
        self.swarm = Swarm(*args, **kwargs)
        # Precompute a long stretch of volumes up front.
        # NOTE(review): the comment in the original said "10 mins" but
        # 41000 * 60 * 1 frames is one minute at ~41 kHz -- confirm the
        # intended duration/sample rate.
        self.volumes = self.swarm.sample_swarm_volumes(41000 * 60 * 1)
        self.next_sample = 0

    def sample_swarm_volumes(self, n_samples):
        """Return the next n_samples frames of volumes, wrapping back to
        the start of the precomputed buffer when its end is reached."""
        stop = self.next_sample + n_samples
        chunk = np.take(
            self.volumes,
            range(self.next_sample, stop),
            mode='wrap',
            axis=0)
        self.next_sample = stop % self.volumes.shape[0]
        return chunk
def hive_volumes(hives, swarm_positions, sigma=1):
    """
    Computes the volume at each hive based on the swarm position.

    The volume follows a Gaussian (RBF) falloff with the distance between
    the swarm and each hive: exp(-||swarm - hive||**2 / sigma**2).  With
    the default sigma=1 this reproduces the previous hard-coded behaviour
    (rbf_kernel with gamma=1).  BUGFIX: the ``sigma`` argument was
    previously accepted but silently ignored.

    Parameters
    ----------
    hives: array_like, (H, 2)
        An array containing the 2D positions of each hive.
        H denotes the number of hives.
    swarm_positions: array_like, (N, 2)
        The position of the swarm at each sample.
    sigma: float, optional
        Length scale of the falloff; larger values spread the sound
        further from the swarm.

    Returns
    -------
    hive_volumes: array_like, (N, H)
        The volume for each hive at each sample.  (The previous docstring
        claimed (H, N), but the value returned has one row per sample.)
    """
    hives = np.asarray(hives, dtype=float)
    swarm_positions = np.asarray(swarm_positions, dtype=float)
    # Squared Euclidean distance between every sample position and every
    # hive: shape (N, H).
    sq_dists = ((swarm_positions[:, np.newaxis, :] -
                 hives[np.newaxis, :, :]) ** 2).sum(axis=2)
    return np.exp(-sq_dists / float(sigma) ** 2)
| bsd-2-clause |
krosenfeld/scatterbrane | docs/_code/time_variability.py | 1 | 3849 | '''
Generate a time series incorporating the motion of the screen across the source.
This script may take a long time to run. I suggest you read through it first and
adjust the num_samples variable to check out its performance.
'''
import numpy as np
from scipy.ndimage import imread
import time
import matplotlib.pyplot as plt
from palettable.cubehelix import jim_special_16
cmap = jim_special_16.mpl_colormap
plt.rcParams['image.origin'] = 'lower'
plt.rcParams['patch.edgecolor'] = 'white'
plt.rcParams['lines.linewidth'] = 2
from scatterbrane import Brane,utilities
# set up logger
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
# import our source image and covert it to gray scale
src_file = 'source_images/nh_01_stern_05_pluto_hazenew2.square.jpg'
rgb = imread(src_file)[::-1]
I = (np.array([0.2989,0.5870,0.1140])[np.newaxis,np.newaxis,:]*rgb).sum(axis=-1)
I *= np.pi/I.sum()
# make up some scale for our image.
write_figs = False
wavelength=1e-3
FOV = 90.
dx = FOV/I.shape[0]
# initialize the scattering screen @ 0.87mm
b = Brane(I,dx,wavelength=0.87e-3,nphi=(2**12,2**14),anisotropy=1,pa=None,r_inner=50,live_dangerously=True)
# estimate the time resolution of our simulation assuming some screen velocity.
screen_velocity = 200. #km/s
fs = screen_velocity/(b.screen_dx*b.ips) # Hz
num_samples = b.nphi[1]/b.ips - b.nx # try num_samples = 100 for testing porpoises.
logger.info('Number of samples: {0:g}'.format(num_samples))
logger.info('Sampling interval: {0:g}s'.format(1./fs))
logger.info('Time coverage: {0:g} days'.format(num_samples/fs/(3600.*24.)))
# generate the screen (this takes a while)
logger.info('generating screen...')
tic = time.time()
b.generatePhases()
logger.info('took {0:g}s'.format(time.time()-tic))
# generate time series (this takes a while)
logger.info('generating time series...')
fluxes = []
frames = []
tic = time.time()
for i in range(num_samples):
# update source image to include a sinusoidal flux modulation
b.setModel(I*(1. - 0.4*np.sin(2*np.pi*i/(2*num_samples))), dx) # comment out to speedup
b.scatter(move_pix=i*b.ips)
fluxes.append(b.iss.sum())
frames.append(b.iss)
logger.info('took {0:g}s'.format(time.time()-tic))
# 1962.92s
# make figures
fig_file = '../_static/time_variability/'
extent=b.dx*b.nx//2*np.array([1,-1,-1,1])
plt.figure()
plt.subplot(121)
isrc_smooth = utilities.smoothImage(b.isrc,b.dx,2.*b.dx)
plt.imshow(isrc_smooth,extent=extent,cmap=cmap)
plt.xlabel('$\Delta\\alpha$ [$\mu$as]'); plt.ylabel('$\Delta\delta$ [$\mu$as]')
plt.subplot(122)
iss_smooth = utilities.smoothImage(b.iss,b.dx,2.*b.dx)
plt.imshow(iss_smooth,extent=extent,cmap=cmap)
plt.gca().set_yticklabels(10*['']); plt.gca().set_xticklabels(10*[''])
if write_figs: plt.savefig(fig_file+'/iss.png',bbox_inches='tight')
plt.figure()
t = 1./fs*np.arange(len(fluxes))/3600.
plt.plot(t,fluxes,color='#377EB8')
plt.xlabel('time [hr]')
plt.ylabel('flux [Jy]')
plt.xlim([0,t.max()])
plt.grid()
if write_figs: plt.savefig(fig_file+'/flux.png',bbox_inches='tight')
# and a movie
import matplotlib.animation as animation
i = 0
def updatefig(*args):
    """Animation callback: advance to the next precomputed frame
    (wrapping at num_samples) and refresh the displayed image.

    Relies on the module-level ``i``, ``num_samples``, ``frames``,
    ``b`` and ``im`` objects defined in this script.
    """
    global i
    i = (i + 1) % num_samples
    # Smooth the stored frame the same way the static plots are smoothed.
    im.set_array(utilities.smoothImage(frames[i],b.dx,2*b.dx))
    return im
plt.show()
fig = plt.figure(figsize=(8,6))
im = plt.imshow(utilities.smoothImage(frames[0],b.dx,2*b.dx), cmap=cmap, animated=True,
extent=extent, interpolation=None)
plt.xlabel('$\Delta\\alpha$ [$\mu$as]')
plt.ylabel('$\Delta\delta$ [$\mu$as]')
ani = animation.FuncAnimation(fig, updatefig, interval=50, blit=False, frames=int(1000))
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Katherine Rosenfeld'), bitrate=1800)
if write_figs:
logger.info('writing movie!')
ani.save('mov.mp4',writer=writer)
plt.close()
else:
plt.show()
| mit |
cactusbin/nyt | matplotlib/examples/user_interfaces/svg_tooltip.py | 6 | 3492 | """
SVG tooltip example
===================
This example shows how to create a tooltip that will show up when
hovering over a matplotlib patch.
Although it is possible to create the tooltip from CSS or javascript,
here we create it in matplotlib and simply toggle its visibility on
when hovering over the patch. This approach provides total control over
the tooltip placement and appearance, at the expense of more code up
front.
The alternative approach would be to put the tooltip content in `title`
atttributes of SVG objects. Then, using an existing js/CSS library, it
would be relatively straightforward to create the tooltip in the
browser. The content would be dictated by the `title` attribute, and
the appearance by the CSS.
:author: David Huard
"""
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
from StringIO import StringIO
ET.register_namespace("","http://www.w3.org/2000/svg")
fig, ax = plt.subplots()
# Create patches to which tooltips will be assigned.
circle = plt.Circle((0,0), 5, fc='blue')
rect = plt.Rectangle((-5, 10), 10, 5, fc='green')
ax.add_patch(circle)
ax.add_patch(rect)
# Create the tooltips
circle_tip = ax.annotate('This is a blue circle.',
xy=(0,0),
xytext=(30,-30),
textcoords='offset points',
color='w',
ha='left',
bbox=dict(boxstyle='round,pad=.5', fc=(.1,.1,.1,.92), ec=(1.,1.,1.), lw=1, zorder=1),
)
rect_tip = ax.annotate('This is a green rectangle.',
xy=(-5,10),
xytext=(30,40),
textcoords='offset points',
color='w',
ha='left',
bbox=dict(boxstyle='round,pad=.5', fc=(.1,.1,.1,.92), ec=(1.,1.,1.), lw=1, zorder=1),
)
# Set id for the patches
for i, t in enumerate(ax.patches):
t.set_gid('patch_%d'%i)
# Set id for the annotations
for i, t in enumerate(ax.texts):
t.set_gid('tooltip_%d'%i)
# Save the figure in a fake file object
ax.set_xlim(-30, 30)
ax.set_ylim(-30, 30)
ax.set_aspect('equal')
f = StringIO()
plt.savefig(f, format="svg")
# --- Add interactivity ---
# Create XML tree from the SVG file.
tree, xmlid = ET.XMLID(f.getvalue())
tree.set('onload', 'init(evt)')
# Hide the tooltips
for i, t in enumerate(ax.texts):
el = xmlid['tooltip_%d'%i]
el.set('visibility', 'hidden')
# Assign onmouseover and onmouseout callbacks to patches.
for i, t in enumerate(ax.patches):
el = xmlid['patch_%d'%i]
el.set('onmouseover', "ShowTooltip(this)")
el.set('onmouseout', "HideTooltip(this)")
# This is the script defining the ShowTooltip and HideTooltip functions.
script = """
<script type="text/ecmascript">
<![CDATA[
function init(evt) {
if ( window.svgDocument == null ) {
svgDocument = evt.target.ownerDocument;
}
}
function ShowTooltip(obj) {
var cur = obj.id.slice(-1);
var tip = svgDocument.getElementById('tooltip_' + cur);
tip.setAttribute('visibility',"visible")
}
function HideTooltip(obj) {
var cur = obj.id.slice(-1);
var tip = svgDocument.getElementById('tooltip_' + cur);
tip.setAttribute('visibility',"hidden")
}
]]>
</script>
"""
# Insert the script at the top of the file and save it.
tree.insert(0, ET.XML(script))
ET.ElementTree(tree).write('svg_tooltip.svg')
| unlicense |
rhyolight/NAB | nab/runner.py | 2 | 10315 | # ----------------------------------------------------------------------
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import multiprocessing
import os
import pandas
try:
import simplejson as json
except ImportError:
import json
from nab.corpus import Corpus
from nab.detectors.base import detectDataSet
from nab.labeler import CorpusLabel
from nab.optimizer import optimizeThreshold
from nab.scorer import scoreCorpus
from nab.util import updateThresholds, updateFinalResults
class Runner(object):
  """
  Class to run an endpoint (detect, optimize, or score) on the NAB
  benchmark using the specified set of profiles, thresholds, and/or detectors.

  NOTE(review): this class is Python 2 only (print statements,
  dict.iteritems/has_key); porting to Python 3 would touch every method.
  """

  def __init__(self,
               dataDir,
               resultsDir,
               labelPath,
               profilesPath,
               thresholdPath,
               numCPUs=None):
    """
    @param dataDir        (string)   Directory where all the raw datasets exist.

    @param resultsDir     (string)   Directory where the detector anomaly scores
                                     will be scored.

    @param labelPath      (string)   Path where the labels of the datasets
                                     exist.

    @param profilesPath   (string)   Path to JSON file containing application
                                     profiles and associated cost matrices.

    @param thresholdPath  (string)   Path to thresholds dictionary containing the
                                     best thresholds (and their corresponding
                                     score) for a combination of detector and
                                     user profile.

    @param numCPUs        (int)      Number of CPUs to be used for calls to
                                     multiprocessing.pool.map

    NOTE(review): the original docstring also documented a
    ``probationaryPercent`` parameter that is not in the signature; it is
    set to a fixed 0.15 below.
    """
    self.dataDir = dataDir
    self.resultsDir = resultsDir
    self.labelPath = labelPath
    self.profilesPath = profilesPath
    self.thresholdPath = thresholdPath
    self.pool = multiprocessing.Pool(numCPUs)

    # Fraction of each dataset ignored during scoring, and the relative
    # anomaly-window size used by the labeler.
    self.probationaryPercent = 0.15
    self.windowSize = 0.10

    # Populated by initialize().
    self.corpus = None
    self.corpusLabel = None
    self.profiles = None

  def initialize(self):
    """Initialize all the relevant objects for the run."""
    self.corpus = Corpus(self.dataDir)
    self.corpusLabel = CorpusLabel(path=self.labelPath, corpus=self.corpus)

    with open(self.profilesPath) as p:
      self.profiles = json.load(p)

  def detect(self, detectors):
    """Generate results file given a dictionary of detector classes

    Function that takes a set of detectors and a corpus of data and creates a
    set of files storing the alerts and anomaly scores given by the detectors

    @param detectors     (dict)         Dictionary with key value pairs of a
                                        detector name and its corresponding
                                        class constructor.
    """
    print "\nRunning detection step"

    count = 0
    args = []
    for detectorName, detectorConstructor in detectors.iteritems():
      for relativePath, dataSet in self.corpus.dataFiles.iteritems():

        # Only detect on files that actually have labels.
        if self.corpusLabel.labels.has_key(relativePath):
          args.append(
            (
              count,
              detectorConstructor(
                dataSet=dataSet,
                probationaryPercent=self.probationaryPercent),
              detectorName,
              self.corpusLabel.labels[relativePath]["label"],
              self.resultsDir,
              relativePath
            )
          )

          count += 1

    # Using `map_async` instead of `map` so interrupts are properly handled.
    # See: http://stackoverflow.com/a/1408476
    self.pool.map_async(detectDataSet, args).get(99999999)

  def optimize(self, detectorNames):
    """Optimize the threshold for each combination of detector and profile.

    @param detectorNames  (list)  List of detector names.

    @return thresholds    (dict)  Dictionary of dictionaries with detector names
                                  then profile names as keys followed by another
                                  dictionary containing the score and the
                                  threshold used to obtained that score.
    """
    print "\nRunning optimize step"

    scoreFlag = False  # NOTE(review): assigned but never used in this method.
    thresholds = {}

    for detectorName in detectorNames:
      resultsDetectorDir = os.path.join(self.resultsDir, detectorName)
      resultsCorpus = Corpus(resultsDetectorDir)

      thresholds[detectorName] = {}

      for profileName, profile in self.profiles.iteritems():
        thresholds[detectorName][profileName] = optimizeThreshold(
          (detectorName,
           profile["CostMatrix"],
           resultsCorpus,
           self.corpusLabel,
           self.probationaryPercent))

    updateThresholds(thresholds, self.thresholdPath)

    return thresholds

  def score(self, detectorNames, thresholds):
    """Score the performance of the detectors.

    Function that must be called only after detection result files have been
    generated and thresholds have been optimized. This looks at the result files
    and scores the performance of each detector specified and stores these
    results in a csv file.

    @param detectorNames  (list)    List of detector names.

    @param thresholds     (dict)    Dictionary of dictionaries with detector
                                    names then profile names as keys followed by
                                    another dictionary containing the score and
                                    the threshold used to obtained that score.
    """
    print "\nRunning scoring step"

    scoreFlag = True
    baselines = {}  # NOTE(review): assigned but never used in this method.

    self.resultsFiles = []
    for detectorName in detectorNames:
      resultsDetectorDir = os.path.join(self.resultsDir, detectorName)
      resultsCorpus = Corpus(resultsDetectorDir)

      for profileName, profile in self.profiles.iteritems():

        threshold = thresholds[detectorName][profileName]["threshold"]
        resultsDF = scoreCorpus(threshold,
                                (self.pool,
                                 detectorName,
                                 profileName,
                                 profile["CostMatrix"],
                                 resultsDetectorDir,
                                 resultsCorpus,
                                 self.corpusLabel,
                                 self.probationaryPercent,
                                 scoreFlag))

        scorePath = os.path.join(resultsDetectorDir, "%s_%s_scores.csv" %\
          (detectorName, profileName))

        resultsDF.to_csv(scorePath, index=False)
        print "%s detector benchmark scores written to %s" %\
          (detectorName, scorePath)
        self.resultsFiles.append(scorePath)

  def normalize(self):
    """
    Normalize the detectors' scores according to the baseline defined by the
    null detector, and print to the console.

    Function can only be called with the scoring step (i.e. runner.score())
    preceding it.

    This reads the total score values from the results CSVs, and
    subtracts the relevant baseline value. The scores are then normalized by
    multiplying by 100 and dividing by perfect less the baseline, where the
    perfect score is the number of TPs possible.

    Note the results CSVs still contain the original scores, not normalized.
    """
    print "\nRunning score normalization step"

    # Get baseline scores for each application profile.
    nullDir = os.path.join(self.resultsDir, "null")
    if not os.path.isdir(nullDir):
      raise IOError("No results directory for null detector. You must "
                    "run the null detector before normalizing scores.")

    baselines = {}
    for profileName, _ in self.profiles.iteritems():
      fileName = os.path.join(nullDir,
                              "null_" + profileName + "_scores.csv")
      with open(fileName) as f:
        results = pandas.read_csv(f)
        # The last row of the "Score" column holds the total score.
        baselines[profileName] = results["Score"].iloc[-1]

    # Get total number of TPs
    with open(self.labelPath, "rb") as f:
      labelsDict = json.load(f)

    tpCount = 0
    for labels in labelsDict.values():
      tpCount += len(labels)

    # Normalize the score from each results file.
    finalResults = {}
    for resultsFile in self.resultsFiles:
      # Recover the profile name from the results file path.
      profileName = [k for k in baselines.keys() if k in resultsFile][0]
      base = baselines[profileName]

      with open(resultsFile) as f:
        results = pandas.read_csv(f)

        # Calculate score:
        perfect = tpCount * self.profiles[profileName]["CostMatrix"]["tpWeight"]
        score = 100 * (results["Score"].iloc[-1] - base) / (perfect - base)

        # Add to results dict:
        resultsInfo = resultsFile.split(os.path.sep)[-1].split('.')[0]
        detector = resultsInfo.split('_')[0]
        profile = resultsInfo.replace(detector + "_", "").replace("_scores", "")
        if detector not in finalResults:
          finalResults[detector] = {}
        finalResults[detector][profile] = score

      print ("Final score for \'%s\' detector on \'%s\' profile = %.2f"
             % (detector, profile, score))

    resultsPath = os.path.join(self.resultsDir, "final_results.json")
    updateFinalResults(finalResults, resultsPath)
    print "Final scores have been written to %s." % resultsPath
| agpl-3.0 |
tdhopper/scikit-learn | sklearn/externals/joblib/parallel.py | 79 | 35628 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
# Under Python 3.4+ use the 'forkserver' start method by default: this makes it
# possible to avoid crashing 3rd party libraries that manage an internal thread
# pool that does not tolerate forking
if hasattr(mp, 'get_start_method'):
method = os.environ.get('JOBLIB_START_METHOD')
if (method is None and mp.get_start_method() == 'fork'
and 'forkserver' in mp.get_all_start_methods()):
method = 'forkserver'
DEFAULT_MP_CONTEXT = mp.get_context(method=method)
else:
DEFAULT_MP_CONTEXT = None
class BatchedCalls(object):
    """Wrap a sequence of (func, args, kwargs) tuples into one callable
    that executes them in order and collects their return values."""

    def __init__(self, iterator_slice):
        self.items = list(iterator_slice)
        self._size = len(self.items)

    def __call__(self):
        results = []
        for func, args, kwargs in self.items:
            results.append(func(*args, **kwargs))
        return results

    def __len__(self):
        return self._size
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
    """ Return the number of CPUs, falling back to 1 when
    multiprocessing has been disabled (mp is None).
    """
    return 1 if mp is None else mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
    """ Raised in place of KeyboardInterrupt so that worker subprocesses
    can be interrupted without the interruption being swallowed the way
    multiprocessing swallows KeyboardInterrupt.
    """
    pass
###############################################################################
class SafeFunction(object):
    """ Wraps a function so that any exception it raises is re-raised
    with the full traceback embedded in its representation.

    Useful for parallel computing with multiprocessing, for which
    exceptions cannot be captured.
    """
    def __init__(self, func):
        # The wrapped callable.
        self.func = func

    def __call__(self, *args, **kwargs):
        try:
            return self.func(*args, **kwargs)
        except KeyboardInterrupt:
            # We capture the KeyboardInterrupt and reraise it as
            # something different, as multiprocessing does not
            # interrupt processing for a KeyboardInterrupt
            raise WorkerInterrupt()
        except:
            # Bare except: deliberately catches everything else (including
            # non-Exception BaseExceptions) so the traceback text can be
            # shipped back to the parent process.
            e_type, e_value, e_tb = sys.exc_info()
            text = format_exc(e_type, e_value, e_tb, context=10,
                             tb_offset=1)
            if issubclass(e_type, TransportableException):
                # Already wrapped by a nested SafeFunction: re-raise as-is.
                raise
            else:
                raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
    """Decorator used to capture the arguments of a function.

    Pass `check_pickle=False` when:

    - performing a possibly repeated check is too costly and has been done
      already once outside of the call to delayed.

    - when used in conjunction `Parallel(backend='threading')`.
    """
    if check_pickle:
        # Fail fast: pickling the function here surfaces multiprocessing
        # transport problems at decoration time instead of in the worker.
        pickle.dumps(function)

    def delayed_function(*args, **kwargs):
        return function, args, kwargs

    try:
        delayed_function = functools.wraps(function)(delayed_function)
    except AttributeError:
        # functools.wraps fails on some callable objects
        pass
    return delayed_function
###############################################################################
class ImmediateComputeBatch(object):
    """Sequential computation of a batch of tasks.

    Mimics the async-result API (``get``) while actually running the
    batch eagerly, for joblib.Parallel's sequential mode.
    """
    def __init__(self, batch):
        # Run immediately rather than keeping the batch around, so its
        # input arguments are not held in memory.
        self.results = batch()

    def get(self):
        return self.results
###############################################################################
class BatchCompletionCallBack(object):
    """Callback used by joblib.Parallel's multiprocessing backend.

    This callable is executed by the parent process whenever a worker process
    has returned the results of a batch of tasks.

    It is used for progress reporting, to update estimate of the batch
    processing duration and to schedule the next batch of tasks to be
    processed.
    """
    def __init__(self, dispatch_timestamp, batch_size, parallel):
        # Time (time.time()) at which the batch was dispatched to the pool.
        self.dispatch_timestamp = dispatch_timestamp
        # Number of atomic tasks in the completed batch.
        self.batch_size = batch_size
        # Back-reference to the owning Parallel instance whose counters
        # and batching state are updated below.
        self.parallel = parallel

    def __call__(self, out):
        self.parallel.n_completed_tasks += self.batch_size
        this_batch_duration = time.time() - self.dispatch_timestamp

        # Only feed the duration estimator while auto-batching is active
        # and the batch was produced with the current effective size.
        if (self.parallel.batch_size == 'auto'
                and self.batch_size == self.parallel._effective_batch_size):
            # Update the smoothed streaming estimate of the duration of a batch
            # from dispatch to completion
            old_duration = self.parallel._smoothed_batch_duration
            if old_duration == 0:
                # First record of duration for this batch size after the last
                # reset.
                new_duration = this_batch_duration
            else:
                # Update the exponentially weighted average of the duration of
                # batch for the current effective size.
                new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
            self.parallel._smoothed_batch_duration = new_duration

        self.parallel.print_progress()
        # Keep the pipeline full: dispatch the next batch if the input
        # iterator has not been exhausted.
        if self.parallel._original_iterator is not None:
            self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
    ''' Helper class for readable parallel mapping.
    Parameters
    -----------
    n_jobs: int, default: 1
        The maximum number of concurrently running jobs, such as the number
        of Python worker processes when backend="multiprocessing"
        or the size of the thread-pool when backend="threading".
        If -1 all CPUs are used. If 1 is given, no parallel computing code
        is used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
        CPUs but one are used.
    backend: str or None, default: 'multiprocessing'
        Specify the parallelization backend implementation.
        Supported backends are:
          - "multiprocessing" used by default, can induce some
            communication and memory overhead when exchanging input and
            output data with the worker Python processes.
          - "threading" is a very low-overhead backend but it suffers
            from the Python Global Interpreter Lock if the called function
            relies a lot on Python objects. "threading" is mostly useful
            when the execution bottleneck is a compiled extension that
            explicitly releases the GIL (for instance a Cython loop wrapped
            in a "with nogil" block or an expensive call to a library such
            as NumPy).
    verbose: int, optional
        The verbosity level: if non zero, progress messages are
        printed. Above 50, the output is sent to stdout.
        The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
    pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
        The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
    batch_size: int or 'auto', default: 'auto'
        The number of atomic tasks to dispatch at once to each
        worker. When individual evaluations are very fast, multiprocessing
        can be slower than sequential computation because of the overhead.
        Batching fast computations together can mitigate this.
        The ``'auto'`` strategy keeps track of the time it takes for a batch
        to complete, and dynamically adjusts the batch size to keep the time
        on the order of half a second, using a heuristic. The initial batch
        size is 1.
        ``batch_size="auto"`` with ``backend="threading"`` will dispatch
        batches of a single task at a time as the threading backend has
        very little overhead and using larger batch size has not proved to
        bring any gain in that case.
    temp_folder: str, optional
        Folder to be used by the pool for memmaping large arrays
        for sharing memory with worker processes. If None, this will try in
        order:
        - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
        - /dev/shm if the folder exists and is writable: this is a RAMdisk
          filesystem available by default on modern Linux distributions,
        - the default system temporary folder that can be overridden
          with TMP, TMPDIR or TEMP environment variables, typically /tmp
          under Unix operating systems.
        Only active when backend="multiprocessing".
    max_nbytes int, str, or None, optional, 1M by default
        Threshold on the size of arrays passed to the workers that
        triggers automated memory mapping in temp_folder. Can be an int
        in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
        Use None to disable memmaping of large arrays.
        Only active when backend="multiprocessing".
    Notes
    -----
    This object uses the multiprocessing module to compute in
    parallel the application of a function to many different
    arguments. The main functionality it brings in addition to
    using the raw multiprocessing API are (see examples for details):
        * More readable code, in particular since it avoids
          constructing list of arguments.
        * Easier debugging:
            - informative tracebacks even when the error happens on
              the client side
            - using 'n_jobs=1' enables to turn off parallel computing
              for debugging without changing the codepath
            - early capture of pickling errors
        * An optional progress meter.
        * Interruption of multiprocesses jobs with 'Ctrl-C'
        * Flexible pickling control for the communication to and from
          the worker processes.
        * Ability to use shared memory efficiently with worker
          processes for large numpy-based datastructures.
    Examples
    --------
    A simple example:
    >>> from math import sqrt
    >>> from sklearn.externals.joblib import Parallel, delayed
    >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
    [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
    Reshaping the output when the function has several return
    values:
    >>> from math import modf
    >>> from sklearn.externals.joblib import Parallel, delayed
    >>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
    >>> res, i = zip(*r)
    >>> res
    (0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
    >>> i
    (0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
    The progress meter: the higher the value of `verbose`, the more
    messages::
    >>> from time import sleep
    >>> from sklearn.externals.joblib import Parallel, delayed
    >>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
    [Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
    [Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
    [Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
    [Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
    [Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
    Traceback example, note how the line of the error is indicated
    as well as the values of the parameter passed to the function that
    triggered the exception, even though the traceback happens in the
    child process::
    >>> from heapq import nlargest
    >>> from sklearn.externals.joblib import Parallel, delayed
    >>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
    #...
    ---------------------------------------------------------------------------
    Sub-process traceback:
    ---------------------------------------------------------------------------
    TypeError Mon Nov 12 11:37:46 2012
    PID: 12934 Python 2.7.3: /usr/bin/python
    ...........................................................................
    /usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
    419 if n >= size:
    420 return sorted(iterable, key=key, reverse=True)[:n]
    421
    422 # When key is none, use simpler decoration
    423 if key is None:
    --> 424 it = izip(iterable, count(0,-1)) # decorate
    425 result = _nlargest(n, it)
    426 return map(itemgetter(0), result) # undecorate
    427
    428 # General case, slowest method
    TypeError: izip argument #1 must support iteration
    ___________________________________________________________________________
    Using pre_dispatch in a producer/consumer situation, where the
    data is generated on the fly. Note how the producer is first
    called 3 times before the parallel loop is initiated, and then
    called to generate new data on the fly. In this case the total
    number of iterations cannot be reported in the progress messages::
    >>> from math import sqrt
    >>> from sklearn.externals.joblib import Parallel, delayed
    >>> def producer():
    ... for i in range(6):
    ... print('Produced %s' % i)
    ... yield i
    >>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
    ... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
    Produced 0
    Produced 1
    Produced 2
    [Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
    Produced 3
    [Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
    Produced 4
    [Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
    Produced 5
    [Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
    [Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
    [Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
    '''
    def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
                 pre_dispatch='2 * n_jobs', batch_size='auto',
                 temp_folder=None, max_nbytes='1M', mmap_mode='r'):
        self.verbose = verbose
        self._mp_context = DEFAULT_MP_CONTEXT
        if backend is None:
            # `backend=None` was supported in 0.8.2 with this effect
            backend = "multiprocessing"
        elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
            # Make it possible to pass a custom multiprocessing context as
            # backend to change the start method to forkserver or spawn or
            # preload modules on the forkserver helper process.
            self._mp_context = backend
            backend = "multiprocessing"
        if backend not in VALID_BACKENDS:
            raise ValueError("Invalid backend: %s, expected one of %r"
                             % (backend, VALID_BACKENDS))
        self.backend = backend
        self.n_jobs = n_jobs
        if (batch_size == 'auto'
                or isinstance(batch_size, Integral) and batch_size > 0):
            self.batch_size = batch_size
        else:
            raise ValueError(
                "batch_size must be 'auto' or a positive integer, got: %r"
                % batch_size)
        self.pre_dispatch = pre_dispatch
        self._temp_folder = temp_folder
        if isinstance(max_nbytes, _basestring):
            self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
        else:
            self._max_nbytes = max_nbytes
        self._mmap_mode = mmap_mode
        # Not starting the pool in the __init__ is a design decision, to be
        # able to close it ASAP, and not burden the user with closing it
        # unless they choose to use the context manager API with a with block.
        self._pool = None
        self._output = None
        self._jobs = list()
        self._managed_pool = False
        # This lock is used to coordinate the main thread of this process with
        # the async callback thread of the pool.
        self._lock = threading.Lock()
    def __enter__(self):
        self._managed_pool = True
        self._initialize_pool()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self._terminate_pool()
        self._managed_pool = False
    def _effective_n_jobs(self):
        # Resolve the user-provided n_jobs into a concrete worker count.
        n_jobs = self.n_jobs
        if n_jobs == 0:
            raise ValueError('n_jobs == 0 in Parallel has no meaning')
        elif mp is None or n_jobs is None:
            # multiprocessing is not available or disabled, fallback
            # to sequential mode
            return 1
        elif n_jobs < 0:
            n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
        return n_jobs
    def _initialize_pool(self):
        """Build a process or thread pool and return the number of workers"""
        n_jobs = self._effective_n_jobs()
        # The list of exceptions that we will capture
        self.exceptions = [TransportableException]
        if n_jobs == 1:
            # Sequential mode: do not use a pool instance to avoid any
            # useless dispatching overhead
            self._pool = None
        elif self.backend == 'threading':
            self._pool = ThreadPool(n_jobs)
        elif self.backend == 'multiprocessing':
            if mp.current_process().daemon:
                # Daemonic processes cannot have children
                self._pool = None
                warnings.warn(
                    'Multiprocessing-backed parallel loops cannot be nested,'
                    ' setting n_jobs=1',
                    stacklevel=3)
                return 1
            elif threading.current_thread().name != 'MainThread':
                # Prevent posix fork inside in non-main posix threads
                self._pool = None
                warnings.warn(
                    'Multiprocessing backed parallel loops cannot be nested'
                    ' below threads, setting n_jobs=1',
                    stacklevel=3)
                return 1
            else:
                already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
                if already_forked:
                    raise ImportError('[joblib] Attempting to do parallel computing '
                            'without protecting your import on a system that does '
                            'not support forking. To use parallel-computing in a '
                            'script, you must protect your main loop using "if '
                            "__name__ == '__main__'"
                            '". Please see the joblib documentation on Parallel '
                            'for more information'
                        )
                # Set an environment variable to avoid infinite loops
                os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
                # Make sure to free as much memory as possible before forking
                gc.collect()
                poolargs = dict(
                    max_nbytes=self._max_nbytes,
                    mmap_mode=self._mmap_mode,
                    temp_folder=self._temp_folder,
                    verbose=max(0, self.verbose - 50),
                    context_id=0,  # the pool is used only for one call
                )
                if self._mp_context is not None:
                    # Use Python 3.4+ multiprocessing context isolation
                    poolargs['context'] = self._mp_context
                self._pool = MemmapingPool(n_jobs, **poolargs)
                # We are using multiprocessing, we also want to capture
                # KeyboardInterrupts
                self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
        else:
            raise ValueError("Unsupported backend: %s" % self.backend)
        return n_jobs
    def _terminate_pool(self):
        # Shut down the worker pool (if any) and clear the fork guard.
        if self._pool is not None:
            self._pool.close()
            self._pool.terminate()  # terminate does a join()
            self._pool = None
            if self.backend == 'multiprocessing':
                os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
    def _dispatch(self, batch):
        """Queue the batch for computing, with or without multiprocessing
        WARNING: this method is not thread-safe: it should be only called
        indirectly via dispatch_one_batch.
        """
        # If job.get() catches an exception, it closes the queue:
        if self._aborting:
            return
        if self._pool is None:
            # Sequential mode: run the batch synchronously in this thread.
            job = ImmediateComputeBatch(batch)
            self._jobs.append(job)
            self.n_dispatched_batches += 1
            self.n_dispatched_tasks += len(batch)
            self.n_completed_tasks += len(batch)
            if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
                self._print('Done %3i tasks | elapsed: %s',
                            (self.n_completed_tasks,
                             short_format_time(time.time() - self._start_time)
                             ))
        else:
            dispatch_timestamp = time.time()
            cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
            job = self._pool.apply_async(SafeFunction(batch), callback=cb)
            self._jobs.append(job)
            self.n_dispatched_tasks += len(batch)
            self.n_dispatched_batches += 1
    def dispatch_next(self):
        """Dispatch more data for parallel processing
        This method is meant to be called concurrently by the multiprocessing
        callback. We rely on the thread-safety of dispatch_one_batch to protect
        against concurrent consumption of the unprotected iterator.
        """
        if not self.dispatch_one_batch(self._original_iterator):
            self._iterating = False
            self._original_iterator = None
    def dispatch_one_batch(self, iterator):
        """Prefetch the tasks for the next batch and dispatch them.
        The effective size of the batch is computed here.
        If there are no more jobs to dispatch, return False, else return True.
        The iterator consumption and dispatching is protected by the same
        lock so calling this function should be thread safe.
        """
        if self.batch_size == 'auto' and self.backend == 'threading':
            # Batching is never beneficial with the threading backend
            batch_size = 1
        elif self.batch_size == 'auto':
            old_batch_size = self._effective_batch_size
            batch_duration = self._smoothed_batch_duration
            if (batch_duration > 0 and
                    batch_duration < MIN_IDEAL_BATCH_DURATION):
                # The current batch size is too small: the duration of the
                # processing of a batch of task is not large enough to hide
                # the scheduling overhead.
                ideal_batch_size = int(
                    old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
                # Multiply by two to limit oscillations between min and max.
                batch_size = max(2 * ideal_batch_size, 1)
                self._effective_batch_size = batch_size
                if self.verbose >= 10:
                    self._print("Batch computation too fast (%.4fs.) "
                                "Setting batch_size=%d.", (
                                    batch_duration, batch_size))
            elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
                  old_batch_size >= 2):
                # The current batch size is too big. If we schedule overly long
                # running batches some CPUs might wait with nothing left to do
                # while a couple of CPUs are left processing a few long running
                # batches. Better reduce the batch size a bit to limit the
                # likelihood of scheduling such stragglers.
                self._effective_batch_size = batch_size = old_batch_size // 2
                if self.verbose >= 10:
                    self._print("Batch computation too slow (%.2fs.) "
                                "Setting batch_size=%d.", (
                                    batch_duration, batch_size))
            else:
                # No batch size adjustment
                batch_size = old_batch_size
            if batch_size != old_batch_size:
                # Reset estimation of the smoothed mean batch duration: this
                # estimate is updated in the multiprocessing apply_async
                # CallBack as long as the batch_size is constant. Therefore
                # we need to reset the estimate whenever we re-tune the batch
                # size.
                self._smoothed_batch_duration = 0
        else:
            # Fixed batch size strategy
            batch_size = self.batch_size
        with self._lock:
            tasks = BatchedCalls(itertools.islice(iterator, batch_size))
            if not tasks:
                # No more tasks available in the iterator: tell caller to stop.
                return False
            else:
                self._dispatch(tasks)
                return True
    def _print(self, msg, msg_args):
        """Display the message on stdout or stderr depending on verbosity"""
        # XXX: Not using the logger framework: need to
        # learn to use logger better.
        if not self.verbose:
            return
        if self.verbose < 50:
            writer = sys.stderr.write
        else:
            writer = sys.stdout.write
        msg = msg % msg_args
        writer('[%s]: %s\n' % (self, msg))
    def print_progress(self):
        """Display the progress of the parallel execution only a fraction
        of time, controlled by self.verbose.
        """
        if not self.verbose:
            return
        elapsed_time = time.time() - self._start_time
        # This is heuristic code to print only 'verbose' times a message
        # The challenge is that we may not know the queue length
        if self._original_iterator:
            if _verbosity_filter(self.n_dispatched_batches, self.verbose):
                return
            self._print('Done %3i tasks | elapsed: %s',
                        (self.n_completed_tasks,
                         short_format_time(elapsed_time),
                         ))
        else:
            index = self.n_dispatched_batches
            # We are finished dispatching
            total_tasks = self.n_dispatched_tasks
            # We always display the first loop
            if not index == 0:
                # Display depending on the number of remaining items
                # A message as soon as we finish dispatching, cursor is 0
                cursor = (total_tasks - index + 1
                          - self._pre_dispatch_amount)
                frequency = (total_tasks // self.verbose) + 1
                is_last_item = (index + 1 == total_tasks)
                if (is_last_item or cursor % frequency):
                    return
                remaining_time = (elapsed_time / (index + 1) *
                                  (self.n_dispatched_tasks - index - 1.))
                self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
                            (index + 1,
                             total_tasks,
                             short_format_time(elapsed_time),
                             short_format_time(remaining_time),
                             ))
    def retrieve(self):
        """Collect the results of all dispatched jobs into self._output."""
        self._output = list()
        while self._iterating or len(self._jobs) > 0:
            if len(self._jobs) == 0:
                # Wait for an async callback to dispatch new jobs
                time.sleep(0.01)
                continue
            # We need to be careful: the job list can be filling up as
            # we empty it and Python list are not thread-safe by default hence
            # the use of the lock
            with self._lock:
                job = self._jobs.pop(0)
            try:
                self._output.extend(job.get())
            except tuple(self.exceptions) as exception:
                # Stop dispatching any new job in the async callback thread
                self._aborting = True
                if isinstance(exception, TransportableException):
                    # Capture exception to add information on the local
                    # stack in addition to the distant stack
                    this_report = format_outer_frames(context=10,
                                                      stack_start=1)
                    report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
                    # Convert this to a JoblibException
                    exception_type = _mk_exception(exception.etype)[0]
                    exception = exception_type(report)
                # Kill remaining running processes without waiting for
                # the results as we will raise the exception we got back
                # to the caller instead of returning any result.
                with self._lock:
                    self._terminate_pool()
                    if self._managed_pool:
                        # In case we had to terminate a managed pool, let
                        # us start a new one to ensure that subsequent calls
                        # to __call__ on the same Parallel instance will get
                        # a working pool as they expect.
                        self._initialize_pool()
                raise exception
    def __call__(self, iterable):
        if self._jobs:
            raise ValueError('This Parallel instance is already running')
        # A flag used to abort the dispatching of jobs in case an
        # exception is found
        self._aborting = False
        if not self._managed_pool:
            n_jobs = self._initialize_pool()
        else:
            n_jobs = self._effective_n_jobs()
        if self.batch_size == 'auto':
            self._effective_batch_size = 1
        iterator = iter(iterable)
        pre_dispatch = self.pre_dispatch
        if pre_dispatch == 'all' or n_jobs == 1:
            # prevent further dispatch via multiprocessing callback thread
            self._original_iterator = None
            self._pre_dispatch_amount = 0
        else:
            self._original_iterator = iterator
            if hasattr(pre_dispatch, 'endswith'):
                # NOTE: string expressions such as '2 * n_jobs' are evaluated
                # here with n_jobs in scope.
                pre_dispatch = eval(pre_dispatch)
            self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
            # The main thread will consume the first pre_dispatch items and
            # the remaining items will later be lazily dispatched by async
            # callbacks upon task completions.
            iterator = itertools.islice(iterator, pre_dispatch)
        self._start_time = time.time()
        self.n_dispatched_batches = 0
        self.n_dispatched_tasks = 0
        self.n_completed_tasks = 0
        self._smoothed_batch_duration = 0.0
        try:
            self._iterating = True
            while self.dispatch_one_batch(iterator):
                pass
            if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the above while
                # loop. No need to wait for async callbacks to trigger
                # consumption.
                self._iterating = False
            self.retrieve()
            # Make sure that we get a last message telling us we are done
            elapsed_time = time.time() - self._start_time
            self._print('Done %3i out of %3i | elapsed: %s finished',
                        (len(self._output), len(self._output),
                         short_format_time(elapsed_time)))
        finally:
            if not self._managed_pool:
                self._terminate_pool()
            self._jobs = list()
        output = self._output
        self._output = None
        return output
    def __repr__(self):
        return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| bsd-3-clause |
jmschrei/scikit-learn | examples/feature_selection/plot_feature_selection.py | 95 | 2847 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the 4 first ones are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
Selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='darkorange')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight',
color='navy')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='c')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
fcchou/CS229-project | learning/final5.py | 1 | 2764 | import sys
# NOTE(review): Python 2 script (print statement, cPickle). Several imports
# below appear unused in the current code path (e.g. MultinomialNB,
# GridSearchCV, PCA, KernelPCA, SelectKBest, chi2, LinearSVC).
sys.path.append('../')
from jos_learn.features import FeatureExtract
import numpy as np
import matplotlib.pyplot as plt
import sklearn.cross_validation as cv
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC, SVC
from sklearn.metrics import accuracy_score, f1_score
from sklearn.metrics import precision_score, recall_score
from sklearn.grid_search import GridSearchCV
from sklearn.feature_extraction.text import TfidfTransformer
import cPickle as pickle
from sklearn.decomposition import TruncatedSVD, PCA, KernelPCA
from sklearn.feature_selection import SelectKBest, chi2
# Setup the features
extractor = FeatureExtract()
labels = extractor.labels
works = extractor.works
# Remap label -1 to 0 so the two main classes are {0, 1}.
labels[labels == -1] = 0
feature1 = extractor.feature_cp[0]
# Load pre-computed "shell" feature dictionaries (lengths 2 and 3) for each
# work and vectorize them into one matrix per length.
feature_shell = []
length = [2, 3]
for l in length:
    folder = '../shell/length%d_no_mirror' % l
    dict_list = []
    for work in works:
        data = pickle.load(open('%s/%s.pkl' % (folder, work), 'rb'))
        dict_list.append(data)
    feature, names = extractor._vectorize(dict_list)
    feature_shell.append(feature)
feature_shell = np.hstack(feature_shell)
# TF-IDF normalize both feature groups, then reduce dimensionality with
# truncated SVD before concatenating them.
normalizer = TfidfTransformer()
feature1 = normalizer.fit_transform(feature1).toarray()
feature2 = normalizer.fit_transform(feature_shell).toarray()
SVD1 = TruncatedSVD(n_components=300)
SVD2 = TruncatedSVD(n_components=200)
feature1 = SVD1.fit_transform(feature1)
feature2 = SVD2.fit_transform(feature2)
feature = np.hstack((feature1, feature2))
# Works labeled 2 are held out of training/evaluation entirely — presumably
# the "unsecure/unknown" class; verify against the FeatureExtract source.
feature_unsec = feature[labels == 2]
feature = feature[labels != 2]
labels = labels[labels != 2]
# Three-classifier ensemble whose predicted probabilities are averaged below.
clf1 = SVC(C=10000, gamma=0.75, probability=True)
#clf2 = LinearSVC(C=100, probability=True)
clf2 = SVC(kernel='linear', C=100, probability=True)
clf3 = LogisticRegression(C=100)
sfk = cv.StratifiedShuffleSplit(labels, 100)
scores = []
for train, test in sfk:
    score = []
    train_set = feature[train]
    test_set = feature[test]
    clf1.fit(train_set, labels[train])
    pred1 = clf1.predict(test_set)
    prob1 = clf1.predict_proba(test_set)[:, 1]
    clf2.fit(train_set, labels[train])
    pred2 = clf2.predict(test_set)
    prob2 = clf2.predict_proba(test_set)[:, 1]
    clf3.fit(train_set, labels[train])
    pred3 = clf3.predict(test_set)
    prob3 = clf3.predict_proba(test_set)[:, 1]
    # Average the positive-class probabilities and threshold at 0.7.
    prob_avg = (prob1 + prob2 + prob3) / 3
    pred = np.zeros_like(pred1)
    pred[prob_avg > 0.7] = 1
    #pred = pred1 * pred2 * pred3
    score.append(accuracy_score(labels[test], pred))
    score.append(precision_score(labels[test], pred))
    score.append(recall_score(labels[test], pred))
    score.append(f1_score(labels[test], pred))
    scores.append(score)
# Average [accuracy, precision, recall, f1] over the 100 shuffle splits.
avg = np.average(scores, axis=0)
print avg
| apache-2.0 |
xubenben/scikit-learn | examples/decomposition/plot_incremental_pca.py | 244 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
# Fit IPCA on mini-batches of 10 samples, and full-batch PCA for comparison.
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
    plt.figure(figsize=(8, 8))
    for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
        plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
                    c=c, label=target_name)
    if "Incremental" in title:
        # Compare absolute values: principal components are only defined up
        # to a sign flip.
        err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
        plt.title(title + " of iris dataset\nMean absolute unsigned error "
                  "%.6f" % err)
    else:
        plt.title(title + " of iris dataset")
    plt.legend(loc="best")
    plt.axis([-4, 4, -1.5, 1.5])
    plt.show()
| bsd-3-clause |
petosegan/scikit-learn | sklearn/utils/extmath.py | 142 | 21102 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
    """Euclidean norm of a vector x, or Frobenius norm of a 2-d array x.
    Delegates to the BLAS ``nrm2`` routine, which is more precise than
    sqrt(squared_norm(x)).
    """
    arr = np.asarray(x)
    blas_nrm2 = linalg.get_blas_funcs(['nrm2'], [arr])[0]
    return blas_nrm2(arr)
# np.ravel(order='K') (available from NumPy 1.7.1 on) can avoid copies;
# older NumPy releases only get the plain np.ravel.
_ravel = np.ravel if np_version < (1, 7, 1) else partial(np.ravel, order='K')
def squared_norm(x):
    """Squared Euclidean norm of a vector x, or squared Frobenius norm of a
    2-d array x.
    Computed as a single dot product on the raveled data, which is faster
    than norm(x) ** 2.
    """
    flat = _ravel(x)
    return np.dot(flat, flat)
def row_norms(X, squared=False):
    """Per-row (squared) Euclidean norms of X.
    Same result as np.sqrt((X * X).sum(axis=1)), but without allocating an
    X.shape-sized temporary; CSR sparse matrices are supported as well.
    Performs no input validation.
    """
    if issparse(X):
        out = csr_row_norms(X)
    else:
        # einsum contracts each row with itself without materializing X * X.
        out = np.einsum('ij,ij->i', X, X)
    if squared:
        return out
    return np.sqrt(out, out)  # in-place sqrt, returns `out`
def fast_logdet(A):
    """log(det(A)) for a symmetric matrix A.
    More robust than np.log(np.linalg.det(A)): returns -Inf whenever the
    determinant is non-positive or undefined.
    """
    sign, logdet = np.linalg.slogdet(A)
    return logdet if sign > 0 else -np.inf
def _impose_f_order(X):
    """Return (Fortran-ordered array, whether the input was transposed)."""
    # Read the flags attribute directly rather than calling np.isfortran:
    # this catches corner cases the latter misses.
    if X.flags.c_contiguous:
        return check_array(X.T, copy=False, order='F'), True
    return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
    """Matrix product of A and B via a direct BLAS gemm call.
    Raises ValueError whenever gemm cannot be used (shape mismatch, mixed
    or non-float dtypes, 1-d or non-2-d inputs); fast_dot catches it and
    falls back to np.dot.
    """
    if B.shape[0] != A.shape[A.ndim - 1]:  # check adopted from '_dotblas.c'
        raise ValueError
    if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
                                 for x in [A, B]):
        warnings.warn('Data must be of same type. Supported types '
                      'are 32 and 64 bit float. '
                      'Falling back to np.dot.', NonBLASDotWarning)
        raise ValueError
    if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
        raise ValueError
    # scipy 0.9 compliant API
    dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
    A, trans_a = _impose_f_order(A)
    B, trans_b = _impose_f_order(B)
    return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
    def fast_dot(A, B):
        """Compute fast dot products directly calling BLAS.
        This function calls BLAS directly while warranting Fortran contiguity.
        This helps avoiding extra copies `np.dot` would have created.
        For details see section `Linear Algebra on large Arrays`:
        http://wiki.scipy.org/PerformanceTips
        Parameters
        ----------
        A, B: instance of np.ndarray
            Input arrays. Arrays are supposed to be of the same dtype and to
            have exactly 2 dimensions. Currently only floats are supported.
            In case these requirements aren't met np.dot(A, B) is returned
            instead. To activate the related warning issued in this case
            execute the following lines of code:
            >> import warnings
            >> from sklearn.utils.validation import NonBLASDotWarning
            >> warnings.simplefilter('always', NonBLASDotWarning)
        """
        try:
            return _fast_dot(A, B)
        except ValueError:
            # Maltyped or malformed data.
            return np.dot(A, B)
else:
    # Recent NumPy: plain np.dot is already fast enough.
    fast_dot = np.dot
def density(w, **kwargs):
    """Fraction of nonzero entries of w, as a value between 0 and 1.
    Accepts a scipy sparse matrix, a numpy array, or None (treated as
    completely sparse).
    """
    if hasattr(w, "toarray"):
        # Sparse matrix: count stored nonzeros over the full 2-d extent.
        return float(w.nnz) / (w.shape[0] * w.shape[1])
    if w is None:
        return 0
    return float((w != 0).sum()) / w.size
def safe_sparse_dot(a, b, dense_output=False):
    """Matrix product of a and b that also handles sparse operands.
    Dense inputs go through fast_dot (BLAS GEMM where available, to avoid
    unnecessary copies); sparse inputs use the sparse ``*`` product.
    Set dense_output=True to densify a sparse result.
    """
    if not (issparse(a) or issparse(b)):
        return fast_dot(a, b)
    product = a * b
    if dense_output and hasattr(product, "toarray"):
        product = product.toarray()
    return product
def randomized_range_finder(A, size, n_iter, random_state=None):
    """Computes an orthonormal matrix whose range approximates the range of A.
    Parameters
    ----------
    A: 2D array
        The input data matrix
    size: integer
        Size of the return array
    n_iter: integer
        Number of power iterations used to stabilize the result
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance
    Returns
    -------
    Q: 2D array
        A (size x size) projection matrix, the range of which
        approximates well the range of the input matrix A.
    Notes
    -----
    Follows Algorithm 4.3 of
    Finding structure with randomness: Stochastic algorithms for constructing
    approximate matrix decompositions
    Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061
    """
    random_state = check_random_state(random_state)
    # generating random gaussian vectors r with shape: (A.shape[1], size)
    R = random_state.normal(size=(A.shape[1], size))
    # sampling the range of A using by linear projection of r
    Y = safe_sparse_dot(A, R)
    del R
    # perform power iterations with Y to further 'imprint' the top
    # singular vectors of A in Y
    # NOTE(review): `xrange` is presumably imported from six at module
    # top (not visible in this chunk) -- Python 2 idiom.
    for i in xrange(n_iter):
        Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
    # extracting an orthonormal basis of the A range samples
    Q, R = linalg.qr(Y, mode='economic')
    return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
                   transpose='auto', flip_sign=True, random_state=0):
    """Computes a truncated randomized SVD
    Parameters
    ----------
    M: ndarray or sparse matrix
        Matrix to decompose
    n_components: int
        Number of singular values and vectors to extract.
    n_oversamples: int (default is 10)
        Additional number of random vectors to sample the range of M so as
        to ensure proper conditioning. The total number of random vectors
        used to find the range of M is n_components + n_oversamples.
    n_iter: int (default is 0)
        Number of power iterations (can be used to deal with very noisy
        problems).
    transpose: True, False or 'auto' (default)
        Whether the algorithm should be applied to M.T instead of M. The
        result should approximately be the same. The 'auto' mode will
        trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tend to be a little faster in that
        case).
    flip_sign: boolean, (True by default)
        The output of a singular value decomposition is only unique up to a
        permutation of the signs of the singular vectors. If `flip_sign` is
        set to `True`, the sign ambiguity is resolved by making the largest
        loadings for each component in the left singular vectors positive.
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior
    Notes
    -----
    This algorithm finds a (usually very good) approximate truncated
    singular value decomposition using randomization to speed up the
    computations. It is particularly fast on large matrices on which
    you wish to extract only a small number of components.
    References
    ----------
    * Finding structure with randomness: Stochastic algorithms for constructing
      approximate matrix decompositions
      Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
    * A randomized algorithm for the decomposition of matrices
      Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
    """
    random_state = check_random_state(random_state)
    # k + p random probes: oversampling improves conditioning of the range.
    n_random = n_components + n_oversamples
    n_samples, n_features = M.shape
    if transpose == 'auto' and n_samples > n_features:
        transpose = True
    if transpose:
        # this implementation is a bit faster with smaller shape[1]
        M = M.T
    Q = randomized_range_finder(M, n_random, n_iter, random_state)
    # project M to the (k + p) dimensional space using the basis vectors
    B = safe_sparse_dot(Q.T, M)
    # compute the SVD on the thin matrix: (k + p) wide
    Uhat, s, V = linalg.svd(B, full_matrices=False)
    del B
    # map the reduced left singular vectors back to the original space
    U = np.dot(Q, Uhat)
    if flip_sign:
        # deterministic sign convention (see svd_flip)
        U, V = svd_flip(U, V)
    if transpose:
        # transpose back the results according to the input convention
        return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
    else:
        return U[:, :n_components], s[:n_components], V[:n_components, :]
def logsumexp(arr, axis=0):
    """Compute ``log(sum(exp(arr)))`` along *axis*, numerically stably.

    The naive formula overflows for large entries; shifting by the
    per-slice maximum before exponentiating maps the largest term to
    ``exp(0) == 1``, then the shift is added back after the log.

    For example, ``logsumexp(np.arange(10))`` equals
    ``np.log(np.sum(np.exp(np.arange(10))))`` (~9.4586) without ever
    forming huge intermediate values.
    """
    arr = np.rollaxis(arr, axis)
    vmax = arr.max(axis=0)
    return vmax + np.log(np.sum(np.exp(arr - vmax), axis=0))
def weighted_mode(a, w, axis=0):
    """Returns an array of the weighted modal (most common) value in a
    If there is more than one such value, only the first is returned.
    The bin-count for the modal bins is also returned.
    This is an extension of the algorithm in scipy.stats.mode.
    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    w : array_like
        n-dimensional array of weights for each value
    axis : int, optional
        Axis along which to operate. Default is 0, i.e. the first axis.
    Returns
    -------
    vals : ndarray
        Array of modal values.
    score : ndarray
        Array of weighted counts for each mode.
    Examples
    --------
    >>> from sklearn.utils.extmath import weighted_mode
    >>> x = [4, 1, 4, 2, 4, 2]
    >>> weights = [1, 1, 1, 1, 1, 1]
    >>> weighted_mode(x, weights)
    (array([ 4.]), array([ 3.]))
    The value 4 appears three times: with uniform weights, the result is
    simply the mode of the distribution.
    >>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
    >>> weighted_mode(x, weights)
    (array([ 2.]), array([ 3.5]))
    The value 2 has the highest score: it appears twice with weights of
    1.5 and 2: the sum of these is 3.
    See Also
    --------
    scipy.stats.mode
    """
    if axis is None:
        # Flatten both inputs and operate on the single remaining axis.
        a = np.ravel(a)
        w = np.ravel(w)
        axis = 0
    else:
        a = np.asarray(a)
        w = np.asarray(w)
        axis = axis  # no-op; kept for symmetry with the flattening branch
    if a.shape != w.shape:
        # Broadcast scalar or mismatched weights up to a's full shape.
        w = np.zeros(a.shape, dtype=w.dtype) + w
    scores = np.unique(np.ravel(a))  # get ALL unique values
    testshape = list(a.shape)
    testshape[axis] = 1
    oldmostfreq = np.zeros(testshape)
    oldcounts = np.zeros(testshape)
    # Running argmax over candidate values: for each position keep the
    # value whose weighted count is the largest seen so far. Ties keep
    # the earlier (smaller) value because the comparison is strict.
    for score in scores:
        template = np.zeros(a.shape)
        ind = (a == score)
        template[ind] = w[ind]
        counts = np.expand_dims(np.sum(template, axis), axis)
        mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
        oldcounts = np.maximum(counts, oldcounts)
        oldmostfreq = mostfrequent
    return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.

    A generalized inverse of a real symmetric (or complex Hermitian)
    matrix is computed from its eigendecomposition, keeping only the
    'large' eigenvalues.

    Parameters
    ----------
    a : array, shape (N, N)
        Real symmetric or complex Hermitian matrix to be pseudo-inverted.
    cond : float or None, default None
        Cutoff for 'small' eigenvalues: eigenvalues whose magnitude is
        smaller than ``cond * max(|eigenvalues|)`` are treated as zero.
        If None or -1, a machine-precision default is used.
    rcond : float or None, default None (deprecated)
        Deprecated alias for `cond`; takes precedence when given.
    lower : boolean
        Whether the pertinent array data is taken from the lower or upper
        triangle of `a`. (Default: lower)

    Returns
    -------
    B : array, shape (N, N)
        The pseudo-inverse, satisfying ``a @ B @ a == a`` and
        ``B @ a @ B == B`` up to numerical precision.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.
    """
    mat = np.asarray_chkfinite(a)
    eigvals, eigvecs = linalg.eigh(mat, lower=lower)
    # The deprecated `rcond` alias overrides `cond` when supplied.
    if rcond is not None:
        cond = rcond
    if cond in [None, -1]:
        # Machine-precision-based default cutoff, scaled by dtype width.
        char = eigvecs.dtype.char.lower()
        cond = {'f': 1E3, 'd': 1E6}[char] * np.finfo(char).eps
    # Unlike the SVD case, eigh can yield negative eigenvalues, so the
    # cutoff is applied to magnitudes.
    keep = np.abs(eigvals) > cond * np.max(np.abs(eigvals))
    inv_diag = np.zeros_like(eigvals)
    inv_diag[keep] = 1.0 / eigvals[keep]
    return np.dot(eigvecs * inv_diag, np.conjugate(eigvecs).T)
def cartesian(arrays, out=None):
    """Generate a cartesian product of input arrays.
    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray
        Array to place the cartesian product in.
    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays.
    Examples
    --------
    >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
    array([[1, 4, 6],
           [1, 4, 7],
           [1, 5, 6],
           [1, 5, 7],
           [2, 4, 6],
           [2, 4, 7],
           [2, 5, 6],
           [2, 5, 7],
           [3, 4, 6],
           [3, 4, 7],
           [3, 5, 6],
           [3, 5, 7]])
    """
    arrays = [np.asarray(x) for x in arrays]
    # np.indices requires a sized sequence (it calls len() on its
    # argument), so build a tuple -- a bare generator expression fails.
    shape = tuple(len(x) for x in arrays)
    dtype = arrays[0].dtype
    # Every combination of per-array indices, one row per product element.
    ix = np.indices(shape)
    ix = ix.reshape(len(arrays), -1).T
    if out is None:
        out = np.empty_like(ix, dtype=dtype)
    for n, arr in enumerate(arrays):
        out[:, n] = arr[ix[:, n]]
    return out
def svd_flip(u, v, u_based_decision=True):
    """Sign correction to ensure deterministic output from SVD.
    Adjusts the columns of u and the rows of v such that the loadings in the
    columns in u that are largest in absolute value are always positive.
    The flip happens in place on both arrays.
    Parameters
    ----------
    u, v : ndarray
        u and v are the output of `linalg.svd` or
        `sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
        so one can compute `np.dot(u * s, v)`.
    u_based_decision : boolean, (default=True)
        If True, use the columns of u as the basis for sign flipping. Otherwise,
        use the rows of v. The choice of which variable to base the decision on
        is generally algorithm dependent.
    Returns
    -------
    u_adjusted, v_adjusted : arrays with the same dimensions as the input.
    """
    # `range` instead of the py2-only six `xrange` -- works on both major
    # versions and matches _deterministic_vector_sign_flip below.
    if u_based_decision:
        # columns of u, rows of v
        max_abs_cols = np.argmax(np.abs(u), axis=0)
        signs = np.sign(u[max_abs_cols, range(u.shape[1])])
        u *= signs
        v *= signs[:, np.newaxis]
    else:
        # rows of v, columns of u
        max_abs_rows = np.argmax(np.abs(v), axis=1)
        signs = np.sign(v[range(v.shape[0]), max_abs_rows])
        u *= signs
        v *= signs[:, np.newaxis]
    return u, v
def log_logistic(X, out=None):
    """Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
    This implementation is numerically stable because it splits positive and
    negative values::
        -log(1 + exp(-x_i))     if x_i > 0
        x_i - log(1 + exp(x_i)) if x_i <= 0
    For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
    Parameters
    ----------
    X: array-like, shape (M, N)
        Argument to the logistic function
    out: array-like, shape: (M, N), optional:
        Preallocated output array.
    Returns
    -------
    out: array, shape (M, N)
        Log of the logistic function evaluated at every point in x
    Notes
    -----
    See the blog post describing this implementation:
    http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
    """
    is_1d = X.ndim == 1
    # NOTE(review): np.float is deprecated (removed in numpy >= 1.24);
    # plain `float` is the modern equivalent.
    X = check_array(X, dtype=np.float)
    n_samples, n_features = X.shape
    if out is None:
        out = np.empty_like(X)
    # Cython helper (defined elsewhere) fills `out` in place using the
    # stable piecewise formula above.
    _log_logistic_sigmoid(n_samples, n_features, X, out)
    if is_1d:
        # Preserve the caller's 1-d shape.
        return np.squeeze(out)
    return out
def safe_min(X):
    """Return the minimum value of a dense array or CSR/CSC matrix.

    For sparse input, the implicit zeros are taken into account without
    densifying the matrix.
    Adapted from http://stackoverflow.com/q/13426580
    """
    if not issparse(X):
        return X.min()
    if len(X.data) == 0:
        # Every entry is an implicit zero.
        return 0
    stored_min = X.data.min()
    # Implicit zeros only matter when some entries are not stored.
    return stored_min if X.getnnz() == X.size else min(stored_min, 0)
def make_nonnegative(X, min_value=0):
    """Shift `X` so that ``X.min()`` is at least `min_value`.

    Sparse input below the threshold raises, because adding a constant
    to every entry would destroy the sparsity.
    """
    lowest = safe_min(X)
    if lowest >= min_value:
        return X
    if issparse(X):
        raise ValueError("Cannot make the data matrix"
                         " nonnegative because it is sparse."
                         " Adding a value to every entry would"
                         " make it no longer sparse.")
    return X + (min_value - lowest)
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
    """Calculate an average mean update and a Youngs and Cramer variance update.
    From the paper "Algorithms for computing the sample variance: analysis and
    recommendations", by Chan, Golub, and LeVeque.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data to use for variance update
    old_mean : array-like, shape: (n_features,)
    old_variance : array-like, shape: (n_features,)
    old_sample_count : int
    Returns
    -------
    updated_mean : array, shape (n_features,)
    updated_variance : array, shape (n_features,)
    updated_sample_count : int
    References
    ----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
    recommendations, The American Statistician, Vol. 37, No. 3, pp. 242-247
    """
    new_sum = X.sum(axis=0)
    # Unnormalized (sum of squared deviations) variance of the new batch.
    new_variance = X.var(axis=0) * X.shape[0]
    old_sum = old_mean * old_sample_count
    n_samples = X.shape[0]
    updated_sample_count = old_sample_count + n_samples
    # Chan et al. pairwise combination of the two batches' statistics.
    # NOTE(review): divides by old_sample_count, so this raises
    # ZeroDivisionError when called with old_sample_count == 0; callers
    # presumably always pass a non-empty running count -- confirm. Also,
    # integer inputs floor-divide under Python 2; results assume true
    # division.
    partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
        n_samples / old_sample_count * old_sum - new_sum) ** 2
    unnormalized_variance = old_variance * old_sample_count + new_variance + \
        partial_variance
    return ((old_sum + new_sum) / updated_sample_count,
            unnormalized_variance / updated_sample_count,
            updated_sample_count)
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
tsennikova/scientists-analysis | preprocessing/time_series_normalization.py | 1 | 4192 | '''
Created on 19 Jul 2016
@author: sennikta
'''
import os
from sklearn import preprocessing
import numpy as np
from os import listdir
import calendar
import collections
import csv
from itertools import islice
import pandas as pd
# Print full (non-truncated) arrays with 3-decimal floats for debugging.
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}, threshold=np.inf)
# Project layout: every input/output lives under <repo root>/data.
base_dir = os.path.dirname(os.path.dirname(__file__))
data_dir = os.path.join(base_dir, 'data')
seed_dir = os.path.join(data_dir, 'seed')
baseline_dir = os.path.join(data_dir, 'baseline')
# Raw and normalized Google Trends series.
google_trends_dir = os.path.join(data_dir, 'google_trends')
google_trends_norm_dir = os.path.join(data_dir, 'google_trends_normalized')
google_trends_norm_scientist_dir = os.path.join(google_trends_norm_dir, 'scientists')
# Raw and normalized Wikipedia page-view series.
views_dir = os.path.join(data_dir, 'views')
views_norm_dir = os.path.join(data_dir, 'views_normalized')
views_norm_scientist_dir = os.path.join(views_norm_dir, 'scientists')
views_norm_topics_dir = os.path.join(views_norm_dir, 'topics')
# Raw and normalized Wikipedia edit series.
edits_dir = os.path.join(data_dir, 'edits')
edits_norm_dir = os.path.join(data_dir, 'edits_normalized')
edits_norm_scientist_dir = os.path.join(edits_norm_dir, 'scientists')
edits_norm_topics_dir = os.path.join(edits_norm_dir, 'topics')
# Change address for each dataset: views, edits, google_trends
scientists_dir = os.path.join(edits_dir, 'scientists')
topic_dir = os.path.join(views_dir, 'topics')
# Split normalized time series back to years
# Split normalized time series back to years
def split_time_series(time_series_norm, years_list):
    """Split one concatenated daily series back into per-year lists.

    `time_series_norm` is a (1, n_days) array covering `years_list`
    back to back. Each value of the returned ordered mapping is
    ``[year, day_1, ..., day_N]`` with N = 366 for leap years and
    365 otherwise; keys are sorted by year.
    """
    flat = time_series_norm.tolist()[0]
    offset = 0
    by_year = {}
    for year in years_list:
        n_days = 366 if calendar.isleap(year) else 365
        chunk = flat[offset:offset + n_days]
        chunk.insert(0, year)
        by_year[year] = chunk
        offset += n_days
    return collections.OrderedDict(sorted(by_year.items()))
def output_txt(time_dict, file_name):
    """Write one "<year>,<v1>,...,<vN>" line per year to `file_name`.

    NOTE(review): the output always goes to `views_norm_topics_dir`,
    even when the caller processed edits or trends data -- confirm this
    is intended.
    """
    output_path = os.path.join(views_norm_topics_dir, file_name)
    text_file = open(output_path, "w")
    for key in time_dict:
        text_file.write(",".join(map(lambda x: str(x), time_dict[key])))
        text_file.write("\n")
    text_file.close()
    # no-op: rebinding the local name has no effect on the caller's dict
    time_dict = {}
    return
# For views and edits
# For views and edits
def read_txt(dir):
    """Normalize every per-entity time-series text file in `dir`.

    Each input file holds one line per year, formatted as
    "<year>,<day_1>,...,<day_N>". All years are concatenated, z-scored
    as one series (mean=0, std=1), split back into years and written
    out through output_txt().

    NOTE(review): Python 2 only (`print` statement, list-returning
    `map`); `dir` also shadows the builtin of the same name.
    """
    files_list = listdir(dir)
    for file_name in files_list:
        print file_name
        time_list = []
        time_series = []
        years_list = []
        # Windows-style separator baked in; os.path.join(dir, file_name)
        # alone would be portable.
        txtname = os.path.join(dir + '\\' + file_name)
        f = open(txtname)
        for line in f:
            # First field is the year; the rest are daily values.
            time_list = map(int, line.split(','))
            years_list.append(time_list[0])
            time_list.pop(0)
            time_series += time_list
        f.close()
        # Normalization mean=0, std=1
        time_series = np.array([time_series], dtype=np.float64)
        time_series_norm = preprocessing.scale(time_series, axis=1)
        time_dict = split_time_series(time_series_norm, years_list)
        output_txt(time_dict, file_name)
        time_series.fill(0)
        time_series_norm.fill(0)
    return
def read_csv(dir):
    """Normalize the Google Trends CSV exports found in `dir`.

    Reads the weekly interest column (export rows 5..656), z-scores it
    (mean=0, std=1) and writes a Week/Interest CSV per input file into
    `google_trends_norm_scientist_dir`.

    NOTE(review): Python 2 only (`print` statement, csv opened in 'rb'
    mode); `dir` shadows the builtin of the same name.
    """
    files_list = listdir(dir)
    for file_name in files_list:
        print file_name
        output_path = os.path.join(google_trends_norm_scientist_dir, file_name)
        time_series = []
        week_intervals = []
        csvname = os.path.join(dir + '\\' + file_name)
        f = open(csvname, 'rb')
        reader = csv.reader(f)
        # skip template
        for row in islice(reader, 5, 657):
            # Column 0 is the week label, column 1 the interest value.
            time_series.append(row[1])
            week_intervals.append(row[0])
        f.close()
        time_series = np.array([time_series], dtype=np.float64)
        time_series_norm = preprocessing.scale(time_series, axis=1)
        data = pd.DataFrame({'Week':week_intervals, 'Interest':time_series_norm[0]})
        data.to_csv(output_path, sep=',')
    return
# Script entry point: normalize the dataset selected by `topic_dir` above.
# Runs on import as well -- there is no __main__ guard.
read_txt(topic_dir)
| mit |
MohammedWasim/scikit-learn | sklearn/preprocessing/label.py | 137 | 27165 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Joel Nothman <joel.nothman@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
    """Raise if unicode labels would hit the old numpy searchsorted bug.

    The bug was fixed in numpy master before 1.7.0:
    https://github.com/numpy/numpy/pull/243
    """
    if labels.dtype.kind == 'U' and np_version[:3] < (1, 7, 0):
        raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
                           " on unicode data correctly. Please upgrade"
                           " NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
    """Encode labels with value between 0 and n_classes-1.
    Read more in the :ref:`User Guide <preprocessing_targets>`.
    Attributes
    ----------
    classes_ : array of shape (n_class,)
        Holds the label for each class.
    Examples
    --------
    `LabelEncoder` can be used to normalize labels.
    >>> from sklearn import preprocessing
    >>> le = preprocessing.LabelEncoder()
    >>> le.fit([1, 2, 2, 6])
    LabelEncoder()
    >>> le.classes_
    array([1, 2, 6])
    >>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
    array([0, 0, 1, 2]...)
    >>> le.inverse_transform([0, 0, 1, 2])
    array([1, 1, 2, 6])
    It can also be used to transform non-numerical labels (as long as they are
    hashable and comparable) to numerical labels.
    >>> le = preprocessing.LabelEncoder()
    >>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
    LabelEncoder()
    >>> list(le.classes_)
    ['amsterdam', 'paris', 'tokyo']
    >>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
    array([2, 2, 1]...)
    >>> list(le.inverse_transform([2, 2, 1]))
    ['tokyo', 'tokyo', 'paris']
    """
    def fit(self, y):
        """Fit label encoder
        Parameters
        ----------
        y : array-like of shape (n_samples,)
            Target values.
        Returns
        -------
        self : returns an instance of self.
        """
        y = column_or_1d(y, warn=True)
        _check_numpy_unicode_bug(y)
        # np.unique returns the sorted distinct labels; their positions
        # define the encoding used by transform.
        self.classes_ = np.unique(y)
        return self
    def fit_transform(self, y):
        """Fit label encoder and return encoded labels
        Parameters
        ----------
        y : array-like of shape [n_samples]
            Target values.
        Returns
        -------
        y : array-like of shape [n_samples]
        """
        y = column_or_1d(y, warn=True)
        _check_numpy_unicode_bug(y)
        # return_inverse gives the encoded labels in one pass.
        self.classes_, y = np.unique(y, return_inverse=True)
        return y
    def transform(self, y):
        """Transform labels to normalized encoding.
        Parameters
        ----------
        y : array-like of shape [n_samples]
            Target values.
        Returns
        -------
        y : array-like of shape [n_samples]
        """
        check_is_fitted(self, 'classes_')
        classes = np.unique(y)
        _check_numpy_unicode_bug(classes)
        if len(np.intersect1d(classes, self.classes_)) < len(classes):
            diff = np.setdiff1d(classes, self.classes_)
            raise ValueError("y contains new labels: %s" % str(diff))
        # classes_ is sorted, so searchsorted maps each label to its index.
        return np.searchsorted(self.classes_, y)
    def inverse_transform(self, y):
        """Transform labels back to original encoding.
        Parameters
        ----------
        y : numpy array of shape [n_samples]
            Target values.
        Returns
        -------
        y : numpy array of shape [n_samples]
        """
        check_is_fitted(self, 'classes_')
        diff = np.setdiff1d(y, np.arange(len(self.classes_)))
        # `diff` is an ndarray: `if diff:` is ambiguous (and raises) when
        # it holds more than one element, so test its length explicitly
        # to always raise the intended ValueError.
        if len(diff):
            raise ValueError("y contains new labels: %s" % str(diff))
        y = np.asarray(y)
        return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
    """Binarize labels in a one-vs-all fashion
    Several regression and binary classification algorithms are
    available in the scikit. A simple way to extend these algorithms
    to the multi-class classification case is to use the so-called
    one-vs-all scheme.
    At learning time, this simply consists in learning one regressor
    or binary classifier per class. In doing so, one needs to convert
    multi-class labels to binary labels (belong or does not belong
    to the class). LabelBinarizer makes this process easy with the
    transform method.
    At prediction time, one assigns the class for which the corresponding
    model gave the greatest confidence. LabelBinarizer makes this easy
    with the inverse_transform method.
    Read more in the :ref:`User Guide <preprocessing_targets>`.
    Parameters
    ----------
    neg_label : int (default: 0)
        Value with which negative labels must be encoded.
    pos_label : int (default: 1)
        Value with which positive labels must be encoded.
    sparse_output : boolean (default: False)
        True if the returned array from transform is desired to be in sparse
        CSR format.
    Attributes
    ----------
    classes_ : array of shape [n_class]
        Holds the label for each class.
    y_type_ : str,
        Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible type are 'continuous',
        'continuous-multioutput', 'binary', 'multiclass',
        'mutliclass-multioutput', 'multilabel-indicator', and 'unknown'.
    multilabel_ : boolean
        True if the transformer was fitted on a multilabel rather than a
        multiclass set of labels. The ``multilabel_`` attribute is deprecated
        and will be removed in 0.18
    sparse_input_ : boolean,
        True if the input data to transform is given as a sparse matrix, False
        otherwise.
    indicator_matrix_ : str
        'sparse' when the input data to tansform is a multilable-indicator and
        is sparse, None otherwise. The ``indicator_matrix_`` attribute is
        deprecated as of version 0.16 and will be removed in 0.18
    Examples
    --------
    >>> from sklearn import preprocessing
    >>> lb = preprocessing.LabelBinarizer()
    >>> lb.fit([1, 2, 6, 4, 2])
    LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
    >>> lb.classes_
    array([1, 2, 4, 6])
    >>> lb.transform([1, 6])
    array([[1, 0, 0, 0],
           [0, 0, 0, 1]])
    Binary targets transform to a column vector
    >>> lb = preprocessing.LabelBinarizer()
    >>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
    array([[1],
           [0],
           [0],
           [1]])
    Passing a 2D matrix for multilabel classification
    >>> import numpy as np
    >>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
    LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
    >>> lb.classes_
    array([0, 1, 2])
    >>> lb.transform([0, 1, 2, 1])
    array([[1, 0, 0],
           [0, 1, 0],
           [0, 0, 1],
           [0, 1, 0]])
    See also
    --------
    label_binarize : function to perform the transform operation of
        LabelBinarizer with fixed classes.
    """
    def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
        # Validate the encoding pair up front: the labels must differ, and
        # sparse output only supports zero negatives with non-zero positives.
        if neg_label >= pos_label:
            raise ValueError("neg_label={0} must be strictly less than "
                             "pos_label={1}.".format(neg_label, pos_label))
        if sparse_output and (pos_label == 0 or neg_label != 0):
            raise ValueError("Sparse binarization is only supported with non "
                             "zero pos_label and zero neg_label, got "
                             "pos_label={0} and neg_label={1}"
                             "".format(pos_label, neg_label))
        self.neg_label = neg_label
        self.pos_label = pos_label
        self.sparse_output = sparse_output
    def fit(self, y):
        """Fit label binarizer
        Parameters
        ----------
        y : numpy array of shape (n_samples,) or (n_samples, n_classes)
            Target values. The 2-d matrix should only contain 0 and 1,
            represents multilabel classification.
        Returns
        -------
        self : returns an instance of self.
        """
        # Remember the detected target kind so transform/inverse_transform
        # can reject incompatible inputs later on.
        self.y_type_ = type_of_target(y)
        if 'multioutput' in self.y_type_:
            raise ValueError("Multioutput target data is not supported with "
                             "label binarization")
        if _num_samples(y) == 0:
            raise ValueError('y has 0 samples: %r' % y)
        self.sparse_input_ = sp.issparse(y)
        self.classes_ = unique_labels(y)
        return self
    def transform(self, y):
        """Transform multi-class labels to binary labels
        The output of transform is sometimes referred to by some authors as the
        1-of-K coding scheme.
        Parameters
        ----------
        y : numpy array or sparse matrix of shape (n_samples,) or
            (n_samples, n_classes) Target values. The 2-d matrix should only
            contain 0 and 1, represents multilabel classification. Sparse
            matrix can be CSR, CSC, COO, DOK, or LIL.
        Returns
        -------
        Y : numpy array or CSR matrix of shape [n_samples, n_classes]
            Shape will be [n_samples, 1] for binary problems.
        """
        check_is_fitted(self, 'classes_')
        # Multilabel y is only valid if fit() already saw multilabel data.
        y_is_multilabel = type_of_target(y).startswith('multilabel')
        if y_is_multilabel and not self.y_type_.startswith('multilabel'):
            raise ValueError("The object was not fitted with multilabel"
                             " input.")
        # Delegate the actual encoding to the stateless helper below.
        return label_binarize(y, self.classes_,
                              pos_label=self.pos_label,
                              neg_label=self.neg_label,
                              sparse_output=self.sparse_output)
    def inverse_transform(self, Y, threshold=None):
        """Transform binary labels back to multi-class labels
        Parameters
        ----------
        Y : numpy array or sparse matrix with shape [n_samples, n_classes]
            Target values. All sparse matrices are converted to CSR before
            inverse transformation.
        threshold : float or None
            Threshold used in the binary and multi-label cases.
            Use 0 when:
                - Y contains the output of decision_function (classifier)
            Use 0.5 when:
                - Y contains the output of predict_proba
            If None, the threshold is assumed to be half way between
            neg_label and pos_label.
        Returns
        -------
        y : numpy array or CSR matrix of shape [n_samples] Target values.
        Notes
        -----
        In the case when the binary labels are fractional
        (probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows to use the output of a
        linear model's decision_function method directly as the input
        of inverse_transform.
        """
        check_is_fitted(self, 'classes_')
        if threshold is None:
            # Midpoint between the two encoded label values.
            threshold = (self.pos_label + self.neg_label) / 2.
        if self.y_type_ == "multiclass":
            # Multiclass decoding uses the per-row maximum, not a threshold.
            y_inv = _inverse_binarize_multiclass(Y, self.classes_)
        else:
            y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
                                                   self.classes_, threshold)
        # Mirror the sparsity of the data that was seen at fit time.
        if self.sparse_input_:
            y_inv = sp.csr_matrix(y_inv)
        elif sp.issparse(y_inv):
            y_inv = y_inv.toarray()
        return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
    """Binarize labels in a one-vs-all fashion
    Several regression and binary classification algorithms are
    available in the scikit. A simple way to extend these algorithms
    to the multi-class classification case is to use the so-called
    one-vs-all scheme.
    This function makes it possible to compute this transformation for a
    fixed set of class labels known ahead of time.
    Parameters
    ----------
    y : array-like
        Sequence of integer labels or multilabel data to encode.
    classes : array-like of shape [n_classes]
        Uniquely holds the label for each class.
    neg_label : int (default: 0)
        Value with which negative labels must be encoded.
    pos_label : int (default: 1)
        Value with which positive labels must be encoded.
    sparse_output : boolean (default: False),
        Set to true if output binary array is desired in CSR sparse format
    Returns
    -------
    Y : numpy array or CSR matrix of shape [n_samples, n_classes]
        Shape will be [n_samples, 1] for binary problems.
    Examples
    --------
    >>> from sklearn.preprocessing import label_binarize
    >>> label_binarize([1, 6], classes=[1, 2, 4, 6])
    array([[1, 0, 0, 0],
           [0, 0, 0, 1]])
    The class ordering is preserved:
    >>> label_binarize([1, 6], classes=[1, 6, 4, 2])
    array([[1, 0, 0, 0],
           [0, 1, 0, 0]])
    Binary targets transform to a column vector
    >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
    array([[1],
           [0],
           [0],
           [1]])
    See also
    --------
    LabelBinarizer : class used to wrap the functionality of label_binarize and
        allow for fitting to classes independently of the transform operation
    """
    if not isinstance(y, list):
        # XXX Workaround that will be removed when list of list format is
        # dropped
        y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
    else:
        if _num_samples(y) == 0:
            raise ValueError('y has 0 samples: %r' % y)
    if neg_label >= pos_label:
        raise ValueError("neg_label={0} must be strictly less than "
                         "pos_label={1}.".format(neg_label, pos_label))
    if (sparse_output and (pos_label == 0 or neg_label != 0)):
        raise ValueError("Sparse binarization is only supported with non "
                         "zero pos_label and zero neg_label, got "
                         "pos_label={0} and neg_label={1}"
                         "".format(pos_label, neg_label))
    # To account for pos_label == 0 in the dense case
    pos_switch = pos_label == 0
    if pos_switch:
        # Temporarily encode positives with -neg_label; swapped back to 0
        # at the end of the dense branch.
        pos_label = -neg_label
    y_type = type_of_target(y)
    if 'multioutput' in y_type:
        raise ValueError("Multioutput target data is not supported with label "
                         "binarization")
    if y_type == 'unknown':
        raise ValueError("The type of target data is not known")
    n_samples = y.shape[0] if sp.issparse(y) else len(y)
    n_classes = len(classes)
    classes = np.asarray(classes)
    if y_type == "binary":
        if len(classes) == 1:
            # Degenerate single-class problem: every sample is negative.
            Y = np.zeros((len(y), 1), dtype=np.int)
            Y += neg_label
            return Y
        elif len(classes) >= 3:
            # Binary y but more than two user-supplied classes: encode as
            # a full multiclass indicator matrix.
            y_type = "multiclass"
    sorted_class = np.sort(classes)
    if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
        raise ValueError("classes {0} missmatch with the labels {1}"
                         "found in the data".format(classes, unique_labels(y)))
    if y_type in ("binary", "multiclass"):
        y = column_or_1d(y)
        # pick out the known labels from y
        y_in_classes = in1d(y, classes)
        y_seen = y[y_in_classes]
        indices = np.searchsorted(sorted_class, y_seen)
        # One CSR row per sample; rows whose label is unknown stay empty.
        indptr = np.hstack((0, np.cumsum(y_in_classes)))
        data = np.empty_like(indices)
        data.fill(pos_label)
        Y = sp.csr_matrix((data, indices, indptr),
                          shape=(n_samples, n_classes))
    elif y_type == "multilabel-indicator":
        Y = sp.csr_matrix(y)
        if pos_label != 1:
            # Re-label the stored (positive) entries with pos_label.
            data = np.empty_like(Y.data)
            data.fill(pos_label)
            Y.data = data
    else:
        raise ValueError("%s target data is not supported with label "
                         "binarization" % y_type)
    if not sparse_output:
        Y = Y.toarray()
        Y = astype(Y, int, copy=False)
        if neg_label != 0:
            Y[Y == 0] = neg_label
        if pos_switch:
            # Undo the temporary encoding chosen above for pos_label == 0.
            Y[Y == pos_label] = 0
    else:
        Y.data = astype(Y.data, int, copy=False)
    # preserve label ordering
    if np.any(classes != sorted_class):
        indices = np.searchsorted(sorted_class, classes)
        Y = Y[:, indices]
    if y_type == "binary":
        # Binary problems are reported as a single column encoding the
        # positive class.
        if sparse_output:
            Y = Y.getcol(-1)
        else:
            Y = Y[:, -1].reshape((-1, 1))
    return Y
def _inverse_binarize_multiclass(y, classes):
    """Inverse label binarization transformation for multiclass.
    Multiclass uses the maximal score instead of a threshold.
    The sparse branch computes a per-row argmax without densifying y.
    """
    classes = np.asarray(classes)
    if sp.issparse(y):
        # Find the argmax for each row in y where y is a CSR matrix
        y = y.tocsr()
        n_samples, n_outputs = y.shape
        outputs = np.arange(n_outputs)
        row_max = sparse_min_max(y, 1)[1]
        row_nnz = np.diff(y.indptr)
        # Align each stored value with its row's maximum for comparison.
        y_data_repeated_max = np.repeat(row_max, row_nnz)
        # picks out all indices obtaining the maximum per row
        y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
        # For corner case where last row has a max of 0
        if row_max[-1] == 0:
            y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
        # Gets the index of the first argmax in each row from y_i_all_argmax
        index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
        # first argmax of each row
        y_ind_ext = np.append(y.indices, [0])
        y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
        # Handle rows of all 0
        y_i_argmax[np.where(row_nnz == 0)[0]] = 0
        # Handles rows with max of 0 that contain negative numbers
        samples = np.arange(n_samples)[(row_nnz > 0) &
                                       (row_max.ravel() == 0)]
        for i in samples:
            # The max is an implicit zero: pick the first column that is
            # NOT explicitly stored in this row.
            ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
            y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
        return classes[y_i_argmax]
    else:
        # 'clip' guards against out-of-range indices from argmax.
        return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
    """Transform between iterable of iterables and a multilabel format

    Although a list of sets or tuples is a very intuitive format for multilabel
    data, it is unwieldy to process. This transformer converts between this
    intuitive format and the supported multilabel format: a (samples x classes)
    binary matrix indicating the presence of a class label.

    Parameters
    ----------
    classes : array-like of shape [n_classes] (optional)
        Indicates an ordering for the class labels

    sparse_output : boolean (default: False),
        Set to true if output binary array is desired in CSR sparse format

    Attributes
    ----------
    classes_ : array of labels
        A copy of the `classes` parameter where provided,
        or otherwise, the sorted set of classes found when fitting.

    Examples
    --------
    >>> mlb = MultiLabelBinarizer()
    >>> mlb.fit_transform([(1, 2), (3,)])
    array([[1, 1, 0],
           [0, 0, 1]])
    >>> mlb.classes_
    array([1, 2, 3])

    >>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
    array([[0, 1, 1],
           [1, 0, 0]])
    >>> list(mlb.classes_)
    ['comedy', 'sci-fi', 'thriller']
    """
    def __init__(self, classes=None, sparse_output=False):
        self.classes = classes
        self.sparse_output = sparse_output

    def fit(self, y):
        """Fit the label sets binarizer, storing `classes_`

        Parameters
        ----------
        y : iterable of iterables
            A set of labels (any orderable and hashable object) for each
            sample. If the `classes` parameter is set, `y` will not be
            iterated.

        Returns
        -------
        self : returns this MultiLabelBinarizer instance
        """
        if self.classes is None:
            classes = sorted(set(itertools.chain.from_iterable(y)))
        else:
            classes = self.classes
        # Use the builtin ``int``: ``np.int`` was a deprecated alias of
        # ``int`` and has been removed in NumPy >= 1.24.
        dtype = int if all(isinstance(c, int) for c in classes) else object
        self.classes_ = np.empty(len(classes), dtype=dtype)
        self.classes_[:] = classes
        return self

    def fit_transform(self, y):
        """Fit the label sets binarizer and transform the given label sets

        Parameters
        ----------
        y : iterable of iterables
            A set of labels (any orderable and hashable object) for each
            sample. If the `classes` parameter is set, `y` will not be
            iterated.

        Returns
        -------
        y_indicator : array or CSR matrix, shape (n_samples, n_classes)
            A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
            `y[i]`, and 0 otherwise.
        """
        if self.classes is not None:
            return self.fit(y).transform(y)

        # Automatically increment on new class: a defaultdict whose factory
        # is its own __len__ assigns 0, 1, 2, ... in first-seen order.
        class_mapping = defaultdict(int)
        class_mapping.default_factory = class_mapping.__len__
        yt = self._transform(y, class_mapping)

        # sort classes and reorder columns
        tmp = sorted(class_mapping, key=class_mapping.get)

        # (make safe for tuples): assigning through an ndarray of dtype
        # object keeps tuples intact instead of broadcasting them.
        # ``np.int`` is avoided (deprecated alias removed in NumPy 1.24).
        dtype = int if all(isinstance(c, int) for c in tmp) else object
        class_mapping = np.empty(len(tmp), dtype=dtype)
        class_mapping[:] = tmp
        self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
        # remap stored column indices to the sorted class order
        yt.indices = np.take(inverse, yt.indices)

        if not self.sparse_output:
            yt = yt.toarray()

        return yt

    def transform(self, y):
        """Transform the given label sets

        Parameters
        ----------
        y : iterable of iterables
            A set of labels (any orderable and hashable object) for each
            sample. If the `classes` parameter is set, `y` will not be
            iterated.

        Returns
        -------
        y_indicator : array or CSR matrix, shape (n_samples, n_classes)
            A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
            `y[i]`, and 0 otherwise.
        """
        class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
        yt = self._transform(y, class_to_index)

        if not self.sparse_output:
            yt = yt.toarray()

        return yt

    def _transform(self, y, class_mapping):
        """Transforms the label sets with a given mapping

        Parameters
        ----------
        y : iterable of iterables
        class_mapping : Mapping
            Maps from label to column index in label indicator matrix

        Returns
        -------
        y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
            Label indicator matrix
        """
        indices = array.array('i')
        indptr = array.array('i', [0])
        for labels in y:
            # a set guards against duplicate labels within one sample
            indices.extend(set(class_mapping[label] for label in labels))
            indptr.append(len(indices))
        data = np.ones(len(indices), dtype=int)

        return sp.csr_matrix((data, indices, indptr),
                             shape=(len(indptr) - 1, len(class_mapping)))

    def inverse_transform(self, yt):
        """Transform the given indicator matrix into label sets

        Parameters
        ----------
        yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s ands 0s.

        Returns
        -------
        y : list of tuples
            The set of labels for each sample such that `y[i]` consists of
            `classes_[j]` for each `yt[i, j] == 1`.
        """
        if yt.shape[1] != len(self.classes_):
            raise ValueError('Expected indicator for {0} classes, but got {1}'
                             .format(len(self.classes_), yt.shape[1]))

        if sp.issparse(yt):
            yt = yt.tocsr()
            if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
                raise ValueError('Expected only 0s and 1s in label indicator.')
            return [tuple(self.classes_.take(yt.indices[start:end]))
                    for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
        else:
            unexpected = np.setdiff1d(yt, [0, 1])
            if len(unexpected) > 0:
                raise ValueError('Expected only 0s and 1s in label indicator. '
                                 'Also got {0}'.format(unexpected))
            return [tuple(self.classes_.compress(indicators)) for indicators
                    in yt]
| bsd-3-clause |
shanot/imp | modules/pmi/pyext/src/representation.py | 1 | 111198 | #!/usr/bin/env python
"""@namespace IMP.pmi.representation
Representation of the system.
"""
from __future__ import print_function
import IMP
import IMP.core
import IMP.algebra
import IMP.atom
import IMP.display
import IMP.isd
import IMP.pmi
import IMP.pmi.tools
import IMP.pmi.output
import IMP.rmf
import IMP.pmi.topology
import RMF
from math import pi, sqrt
from operator import itemgetter
import os
import weakref
class _Repo(object):
def __init__(self, doi, root):
self.doi = doi
self._root = root
def get_fname(self, fname):
"""Return a path relative to the top of the repository"""
return os.path.relpath(fname, self._root)
class _StateInfo(object):
    """Score state-specific information about this representation."""
    # Human-readable names for this state; left as None until assigned
    # by the code that manages states.
    short_name = None
    long_name = None
class Representation(object):
# Authors: Peter Cimermancic, Riccardo Pellarin, Charles Greenberg
'''
Set up the representation of all proteins and nucleic acid macromolecules.
Create the molecular hierarchies, representation,
sequence connectivity for the various involved proteins and
nucleic acid macromolecules:
Create a protein, DNA or RNA, represent it as a set of connected balls of appropriate
radii and number of residues, PDB at given resolution(s), or ideal helices.
How to use the SimplifiedModel class (typical use):
see test/test_hierarchy_contruction.py
examples:
1) Create a chain of helices and flexible parts
c_1_119 =self.add_component_necklace("prot1",1,119,20)
c_120_131 =self.add_component_ideal_helix("prot1",resolutions=[1,10],resrange=(120,131))
c_132_138 =self.add_component_beads("prot1",[(132,138)])
c_139_156 =self.add_component_ideal_helix("prot1",resolutions=[1,10],resrange=(139,156))
c_157_174 =self.add_component_beads("prot1",[(157,174)])
c_175_182 =self.add_component_ideal_helix("prot1",resolutions=[1,10],resrange=(175,182))
c_183_194 =self.add_component_beads("prot1",[(183,194)])
c_195_216 =self.add_component_ideal_helix("prot1",resolutions=[1,10],resrange=(195,216))
c_217_250 =self.add_component_beads("prot1",[(217,250)])
self.set_rigid_body_from_hierarchies(c_120_131)
self.set_rigid_body_from_hierarchies(c_139_156)
self.set_rigid_body_from_hierarchies(c_175_182)
self.set_rigid_body_from_hierarchies(c_195_216)
clist=[c_1_119,c_120_131,c_132_138,c_139_156,c_157_174,c_175_182,c_183_194,c_195_216,
c_217_250]
self.set_chain_of_super_rigid_bodies(clist,2,3)
self.set_super_rigid_bodies(["prot1"])
'''
    def __init__(self, m, upperharmonic=True, disorderedlength=True):
        """Constructor.

        @param m the model
        @param upperharmonic This flag uses either harmonic (False)
               or upperharmonic (True) in the intra-pair
               connectivity restraint.
        @param disorderedlength This flag uses either disordered length
               calculated for random coil peptides (True) or zero
               surface-to-surface distance between beads (False)
               as optimal distance for the sequence connectivity
               restraint.
        """

        self.state = _StateInfo()
        self._metadata = []
        self._file_dataset = {}
        self._protocol_output = []

        # this flag uses either harmonic (False) or upperharmonic (True)
        # in the intra-pair connectivity restraint. Harmonic is used whe you want to
        # remove the intra-ev term from energy calculations, e.g.:
        # upperharmonic=False
        # ip=self.get_connected_intra_pairs()
        # ev.add_excluded_particle_pairs(ip)
        self.upperharmonic = upperharmonic
        self.disorderedlength = disorderedlength

        # particle lists partitioned by how they move during sampling
        self.rigid_bodies = []
        self.fixed_rigid_bodies = []
        self.fixed_floppy_bodies = []
        self.floppy_bodies = []
        # self.super_rigid_bodies is a list of tuples.
        # each tuple, corresponding to a particular super rigid body
        # the tuple is (super_rigid_xyzs,super_rigid_rbs)
        # where super_rigid_xyzs are the flexible xyz particles
        # and super_rigid_rbs is the list of rigid bodies.
        self.super_rigid_bodies = []
        self.rigid_body_symmetries = []

        self.output_level = "low"
        self.label = "None"

        # Monte Carlo move sizes: maximum translation/rotation for rigid
        # bodies (rb), super rigid bodies (srb) and floppy beads (fb)
        self.maxtrans_rb = 2.0
        self.maxrot_rb = 0.04
        self.maxtrans_srb = 2.0
        self.maxrot_srb = 0.2
        self.rigidbodiesarefixed = False
        self.floppybodiesarefixed = False
        self.maxtrans_fb = 3.0
        self.resolution = 10.0
        # NOTE(review): "bblenght" looks like a misspelling of "bblength";
        # kept as-is because external code may reference the attribute.
        self.bblenght = 100.0
        self.kappa = 100.0
        self.m = m

        self.representation_is_modified = False
        self.unmodeledregions_cr_dict = {}
        self.sortedsegments_cr_dict = {}
        # root node of the molecular hierarchy built by this representation
        self.prot = IMP.atom.Hierarchy.setup_particle(IMP.Particle(self.m))
        self.connected_intra_pairs = []
        self.hier_dict = {}
        self.color_dict = {}
        self.sequence_dict = {}
        self.hier_geometry_pairs = {}
        self.hier_db = IMP.pmi.tools.HierarchyDatabase()
        # this dictionary stores the hierarchies by component name and representation type
        # self.hier_representation[name][representation_type]
        # where representation type is Res:X, Beads, Densities, Representation,
        # etc...
        self.hier_representation = {}
        self.hier_resolution = {}
        # reference structures is a dictionary that contains the coordinates of
        # structures that are used to calculate the rmsd
        self.reference_structures = {}
        self.elements = {}
        self.linker_restraints = IMP.RestraintSet(self.m, "linker_restraints")
        self.linker_restraints.set_was_used(True)
        self.linker_restraints_dict = {}
        # three-letter <-> one-letter residue code tables
        self.threetoone = {'ALA': 'A', 'ARG': 'R', 'ASN': 'N', 'ASP': 'D',
                           'CYS': 'C', 'GLU': 'E', 'GLN': 'Q', 'GLY': 'G',
                           'HIS': 'H', 'ILE': 'I', 'LEU': 'L', 'LYS': 'K',
                           'MET': 'M', 'PHE': 'F', 'PRO': 'P', 'SER': 'S',
                           'THR': 'T', 'TRP': 'W', 'TYR': 'Y', 'VAL': 'V', 'UNK': 'X'}
        self.onetothree = dict((v, k) for k, v in self.threetoone.items())
        self.residuenamekey = IMP.StringKey("ResidueName")
def add_metadata(self, m):
"""Associate some metadata with this modeling.
@param m an instance of IMP.pmi.metadata.Metadata or a subclass.
"""
self._metadata.append(m)
def set_file_dataset(self, fname, dataset):
"""Associate a dataset with a filename.
This can be used to identify how the file was produced (in many
cases IMP can determine this automatically from a file header or
other metadata, but not always). For example, a manually-produced
PDB file (not from the PDB database or Modeller) can be
identified this way.
@param fname filename
@dataset the IMP.pmi.metadata.Dataset object to associate.
"""
self._file_dataset[os.path.abspath(fname)] = dataset
def get_file_dataset(self, fname):
"""Get the dataset associated with a filename, or None.
@param fname filename
@return an IMP.pmi.metadata.Dataset, or None.
"""
return self._file_dataset.get(os.path.abspath(fname), None)
def add_protocol_output(self, p):
"""Capture details of the modeling protocol.
@param p an instance of IMP.pmi.output.ProtocolOutput or a subclass.
"""
state = p._add_state(self)
self._protocol_output.append((p, state))
p._each_metadata.append(self._metadata)
p._file_datasets.append(self._file_dataset)
state.m = self.m
state.prot = self.prot
protocol_output = property(lambda self:
[x[0] for x in self._protocol_output])
    def set_label(self, label):
        """Set the label used to tag output from this representation."""
        self.label = label
def create_component(self, name, color=0.0):
protein_h = IMP.atom.Molecule.setup_particle(IMP.Particle(self.m))
protein_h.set_name(name)
self.hier_dict[name] = protein_h
self.hier_representation[name] = {}
self.hier_db.add_name(name)
self.prot.add_child(protein_h)
self.color_dict[name] = color
self.elements[name] = []
for p, state in self._protocol_output:
p.create_component(state, name, True)
def create_non_modeled_component(self, name):
"""Create a component that isn't used in the modeling.
No coordinates or other structural data for this component will
be read or written, but a primary sequence can be assigned. This
is useful if the input experimental data is of a system larger
than that modeled. Any references to these non-modeled components
can then be correctly resolved later."""
self.elements[name] = []
for p, state in self._protocol_output:
p.create_component(state, name, False)
    # Deprecation warning
    @IMP.deprecated_method("2.5", "Use create_component() instead.")
    def add_component_name(self, *args, **kwargs):
        """Deprecated alias for create_component()."""
        self.create_component(*args, **kwargs)
def get_component_names(self):
return list(self.hier_dict.keys())
    def add_component_sequence(self, name, filename, id=None, offs=None,
                               format="FASTA"):
        '''
        Add the primary sequence for a single component.

        @param name Human-readable name of the component
        @param filename Name of the FASTA file
        @param id Identifier of the sequence in the FASTA file header
               (if not provided, use `name` instead)
        @param offs If given, prepend this many "-" padding characters
               to the sequence, shifting residue numbering by offs
        @param format Input file format; only FASTA is read here
        '''
        record_dict = IMP.pmi.topology.Sequences(filename)
        if id is None:
            id = name
        if id not in record_dict:
            raise KeyError("id %s not found in fasta file" % id)
        length = len(record_dict[id])
        self.sequence_dict[name] = str(record_dict[id])
        # pad the sequence start so residue indices are offset by offs
        if offs is not None:
            offs_str = "-" * offs
            self.sequence_dict[name] = offs_str + self.sequence_dict[name]

        # note: the element is recorded with the unpadded length
        self.elements[name].append((length, length, " ", "end"))
        for p, state in self._protocol_output:
            p.add_component_sequence(name, self.sequence_dict[name])
    def autobuild_model(self, name, pdbname, chain,
                        resolutions=None, resrange=None,
                        missingbeadsize=20,
                        color=None, pdbresrange=None, offset=0,
                        show=False, isnucleicacid=False,
                        attachbeads=False):
        """Build a component from a PDB file, filling sequence gaps with
        bead necklaces.

        Structured (contiguous) stretches are read from the PDB with
        add_component_pdb(); gap regions are represented as strings of
        beads via add_component_necklace().

        @param name component name (must already have been created)
        @param pdbname PDB file to read
        @param chain chain identifier (string) or chain index (integer)
        @param resolutions list of coarse-grained resolutions to build
               (defaults to [1])
        @param resrange (first,last) residue range to model; if None it is
               taken from the FASTA sequence (if one was added) or else
               from the PDB itself. The last element may be -1 or 'END'
               to mean "through the end of the chain".
        @param missingbeadsize number of residues per bead in gap regions
        @param color color applied to the created hierarchies (defaults
               to the stored component color)
        @param pdbresrange accepted but not used here - TODO confirm
        @param offset residue-index offset applied when reading the PDB
        @param show if True, print the molecular hierarchy at the end
        @param isnucleicacid set True when reading nucleic acids
        @param attachbeads accepted but not used here - TODO confirm
        @return list of the created hierarchies
        """
        self.representation_is_modified = True
        outhiers = []
        if color is None:
            color = self.color_dict[name]
        else:
            self.color_dict[name] = color
        if resolutions is None:
            resolutions = [1]
        print("autobuild_model: constructing %s from pdb %s and chain %s" % (name, pdbname, str(chain)))

        # get the initial and end residues of the pdb
        t = IMP.atom.read_pdb(pdbname, self.m,
                              IMP.atom.AndPDBSelector(IMP.atom.ChainPDBSelector(chain), IMP.atom.CAlphaPDBSelector()))

        # find start and end indexes
        start = IMP.atom.Residue(
            t.get_children()[0].get_children()[0]).get_index()
        end = IMP.atom.Residue(
            t.get_children()[0].get_children()[-1]).get_index()

        # check if resrange was defined, otherwise
        # use the sequence, or the pdb resrange
        if resrange is None:
            if name in self.sequence_dict:
                resrange = (1, len(self.sequence_dict[name]))
            else:
                resrange = (start + offset, end + offset)
        else:
            if resrange[1] in (-1, 'END'):
                resrange = (resrange[0], end)
            start = resrange[0] - offset
            end = resrange[1] - offset

        gaps = IMP.pmi.tools.get_residue_gaps_in_hierarchy(
            t,
            resrange[0],
            resrange[1])

        # terminal positions, used to anchor beads placed in leading gaps
        xyznter = IMP.pmi.tools.get_closest_residue_position(
            t,
            resrange[0],
            terminus="N")
        xyzcter = IMP.pmi.tools.get_closest_residue_position(
            t,
            resrange[1],
            terminus="C")

        # Done with the PDB
        IMP.atom.destroy(t)

        # construct pdb fragments and intervening beads
        for n, g in enumerate(gaps):
            first = g[0]
            last = g[1]
            if g[2] == "cont":
                # contiguous structured stretch: read it from the PDB
                print("autobuild_model: constructing fragment %s from pdb" % (str((first, last))))
                outhiers += self.add_component_pdb(name, pdbname,
                                                   chain, resolutions=resolutions,
                                                   color=color, cacenters=True,
                                                   resrange=(first, last),
                                                   offset=offset, isnucleicacid=isnucleicacid)
            elif g[2] == "gap" and n > 0:
                # interior gap: anchor the bead necklace at the position of
                # the residue just before the gap
                print("autobuild_model: constructing fragment %s as a bead" % (str((first, last))))
                parts = self.hier_db.get_particles_at_closest_resolution(name,
                                                                         first + offset - 1,
                                                                         1)
                xyz = IMP.core.XYZ(parts[0]).get_coordinates()
                outhiers += self.add_component_necklace(name,
                                                        first + offset, last + offset, missingbeadsize, incoord=xyz)

            elif g[2] == "gap" and n == 0:
                # add pre-beads: leading gap anchored at the N terminus
                print("autobuild_model: constructing fragment %s as a bead" % (str((first, last))))
                outhiers += self.add_component_necklace(name,
                                                        first + offset, last + offset, missingbeadsize, incoord=xyznter)

        return outhiers
# Deprecation warning
@IMP.deprecated_method("2.5", "Use autobuild_model() instead.")
def autobuild_pdb_and_intervening_beads(self, *args, **kwargs):
r = self.autobuild_model(*args, **kwargs)
return r
    def add_component_pdb(self, name, pdbname, chain, resolutions, color=None,
                          resrange=None, offset=0, cacenters=True, show=False,
                          isnucleicacid=False, readnonwateratoms=False,
                          read_ca_cb_only=False):
        '''
        Add a component that has an associated 3D structure in a PDB file.

        Reads the PDB, and constructs the fragments corresponding to contiguous
        sequence stretches.

        @return a list of hierarchies.

        @param name (string) the name of the component
        @param pdbname (string) the name of the PDB file
        @param chain (string or integer) can be either a string (eg, "A")
               or an integer (eg, 0 or 1) in case you want
               to get the corresponding chain number in the PDB.
        @param resolutions (integers) a list of integers that corresponds
               to the resolutions that have to be generated
        @param color (float from 0 to 1) the color applied to the
               hierarchies generated
        @param resrange (tuple of integers): the residue range to extract
               from the PDB. It is a tuple (beg,end). If not specified,
               all residues belonging to the specified chain are read.
        @param offset (integer) specifies the residue index offset to be
               applied when reading the PDB (the FASTA sequence is
               assumed to start from residue 1, so use this parameter
               if the PDB file does not start at residue 1)
        @param cacenters (boolean) if True generates resolution=1 beads
               centered on C-alpha atoms.
        @param show (boolean) print out the molecular hierarchy at the end.
        @param isnucleicacid (boolean) use True if you're reading a PDB
               with nucleic acids.
        @param readnonwateratoms (boolean) if True fixes some pathological PDB.
        @param read_ca_cb_only (boolean) if True, only reads CA/CB
        '''
        self.representation_is_modified = True
        if color is None:
            # if the color is not passed, then get the stored color
            color = self.color_dict[name]
        protein_h = self.hier_dict[name]
        outhiers = []

        # determine selector
        sel = IMP.atom.NonWaterNonHydrogenPDBSelector()
        if read_ca_cb_only:
            cacbsel = IMP.atom.OrPDBSelector(
                IMP.atom.CAlphaPDBSelector(),
                IMP.atom.CBetaPDBSelector())
            sel = IMP.atom.AndPDBSelector(cacbsel, sel)
        if type(chain) == str:
            # chain given by ID: restrict the selector to that chain
            sel = IMP.atom.AndPDBSelector(
                IMP.atom.ChainPDBSelector(chain),
                sel)
            t = IMP.atom.read_pdb(pdbname, self.m, sel)

            # get the first and last residue
            start = IMP.atom.Residue(
                t.get_children()[0].get_children()[0]).get_index()
            end = IMP.atom.Residue(
                t.get_children()[0].get_children()[-1]).get_index()
            c = IMP.atom.Chain(IMP.atom.get_by_type(t, IMP.atom.CHAIN_TYPE)[0])
        else:
            # chain given by index: read everything, then pick the chain
            t = IMP.atom.read_pdb(pdbname, self.m, sel)
            c = IMP.atom.Chain(
                IMP.atom.get_by_type(t, IMP.atom.CHAIN_TYPE)[chain])

            # get the first and last residue
            start = IMP.atom.Residue(c.get_children()[0]).get_index()
            end = IMP.atom.Residue(c.get_children()[-1]).get_index()
            chain = c.get_id()

        # clamp the residue range to what the PDB actually contains
        if not resrange is None:
            if resrange[0] > start:
                start = resrange[0]
            if resrange[1] < end:
                end = resrange[1]

        if not isnucleicacid:
            # do what you have to do for proteins
            sel = IMP.atom.Selection(
                c,
                residue_indexes=list(range(
                    start,
                    end + 1)),
                atom_type=IMP.atom.AT_CA)

        else:
            # do what you have to do for nucleic-acids
            # to work, nucleic acids should not be indicated as HETATM in the pdb
            sel = IMP.atom.Selection(
                c,
                residue_indexes=list(range(
                    start,
                    end + 1)),
                atom_type=IMP.atom.AT_P)

        ps = sel.get_selected_particles()
        if len(ps) == 0:
            raise ValueError("%s no residue found in pdb %s chain %s that overlaps with the queried stretch %s-%s" \
                             % (name, pdbname, str(chain), str(resrange[0]), str(resrange[1])))
        c0 = IMP.atom.Chain.setup_particle(IMP.Particle(self.m), "X")

        for p in ps:
            par = IMP.atom.Atom(p).get_parent()
            ri = IMP.atom.Residue(par).get_index()
            # re-index by offset and move the residue into the working chain
            IMP.atom.Residue(par).set_index(ri + offset)
            if par.get_parent() != c0:
                par.get_parent().remove_child(par)
                c0.add_child(par)
        start = start + offset
        end = end + offset

        self.elements[name].append(
            (start, end, pdbname.split("/")[-1] + ":" + chain, "pdb"))

        hiers = self.coarse_hierarchy(name, start, end,
                                      resolutions, isnucleicacid, c0, protein_h, "pdb", color)
        outhiers += hiers
        for p, state in self._protocol_output:
            p.add_pdb_element(state, name, start, end, offset, pdbname, chain,
                              hiers[0])

        if show:
            IMP.atom.show_molecular_hierarchy(protein_h)

        # We cannot simply destroy(c0) since it might not be a well-behaved
        # hierarchy; in some cases it could contain a given residue more than
        # once (this is surely a bug but we need to keep this behavior for
        # backwards compatibility).
        residues = {}
        for p in ps:
            par = IMP.atom.Atom(p).get_parent()
            residues[par] = None
        for r in residues.keys():
            IMP.atom.destroy(r)
        self.m.remove_particle(c0)

        IMP.atom.destroy(t)

        return outhiers
def add_component_ideal_helix(
self,
name,
resolutions,
resrange,
color=None,
show=False):
self.representation_is_modified = True
from math import pi, cos, sin
protein_h = self.hier_dict[name]
outhiers = []
if color is None:
color = self.color_dict[name]
start = resrange[0]
end = resrange[1]
self.elements[name].append((start, end, " ", "helix"))
c0 = IMP.atom.Chain.setup_particle(IMP.Particle(self.m), "X")
for n, res in enumerate(range(start, end + 1)):
if name in self.sequence_dict:
try:
rtstr = self.onetothree[
self.sequence_dict[name][res-1]]
except:
rtstr = "UNK"
rt = IMP.atom.ResidueType(rtstr)
else:
rt = IMP.atom.ResidueType("ALA")
# get the residue volume
try:
vol = IMP.atom.get_volume_from_residue_type(rt)
# mass=IMP.atom.get_mass_from_residue_type(rt)
except IMP.ValueException:
vol = IMP.atom.get_volume_from_residue_type(
IMP.atom.ResidueType("ALA"))
# mass=IMP.atom.get_mass_from_residue_type(IMP.atom.ResidueType("ALA"))
radius = IMP.algebra.get_ball_radius_from_volume_3d(vol)
r = IMP.atom.Residue.setup_particle(IMP.Particle(self.m), rt, res)
p = IMP.Particle(self.m)
d = IMP.core.XYZR.setup_particle(p)
x = 2.3 * cos(n * 2 * pi / 3.6)
y = 2.3 * sin(n * 2 * pi / 3.6)
z = 6.2 / 3.6 / 2 * n * 2 * pi / 3.6
d.set_coordinates(IMP.algebra.Vector3D(x, y, z))
d.set_radius(radius)
# print d
a = IMP.atom.Atom.setup_particle(p, IMP.atom.AT_CA)
r.add_child(a)
c0.add_child(r)
outhiers += self.coarse_hierarchy(name, start, end,
resolutions, False, c0, protein_h, "helix", color)
if show:
IMP.atom.show_molecular_hierarchy(protein_h)
IMP.atom.destroy(c0)
return outhiers
def add_component_beads(self, name, ds, colors=None, incoord=None):
""" add beads to the representation
@param name the component name
@param ds a list of tuples corresponding to the residue ranges
of the beads
@param colors a list of colors associated to the beads
@param incoord the coordinate tuple corresponding to the position
of the beads
"""
from math import pi
self.representation_is_modified = True
protein_h = self.hier_dict[name]
outhiers = []
if colors is None:
colors = [self.color_dict[name]]
for n, dss in enumerate(ds):
ds_frag = (dss[0], dss[1])
self.elements[name].append((dss[0], dss[1], " ", "bead"))
prt = IMP.Particle(self.m)
if ds_frag[0] == ds_frag[1]:
# if the bead represent a single residue
if name in self.sequence_dict:
try:
rtstr = self.onetothree[
self.sequence_dict[name][ds_frag[0]-1]]
except:
rtstr = "UNK"
rt = IMP.atom.ResidueType(rtstr)
else:
rt = IMP.atom.ResidueType("ALA")
h = IMP.atom.Residue.setup_particle(prt, rt, ds_frag[0])
h.set_name(name + '_%i_bead' % (ds_frag[0]))
prt.set_name(name + '_%i_bead' % (ds_frag[0]))
resolution = 1
else:
h = IMP.atom.Fragment.setup_particle(prt)
h.set_name(name + '_%i-%i_bead' % (ds_frag[0], ds_frag[1]))
prt.set_name(name + '_%i-%i_bead' % (ds_frag[0], ds_frag[1]))
h.set_residue_indexes(list(range(ds_frag[0], ds_frag[1] + 1)))
resolution = len(h.get_residue_indexes())
if "Beads" not in self.hier_representation[name]:
root = IMP.atom.Hierarchy.setup_particle(IMP.Particle(self.m))
root.set_name("Beads")
self.hier_representation[name]["Beads"] = root
protein_h.add_child(root)
self.hier_representation[name]["Beads"].add_child(h)
for kk in range(ds_frag[0], ds_frag[1] + 1):
self.hier_db.add_particles(name, kk, resolution, [h])
try:
clr = IMP.display.get_rgb_color(colors[n])
except:
clr = IMP.display.get_rgb_color(colors[0])
IMP.display.Colored.setup_particle(prt, clr)
# decorate particles according to their resolution
IMP.pmi.Resolution.setup_particle(prt, resolution)
IMP.core.XYZR.setup_particle(prt)
ptem = IMP.core.XYZR(prt)
mass = IMP.atom.get_mass_from_number_of_residues(resolution)
if resolution == 1:
try:
vol = IMP.atom.get_volume_from_residue_type(rt)
except IMP.ValueException:
vol = IMP.atom.get_volume_from_residue_type(
IMP.atom.ResidueType("ALA"))
radius = IMP.algebra.get_ball_radius_from_volume_3d(vol)
IMP.atom.Mass.setup_particle(prt, mass)
ptem.set_radius(radius)
else:
volume = IMP.atom.get_volume_from_mass(mass)
radius = 0.8 * (3.0 / 4.0 / pi * volume) ** (1.0 / 3.0)
IMP.atom.Mass.setup_particle(prt, mass)
ptem.set_radius(radius)
try:
if not tuple(incoord) is None:
ptem.set_coordinates(incoord)
except TypeError:
pass
IMP.pmi.Uncertainty.setup_particle(prt, radius)
IMP.pmi.Symmetric.setup_particle(prt, 0)
self.floppy_bodies.append(prt)
IMP.core.XYZ(prt).set_coordinates_are_optimized(True)
outhiers += [h]
for p, state in self._protocol_output:
p.add_bead_element(state, name, ds[0][0], ds[-1][1], len(ds),
outhiers[0])
return outhiers
def add_component_necklace(self, name, begin, end, length, color=None,
incoord=None):
'''
Generates a string of beads with given length.
'''
self.representation_is_modified = True
outhiers = []
if color is None:
colors=None
else:
colors=[color]
for chunk in IMP.pmi.tools.list_chunks_iterator(range(begin, end + 1), length):
outhiers += self.add_component_beads(name,
[(chunk[0], chunk[-1])], colors=colors,incoord=incoord)
return outhiers
    def add_component_density(
            self, name, hierarchies=None, selection_tuples=None,
            particles=None,
            resolution=0.0, num_components=10,
            inputfile=None, outputfile=None,
            outputmap=None,
            kernel_type=None,
            covariance_type='full', voxel_size=1.0,
            out_hier_name='',
            sampled_points=1000000, num_iter=100,
            simulation_res=1.0,
            multiply_by_total_mass=True,
            transform=None,
            intermediate_map_fn=None,
            density_ps_to_copy=None,
            use_precomputed_gaussians=False):
        '''
        Sets up a Gaussian Mixture Model for this component.

        Can specify input GMM file or it will be computed.
        @param name component name
        @param hierarchies set up GMM for some hierarchies
        @param selection_tuples (list of tuples) example (first_residue,last_residue,component_name)
        @param particles set up GMM for particles directly
        @param resolution usual PMI resolution for selecting particles from the hierarchies
        @param inputfile read the GMM from this file
        @param outputfile fit and write the GMM to this file (text)
        @param outputmap after fitting, create GMM density file (mrc)
        @param kernel_type for creating the intermediate density (points are sampled to make GMM). Options are IMP.em.GAUSSIAN, IMP.em.SPHERE, and IMP.em.BINARIZED_SPHERE
        @param covariance_type for fitting the GMM. options are 'full', 'diagonal' and 'spherical'
        @param voxel_size for creating the intermediate density map and output map.
               lower number increases accuracy but also rasterizing time grows
        @param out_hier_name name of the output density hierarchy
        @param sampled_points number of points to sample. more will increase accuracy and fitting time
        @param num_iter num GMM iterations. more will increase accuracy and fitting time
        @param multiply_by_total_mass multiply the weights of the GMM by this value (only works on creation!)
        @param transform for input file only, apply a transformation (eg for multiple copies same GMM)
        @param intermediate_map_fn for debugging, this will write the intermediate (simulated) map
        @param density_ps_to_copy in case you already created the appropriate GMM (eg, for beads)
        @param use_precomputed_gaussians Set this flag and pass fragments - will use roughly spherical Gaussian setup
        '''
        import numpy as np
        import sys
        import IMP.em
        import IMP.isd.gmm_tools

        # prepare output
        self.representation_is_modified = True
        out_hier = []
        protein_h = self.hier_dict[name]
        if "Densities" not in self.hier_representation[name]:
            root = IMP.atom.Hierarchy.setup_particle(IMP.Particle(self.m))
            root.set_name("Densities")
            self.hier_representation[name]["Densities"] = root
            protein_h.add_child(root)

        # gather passed particles from the three possible input channels
        fragment_particles = []
        if not particles is None:
            fragment_particles += particles
        if not hierarchies is None:
            fragment_particles += IMP.pmi.tools.select(
                self, resolution=resolution,
                hierarchies=hierarchies)
        if not selection_tuples is None:
            for st in selection_tuples:
                fragment_particles += IMP.pmi.tools.select_by_tuple(
                    self, tupleselection=st,
                    resolution=resolution,
                    name_is_ambiguous=False)

        # compute or read gaussians
        density_particles = []
        if inputfile:
            # read a previously fitted GMM from a text file
            IMP.isd.gmm_tools.decorate_gmm_from_text(
                inputfile, density_particles,
                self.m, transform)
        elif density_ps_to_copy:
            # clone Gaussian decorations from existing particles
            for ip in density_ps_to_copy:
                p = IMP.Particle(self.m)
                shape = IMP.core.Gaussian(ip).get_gaussian()
                mass = IMP.atom.Mass(ip).get_mass()
                IMP.core.Gaussian.setup_particle(p, shape)
                IMP.atom.Mass.setup_particle(p, mass)
                density_particles.append(p)
        elif use_precomputed_gaussians:
            if len(fragment_particles) == 0:
                print("add_component_density: no particle was selected")
                return out_hier
            for p in fragment_particles:
                if not (IMP.atom.Fragment.get_is_setup(self.m, p.get_particle_index()) and
                        IMP.core.XYZ.get_is_setup(self.m, p.get_particle_index())):
                    raise Exception("The particles you selected must be Fragments and XYZs")
                nres = len(IMP.atom.Fragment(self.m, p.get_particle_index()).get_residue_indexes())
                pos = IMP.core.XYZ(self.m, p.get_particle_index()).get_coordinates()
                # NOTE(review): density_particles is reset inside this loop,
                # so only the last fragment's gaussians survive - confirm
                # whether this is intended.
                density_particles = []
                try:
                    IMP.isd.get_data_path("beads/bead_%i.txt" % nres)
                except:
                    raise Exception("We haven't created a bead file for", nres, "residues")
                transform = IMP.algebra.Transformation3D(pos)
                IMP.isd.gmm_tools.decorate_gmm_from_text(
                    IMP.isd.get_data_path("beads/bead_%i.txt" % nres), density_particles,
                    self.m, transform)
        else:
            # compute the gaussians here by sampling points and fitting
            if len(fragment_particles) == 0:
                print("add_component_density: no particle was selected")
                return out_hier

            density_particles = IMP.isd.gmm_tools.sample_and_fit_to_particles(
                self.m,
                fragment_particles,
                num_components,
                sampled_points,
                simulation_res,
                voxel_size,
                num_iter,
                covariance_type,
                multiply_by_total_mass,
                outputmap,
                outputfile)

        # prepare output hierarchy
        s0 = IMP.atom.Fragment.setup_particle(IMP.Particle(self.m))
        s0.set_name(out_hier_name)
        self.hier_representation[name]["Densities"].add_child(s0)
        out_hier.append(s0)
        for nps, p in enumerate(density_particles):
            s0.add_child(p)
            p.set_name(s0.get_name() + '_gaussian_%i' % nps)
        return out_hier
def get_component_density(self, name):
return self.hier_representation[name]["Densities"]
def add_all_atom_densities(self, name, hierarchies=None,
selection_tuples=None,
particles=None,
resolution=0,
output_map=None,
voxel_size=1.0):
'''Decorates all specified particles as Gaussians directly.
@param name component name
@param hierarchies set up GMM for some hierarchies
@param selection_tuples (list of tuples) example (first_residue,last_residue,component_name)
@param particles set up GMM for particles directly
@param resolution usual PMI resolution for selecting particles from the hierarchies
'''
import IMP.em
import numpy as np
import sys
from math import sqrt
self.representation_is_modified = True
if particles is None:
fragment_particles = []
else:
fragment_particles = particles
if not hierarchies is None:
fragment_particles += IMP.pmi.tools.select(
self, resolution=resolution,
hierarchies=hierarchies)
if not selection_tuples is None:
for st in selection_tuples:
fragment_particles += IMP.pmi.tools.select_by_tuple(
self, tupleselection=st,
resolution=resolution,
name_is_ambiguous=False)
if len(fragment_particles) == 0:
print("add all atom densities: no particle was selected")
return
# create a spherical gaussian for each particle based on atom type
print('setting up all atom gaussians num_particles',len(fragment_particles))
for n,p in enumerate(fragment_particles):
if IMP.core.Gaussian.get_is_setup(p): continue
center=IMP.core.XYZ(p).get_coordinates()
rad=IMP.core.XYZR(p).get_radius()
mass=IMP.atom.Mass(p).get_mass()
trans=IMP.algebra.Transformation3D(IMP.algebra.get_identity_rotation_3d(),center)
shape=IMP.algebra.Gaussian3D(IMP.algebra.ReferenceFrame3D(trans),[rad]*3)
IMP.core.Gaussian.setup_particle(p,shape)
print('setting up particle',p.get_name(), " as individual gaussian particle")
if not output_map is None:
print('writing map to', output_map)
IMP.isd.gmm_tools.write_gmm_to_map(
fragment_particles,
output_map,
voxel_size)
    def add_component_hierarchy_clone(self, name, hierarchy):
        '''
        Make a copy of a hierarchy and append it to a component.

        @param name component that receives the clone
        @param hierarchy hierarchy to clone
        @return a one-element list containing the cloned root hierarchy
        '''
        outhier = []
        self.representation_is_modified = True
        protein_h = self.hier_dict[name]
        hierclone = IMP.atom.create_clone(hierarchy)
        hierclone.set_name(hierclone.get_name() + "_clone")
        protein_h.add_child(hierclone)
        outhier.append(hierclone)
        psmain = IMP.atom.get_leaves(hierarchy)
        psclone = IMP.atom.get_leaves(hierclone)
        # copying attributes
        # Copy the PMI decorators (Resolution, Uncertainty, Symmetric) from
        # each source leaf to the matching clone leaf; get_leaves is assumed
        # to return leaves in the same order for both hierarchies.
        for n, pmain in enumerate(psmain):
            pclone = psclone[n]
            if IMP.pmi.Resolution.get_is_setup(pmain):
                resolution = IMP.pmi.Resolution(pmain).get_resolution()
                IMP.pmi.Resolution.setup_particle(pclone, resolution)
                # register the clone in the residue database at the same
                # resolution as the original
                for kk in IMP.pmi.tools.get_residue_indexes(pclone):
                    self.hier_db.add_particles(
                        name,
                        kk,
                        IMP.pmi.Resolution(pclone).get_resolution(),
                        [pclone])
            if IMP.pmi.Uncertainty.get_is_setup(pmain):
                uncertainty = IMP.pmi.Uncertainty(pmain).get_uncertainty()
                IMP.pmi.Uncertainty.setup_particle(pclone, uncertainty)
            if IMP.pmi.Symmetric.get_is_setup(pmain):
                symmetric = IMP.pmi.Symmetric(pmain).get_symmetric()
                IMP.pmi.Symmetric.setup_particle(pclone, symmetric)
        return outhier
def dump_particle_descriptors(self):
import numpy
import pickle
import IMP.isd
import IMP.isd.gmm_tools
particles_attributes={}
floppy_body_attributes={}
gaussians=[]
for h in IMP.atom.get_leaves(self.prot):
leaf=h
name=h.get_name()
hroot=self.prot
hparent=h.get_parent()
while hparent != hroot:
hparent=h.get_parent()
name+="|"+hparent.get_name()
h=hparent
particles_attributes[name]={"COORDINATES":numpy.array(IMP.core.XYZR(leaf.get_particle()).get_coordinates()),
"RADIUS":IMP.core.XYZR(leaf.get_particle()).get_radius(),
"MASS":IMP.atom.Mass(leaf.get_particle()).get_mass()}
if IMP.core.Gaussian.get_is_setup(leaf.get_particle()):
gaussians.append(IMP.core.Gaussian(leaf.get_particle()))
rigid_body_attributes={}
for rb in self.rigid_bodies:
name=rb.get_name()
rf=rb.get_reference_frame()
t=rf.get_transformation_to()
trans=t.get_translation()
rot=t.get_rotation()
rigid_body_attributes[name]={"TRANSLATION":numpy.array(trans),
"ROTATION":numpy.array(rot.get_quaternion()),
"COORDINATES_NONRIGID_MEMBER":{},
"COORDINATES_RIGID_MEMBER":{}}
for mi in rb.get_member_indexes():
rm=self.m.get_particle(mi)
if IMP.core.NonRigidMember.get_is_setup(rm):
name_part=rm.get_name()
xyz=[self.m.get_attribute(fk, rm) for fk in [IMP.FloatKey(4), IMP.FloatKey(5), IMP.FloatKey(6)]]
rigid_body_attributes[name]["COORDINATES_NONRIGID_MEMBER"][name_part]=numpy.array(xyz)
else:
name_part=rm.get_name()
xyz=IMP.core.XYZ(rm).get_coordinates()
rigid_body_attributes[name]["COORDINATES_RIGID_MEMBER"][name_part]=numpy.array(xyz)
IMP.isd.gmm_tools.write_gmm_to_text(gaussians,"model_gmm.txt")
pickle.dump(particles_attributes,
open("particles_attributes.pkl", "wb"))
pickle.dump(rigid_body_attributes,
open("rigid_body_attributes.pkl", "wb"))
def load_particle_descriptors(self):
import numpy
import pickle
import IMP.isd
import IMP.isd.gmm_tools
particles_attributes = pickle.load(open("particles_attributes.pkl",
"rb"))
rigid_body_attributes = pickle.load(open("rigid_body_attributes.pkl",
"rb"))
particles=[]
hierarchies=[]
gaussians=[]
for h in IMP.atom.get_leaves(self.prot):
leaf=h
name=h.get_name()
hroot=self.prot
hparent=h.get_parent()
while hparent != hroot:
hparent=h.get_parent()
name+="|"+hparent.get_name()
h=hparent
xyzr=IMP.core.XYZR(leaf.get_particle())
xyzr.set_coordinates(particles_attributes[name]["COORDINATES"])
#xyzr.set_radius(particles_attributes[name]["RADIUS"])
#IMP.atom.Mass(leaf.get_particle()).set_mass(particles_attributes[name]["MASS"])
if IMP.core.Gaussian.get_is_setup(leaf.get_particle()):
gaussians.append(IMP.core.Gaussian(leaf.get_particle()))
for rb in self.rigid_bodies:
name=rb.get_name()
trans=rigid_body_attributes[name]["TRANSLATION"]
rot=rigid_body_attributes[name]["ROTATION"]
t=IMP.algebra.Transformation3D(IMP.algebra.Rotation3D(rot),trans)
rf=IMP.algebra.ReferenceFrame3D(t)
rb.set_reference_frame(rf)
coor_nrm_ref=rigid_body_attributes[name]["COORDINATES_NONRIGID_MEMBER"]
coor_rm_ref_dict=rigid_body_attributes[name]["COORDINATES_RIGID_MEMBER"]
coor_rm_model=[]
coor_rm_ref=[]
for mi in rb.get_member_indexes():
rm=self.m.get_particle(mi)
if IMP.core.NonRigidMember.get_is_setup(rm):
name_part=rm.get_name()
xyz=coor_nrm_ref[name_part]
for n,fk in enumerate([IMP.FloatKey(4), IMP.FloatKey(5), IMP.FloatKey(6)]):
self.m.set_attribute(fk, rm,xyz[n])
else:
name_part=rm.get_name()
coor_rm_ref.append(IMP.algebra.Vector3D(coor_rm_ref_dict[name_part]))
coor_rm_model.append(IMP.core.XYZ(rm).get_coordinates())
if len(coor_rm_model)==0: continue
t=IMP.algebra.get_transformation_aligning_first_to_second(coor_rm_model,coor_rm_ref)
IMP.core.transform(rb,t)
IMP.isd.gmm_tools.decorate_gmm_from_text("model_gmm.txt",gaussians,self.m)
def _compare_rmf_repr_names(self, rmfname, reprname, component_name):
"""Print a warning if particle names in RMF and model don't match"""
def match_any_suffix():
# Handle common mismatches like 743 != Nup85_743_pdb
suffixes = ["pdb", "bead_floppy_body_rigid_body_member_floppy_body",
"bead_floppy_body_rigid_body_member",
"bead_floppy_body"]
for s in suffixes:
if "%s_%s_%s" % (component_name, rmfname, s) == reprname:
return True
if rmfname != reprname and not match_any_suffix():
print("set_coordinates_from_rmf: WARNING rmf particle and "
"representation particle names don't match %s %s"
% (rmfname, reprname))
def set_coordinates_from_rmf(self, component_name, rmf_file_name,
rmf_frame_number,
rmf_component_name=None,
check_number_particles=True,
representation_name_to_rmf_name_map=None,
state_number=0,
skip_gaussian_in_rmf=False,
skip_gaussian_in_representation=False,
save_file=False,
force_rigid_update=False):
'''Read and replace coordinates from an RMF file.
Replace the coordinates of particles with the same name.
It assumes that the RMF and the representation have the particles
in the same order.
@param component_name Component name
@param rmf_component_name Name of the component in the RMF file
(if not specified, use `component_name`)
@param representation_name_to_rmf_name_map a dictionary that map
the original rmf particle name to the recipient particle component name
@param save_file: save a file with the names of particles of the component
@param force_rigid_update: update the coordinates of rigid bodies
(normally this should be called before rigid bodies are set up)
'''
import IMP.pmi.analysis
prots = IMP.pmi.analysis.get_hiers_from_rmf(
self.m,
rmf_frame_number,
rmf_file_name)
if not prots:
raise ValueError("cannot read hierarchy from rmf")
prot=prots[0]
# Make sure coordinates of rigid body members in the RMF are correct
if force_rigid_update:
self.m.update()
# if len(self.rigid_bodies)!=0:
# print "set_coordinates_from_rmf: cannot proceed if rigid bodies were initialized. Use the function before defining the rigid bodies"
# exit()
allpsrmf = IMP.atom.get_leaves(prot)
psrmf = []
for p in allpsrmf:
(protname, is_a_bead) = IMP.pmi.tools.get_prot_name_from_particle(
p, self.hier_dict.keys())
if (protname is None) and (rmf_component_name is not None):
(protname, is_a_bead) = IMP.pmi.tools.get_prot_name_from_particle(
p, rmf_component_name)
if (skip_gaussian_in_rmf):
if (IMP.core.Gaussian.get_is_setup(p)) and not (IMP.atom.Fragment.get_is_setup(p) or IMP.atom.Residue.get_is_setup(p)):
continue
if (rmf_component_name is not None) and (protname == rmf_component_name):
psrmf.append(p)
elif (rmf_component_name is None) and (protname == component_name):
psrmf.append(p)
psrepr = IMP.atom.get_leaves(self.hier_dict[component_name])
if (skip_gaussian_in_representation):
allpsrepr = psrepr
psrepr = []
for p in allpsrepr:
#(protname, is_a_bead) = IMP.pmi.tools.get_prot_name_from_particle(
# p, self.hier_dict.keys())
if (IMP.core.Gaussian.get_is_setup(p)) and not (IMP.atom.Fragment.get_is_setup(p) or IMP.atom.Residue.get_is_setup(p)):
continue
psrepr.append(p)
import itertools
reprnames=[p.get_name() for p in psrepr]
rmfnames=[p.get_name() for p in psrmf]
if save_file:
fl=open(component_name+".txt","w")
for i in itertools.izip_longest(reprnames,rmfnames): fl.write(str(i[0])+","+str(i[1])+"\n")
if check_number_particles and not representation_name_to_rmf_name_map:
if len(psrmf) != len(psrepr):
fl=open(component_name+".txt","w")
for i in itertools.izip_longest(reprnames,rmfnames): fl.write(str(i[0])+","+str(i[1])+"\n")
raise ValueError("%s cannot proceed the rmf and the representation don't have the same number of particles; "
"particles in rmf: %s particles in the representation: %s" % (str(component_name), str(len(psrmf)), str(len(psrepr))))
if not representation_name_to_rmf_name_map:
for n, prmf in enumerate(psrmf):
prmfname = prmf.get_name()
preprname = psrepr[n].get_name()
if force_rigid_update:
if IMP.core.RigidBody.get_is_setup(psrepr[n]) \
and not IMP.core.RigidBodyMember.get_is_setup(psrepr[n]):
continue
else:
if IMP.core.RigidBodyMember.get_is_setup(psrepr[n]):
raise ValueError("component %s cannot proceed if rigid bodies were initialized. Use the function before defining the rigid bodies" % component_name)
self._compare_rmf_repr_names(prmfname, preprname,
component_name)
if IMP.core.XYZ.get_is_setup(prmf) and IMP.core.XYZ.get_is_setup(psrepr[n]):
xyz = IMP.core.XYZ(prmf).get_coordinates()
IMP.core.XYZ(psrepr[n]).set_coordinates(xyz)
if IMP.core.RigidBodyMember.get_is_setup(psrepr[n]):
# Set rigid body so that coordinates are preserved
# on future model updates
rbm = IMP.core.RigidBodyMember(psrepr[n])
rbm.set_internal_coordinates(xyz)
tr = IMP.algebra.ReferenceFrame3D()
rb = rbm.get_rigid_body()
if IMP.core.RigidBodyMember.get_is_setup(rb):
raise ValueError("Cannot handle nested "
"rigid bodies yet")
rb.set_reference_frame_lazy(tr)
else:
print("set_coordinates_from_rmf: WARNING particles are not XYZ decorated %s %s " % (str(IMP.core.XYZ.get_is_setup(prmf)), str(IMP.core.XYZ.get_is_setup(psrepr[n]))))
if IMP.core.Gaussian.get_is_setup(prmf) and IMP.core.Gaussian.get_is_setup(psrepr[n]):
gprmf=IMP.core.Gaussian(prmf)
grepr=IMP.core.Gaussian(psrepr[n])
g=gprmf.get_gaussian()
grepr.set_gaussian(g)
else:
repr_name_particle_map={}
rmf_name_particle_map={}
for p in psrmf:
rmf_name_particle_map[p.get_name()]=p
#for p in psrepr:
# repr_name_particle_map[p.get_name()]=p
for prepr in psrepr:
try:
prmf=rmf_name_particle_map[representation_name_to_rmf_name_map[prepr.get_name()]]
except KeyError:
print("set_coordinates_from_rmf: WARNING missing particle name in representation_name_to_rmf_name_map, skipping...")
continue
xyz = IMP.core.XYZ(prmf).get_coordinates()
IMP.core.XYZ(prepr).set_coordinates(xyz)
def check_root(self, name, protein_h, resolution):
'''
If the root hierarchy does not exist, construct it.
'''
if "Res:" + str(int(resolution)) not in self.hier_representation[name]:
root = IMP.atom.Hierarchy.setup_particle(IMP.Particle(self.m))
root.set_name(name + "_Res:" + str(int(resolution)))
self.hier_representation[name][
"Res:" + str(int(resolution))] = root
protein_h.add_child(root)
def coarse_hierarchy(self, name, start, end, resolutions, isnucleicacid,
input_hierarchy, protein_h, type, color):
'''
Generate all needed coarse grained layers.
@param name name of the protein
@param resolutions list of resolutions
@param protein_h root hierarchy
@param input_hierarchy hierarchy to coarse grain
@param type a string, typically "pdb" or "helix"
'''
outhiers = []
if (1 in resolutions) or (0 in resolutions):
# in that case create residues and append atoms
if 1 in resolutions:
self.check_root(name, protein_h, 1)
s1 = IMP.atom.Fragment.setup_particle(IMP.Particle(self.m))
s1.set_name('%s_%i-%i_%s' % (name, start, end, type))
# s1.set_residue_indexes(range(start,end+1))
self.hier_representation[name]["Res:1"].add_child(s1)
outhiers += [s1]
if 0 in resolutions:
self.check_root(name, protein_h, 0)
s0 = IMP.atom.Fragment.setup_particle(IMP.Particle(self.m))
s0.set_name('%s_%i-%i_%s' % (name, start, end, type))
# s0.set_residue_indexes(range(start,end+1))
self.hier_representation[name]["Res:0"].add_child(s0)
outhiers += [s0]
if not isnucleicacid:
sel = IMP.atom.Selection(
input_hierarchy,
atom_type=IMP.atom.AT_CA)
else:
sel = IMP.atom.Selection(
input_hierarchy,
atom_type=IMP.atom.AT_P)
for p in sel.get_selected_particles():
resobject = IMP.atom.Residue(IMP.atom.Atom(p).get_parent())
if 0 in resolutions:
# if you ask for atoms
resclone0 = IMP.atom.create_clone(resobject)
resindex = IMP.atom.Residue(resclone0).get_index()
s0.add_child(resclone0)
self.hier_db.add_particles(
name,
resindex,
0,
resclone0.get_children())
chil = resclone0.get_children()
for ch in chil:
IMP.pmi.Resolution.setup_particle(ch, 0)
try:
clr = IMP.display.get_rgb_color(color)
except:
clr = IMP.display.get_rgb_color(1.0)
IMP.display.Colored.setup_particle(ch, clr)
if 1 in resolutions:
# else clone the residue
resclone1 = IMP.atom.create_clone_one(resobject)
resindex = IMP.atom.Residue(resclone1).get_index()
s1.add_child(resclone1)
self.hier_db.add_particles(name, resindex, 1, [resclone1])
rt = IMP.atom.Residue(resclone1).get_residue_type()
xyz = IMP.core.XYZ(p).get_coordinates()
prt = resclone1.get_particle()
prt.set_name('%s_%i_%s' % (name, resindex, type))
IMP.core.XYZ.setup_particle(prt).set_coordinates(xyz)
try:
vol = IMP.atom.get_volume_from_residue_type(rt)
# mass=IMP.atom.get_mass_from_residue_type(rt)
except IMP.ValueException:
vol = IMP.atom.get_volume_from_residue_type(
IMP.atom.ResidueType("ALA"))
# mass=IMP.atom.get_mass_from_residue_type(IMP.atom.ResidueType("ALA"))
radius = IMP.algebra.get_ball_radius_from_volume_3d(vol)
IMP.core.XYZR.setup_particle(prt).set_radius(radius)
IMP.atom.Mass.setup_particle(prt, 100)
IMP.pmi.Uncertainty.setup_particle(prt, radius)
IMP.pmi.Symmetric.setup_particle(prt, 0)
IMP.pmi.Resolution.setup_particle(prt, 1)
try:
clr = IMP.display.get_rgb_color(color)
except:
clr = IMP.display.get_rgb_color(1.0)
IMP.display.Colored.setup_particle(prt, clr)
for r in resolutions:
if r != 0 and r != 1:
self.check_root(name, protein_h, r)
s = IMP.atom.create_simplified_along_backbone(
input_hierarchy,
r)
chil = s.get_children()
s0 = IMP.atom.Fragment.setup_particle(IMP.Particle(self.m))
s0.set_name('%s_%i-%i_%s' % (name, start, end, type))
# Move all children from s to s0
for ch in chil:
s.remove_child(ch)
s0.add_child(ch)
self.hier_representation[name][
"Res:" + str(int(r))].add_child(s0)
outhiers += [s0]
IMP.atom.destroy(s)
for prt in IMP.atom.get_leaves(s0):
ri = IMP.atom.Fragment(prt).get_residue_indexes()
first = ri[0]
last = ri[-1]
if first == last:
prt.set_name('%s_%i_%s' % (name, first, type))
else:
prt.set_name('%s_%i_%i_%s' % (name, first, last, type))
for kk in ri:
self.hier_db.add_particles(name, kk, r, [prt])
radius = IMP.core.XYZR(prt).get_radius()
IMP.pmi.Uncertainty.setup_particle(prt, radius)
IMP.pmi.Symmetric.setup_particle(prt, 0)
IMP.pmi.Resolution.setup_particle(prt, r)
# setting up color for each particle in the
# hierarchy, if colors missing in the colors list set it to
# red
try:
clr = IMP.display.get_rgb_color(color)
except:
colors.append(1.0)
clr = IMP.display.get_rgb_color(colors[pdb_part_count])
IMP.display.Colored.setup_particle(prt, clr)
return outhiers
def get_hierarchies_at_given_resolution(self, resolution):
'''
Get the hierarchies at the given resolution.
The map between resolution and hierarchies is cached to accelerate
the selection algorithm. The cache is invalidated when the
representation was changed by any add_component_xxx.
'''
if self.representation_is_modified:
rhbr = self.hier_db.get_all_root_hierarchies_by_resolution(
resolution)
self.hier_resolution[resolution] = rhbr
self.representation_is_modified = False
return rhbr
else:
if resolution in self.hier_resolution:
return self.hier_resolution[resolution]
else:
rhbr = self.hier_db.get_all_root_hierarchies_by_resolution(
resolution)
self.hier_resolution[resolution] = rhbr
return rhbr
def shuffle_configuration(
self, max_translation=300., max_rotation=2.0 * pi,
avoidcollision=True, cutoff=10.0, niterations=100,
bounding_box=None,
excluded_rigid_bodies=None,
ignore_initial_coordinates=False,
hierarchies_excluded_from_collision=None):
'''
Shuffle configuration; used to restart the optimization.
The configuration of the system is initialized by placing each
rigid body and each bead randomly in a box with a side of
`max_translation` angstroms, and far enough from each other to
prevent any steric clashes. The rigid bodies are also randomly rotated.
@param avoidcollision check if the particle/rigid body was
placed close to another particle; uses the optional
arguments cutoff and niterations
@param bounding_box defined by ((x1,y1,z1),(x2,y2,z2))
'''
if excluded_rigid_bodies is None:
excluded_rigid_bodies = []
if hierarchies_excluded_from_collision is None:
hierarchies_excluded_from_collision = []
if len(self.rigid_bodies) == 0:
print("shuffle_configuration: rigid bodies were not intialized")
gcpf = IMP.core.GridClosePairsFinder()
gcpf.set_distance(cutoff)
ps = []
hierarchies_excluded_from_collision_indexes = []
for p in IMP.atom.get_leaves(self.prot):
if IMP.core.XYZ.get_is_setup(p):
ps.append(p)
if IMP.core.Gaussian.get_is_setup(p):
# remove the densities particles out of the calculation
hierarchies_excluded_from_collision_indexes += IMP.get_indexes([p])
allparticleindexes = IMP.get_indexes(ps)
if bounding_box is not None:
((x1, y1, z1), (x2, y2, z2)) = bounding_box
lb = IMP.algebra.Vector3D(x1, y1, z1)
ub = IMP.algebra.Vector3D(x2, y2, z2)
bb = IMP.algebra.BoundingBox3D(lb, ub)
for h in hierarchies_excluded_from_collision:
hierarchies_excluded_from_collision_indexes += IMP.get_indexes(IMP.atom.get_leaves(h))
allparticleindexes = list(
set(allparticleindexes) - set(hierarchies_excluded_from_collision_indexes))
print(hierarchies_excluded_from_collision)
print(len(allparticleindexes),len(hierarchies_excluded_from_collision_indexes))
print('shuffling', len(self.rigid_bodies) - len(excluded_rigid_bodies), 'rigid bodies')
for rb in self.rigid_bodies:
if rb not in excluded_rigid_bodies:
if avoidcollision:
rbindexes = rb.get_member_particle_indexes()
rbindexes = list(
set(rbindexes) - set(hierarchies_excluded_from_collision_indexes))
otherparticleindexes = list(
set(allparticleindexes) - set(rbindexes))
if len(otherparticleindexes) is None:
continue
niter = 0
while niter < niterations:
if (ignore_initial_coordinates):
# Move the particle to the origin
transformation = IMP.algebra.Transformation3D(IMP.algebra.get_identity_rotation_3d(), -IMP.core.XYZ(rb).get_coordinates())
IMP.core.transform(rb, transformation)
rbxyz = IMP.core.XYZ(rb).get_coordinates()
if bounding_box is not None:
# overrides the perturbation
translation = IMP.algebra.get_random_vector_in(bb)
rotation = IMP.algebra.get_random_rotation_3d()
transformation = IMP.algebra.Transformation3D(rotation, translation-rbxyz)
else:
transformation = IMP.algebra.get_random_local_transformation(
rbxyz,
max_translation,
max_rotation)
IMP.core.transform(rb, transformation)
if avoidcollision:
self.m.update()
npairs = len(
gcpf.get_close_pairs(
self.m,
otherparticleindexes,
rbindexes))
if npairs == 0:
niter = niterations
if (ignore_initial_coordinates):
print (rb.get_name(), IMP.core.XYZ(rb).get_coordinates())
else:
niter += 1
print("shuffle_configuration: rigid body placed close to other %d particles, trying again..." % npairs)
print("shuffle_configuration: rigid body name: " + rb.get_name())
if niter == niterations:
raise ValueError("tried the maximum number of iterations to avoid collisions, increase the distance cutoff")
else:
break
print('shuffling', len(self.floppy_bodies), 'floppy bodies')
for fb in self.floppy_bodies:
if (avoidcollision):
rm = not IMP.core.RigidMember.get_is_setup(fb)
nrm = not IMP.core.NonRigidMember.get_is_setup(fb)
if rm and nrm:
fbindexes = IMP.get_indexes([fb])
otherparticleindexes = list(
set(allparticleindexes) - set(fbindexes))
if len(otherparticleindexes) is None:
continue
else:
continue
else:
rm = IMP.core.RigidMember.get_is_setup(fb)
nrm = IMP.core.NonRigidMember.get_is_setup(fb)
if (rm or nrm):
continue
if IMP.core.RigidBodyMember.get_is_setup(fb):
d=IMP.core.RigidBodyMember(fb).get_rigid_body()
elif IMP.core.RigidBody.get_is_setup(fb):
d=IMP.core.RigidBody(fb)
elif IMP.core.XYZ.get_is_setup(fb):
d=IMP.core.XYZ(fb)
niter = 0
while niter < niterations:
if (ignore_initial_coordinates):
# Move the particle to the origin
transformation = IMP.algebra.Transformation3D(IMP.algebra.get_identity_rotation_3d(), -IMP.core.XYZ(fb).get_coordinates())
IMP.core.transform(d, transformation)
fbxyz = IMP.core.XYZ(fb).get_coordinates()
if bounding_box is not None:
# overrides the perturbation
translation = IMP.algebra.get_random_vector_in(bb)
transformation = IMP.algebra.Transformation3D(translation-fbxyz)
else:
transformation = IMP.algebra.get_random_local_transformation(
fbxyz,
max_translation,
max_rotation)
IMP.core.transform(d, transformation)
if (avoidcollision):
self.m.update()
npairs = len(
gcpf.get_close_pairs(
self.m,
otherparticleindexes,
fbindexes))
if npairs == 0:
niter = niterations
if (ignore_initial_coordinates):
print (fb.get_name(), IMP.core.XYZ(fb).get_coordinates())
else:
niter += 1
print("shuffle_configuration: floppy body placed close to other %d particles, trying again..." % npairs)
print("shuffle_configuration: floppy body name: " + fb.get_name())
if niter == niterations:
raise ValueError("tried the maximum number of iterations to avoid collisions, increase the distance cutoff")
else:
break
def set_current_coordinates_as_reference_for_rmsd(self, label="None"):
# getting only coordinates from pdb
ps = IMP.pmi.tools.select(self, resolution=1.0)
# storing the reference coordinates and the particles
self.reference_structures[label] = (
[IMP.core.XYZ(p).get_coordinates() for p in ps],
ps)
def get_all_rmsds(self):
rmsds = {}
for label in self.reference_structures:
current_coordinates = [IMP.core.XYZ(p).get_coordinates()
for p in self.reference_structures[label][1]]
reference_coordinates = self.reference_structures[label][0]
if len(reference_coordinates) != len(current_coordinates):
print("calculate_all_rmsds: reference and actual coordinates are not the same")
continue
transformation = IMP.algebra.get_transformation_aligning_first_to_second(
current_coordinates,
reference_coordinates)
rmsd_global = IMP.algebra.get_rmsd(
reference_coordinates,
current_coordinates)
# warning: temporary we are calculating the drms, and not the rmsd,
# for the relative distance
rmsd_relative = IMP.atom.get_drms(
reference_coordinates,
current_coordinates)
rmsds[label + "_GlobalRMSD"] = rmsd_global
rmsds[label + "_RelativeDRMS"] = rmsd_relative
return rmsds
def setup_component_geometry(self, name, color=None, resolution=1.0):
if color is None:
color = self.color_dict[name]
# this function stores all particle pairs
# ordered by residue number, to be used
# to construct backbone traces
self.hier_geometry_pairs[name] = []
protein_h = self.hier_dict[name]
pbr = IMP.pmi.tools.select(self, name=name, resolution=resolution)
pbr = IMP.pmi.tools.sort_by_residues(pbr)
for n in range(len(pbr) - 1):
self.hier_geometry_pairs[name].append((pbr[n], pbr[n + 1], color))
    def setup_component_sequence_connectivity(
        self, name, resolution=10, scale=1.0):
        '''
        Generate restraints between contiguous fragments in the hierarchy.
        The linkers are generated at resolution 10 by default.

        @param name component name
        @param resolution resolution at which fragments are selected
        @param scale multiplier on the default per-residue linker distance
        '''
        unmodeledregions_cr = IMP.RestraintSet(self.m, "unmodeledregions")
        sortedsegments_cr = IMP.RestraintSet(self.m, "sortedsegments")
        protein_h = self.hier_dict[name]
        SortedSegments = []
        frs = self.hier_db.get_preroot_fragments_by_resolution(
            name,
            resolution)
        for fr in frs:
            # use the first/last child when the fragment has children,
            # the fragment itself otherwise
            try:
                start = fr.get_children()[0]
            except:
                start = fr
            try:
                end = fr.get_children()[-1]
            except:
                end = fr
            startres = IMP.pmi.tools.get_residue_indexes(start)[0]
            endres = IMP.pmi.tools.get_residue_indexes(end)[-1]
            SortedSegments.append((start, end, startres))
        # order the segments by their first residue index
        SortedSegments = sorted(SortedSegments, key=itemgetter(2))
        # connect the particles
        for x in range(len(SortedSegments) - 1):
            last = SortedSegments[x][1]
            first = SortedSegments[x + 1][0]
            nreslast = len(IMP.pmi.tools.get_residue_indexes(last))
            lastresn = IMP.pmi.tools.get_residue_indexes(last)[-1]
            nresfirst = len(IMP.pmi.tools.get_residue_indexes(first))
            firstresn = IMP.pmi.tools.get_residue_indexes(first)[0]
            # number of unmodeled residues between the two segments
            residuegap = firstresn - lastresn - 1
            if self.disorderedlength and (nreslast / 2 + nresfirst / 2 + residuegap) > 20.0:
                # calculate the distance between the sphere centers using Kohn
                # PNAS 2004
                # NOTE(review): these divisions are integer divisions under
                # Python 2 (e.g. sqrt(5 / 3) == 1.0) but true divisions under
                # Python 3 — confirm which behavior is intended.
                optdist = sqrt(5 / 3) * 1.93 * \
                    (nreslast / 2 + nresfirst / 2 + residuegap) ** 0.6
                # optdist2=sqrt(5/3)*1.93*((nreslast)**0.6+(nresfirst)**0.6)/2
                if self.upperharmonic:
                    hu = IMP.core.HarmonicUpperBound(optdist, self.kappa)
                else:
                    hu = IMP.core.Harmonic(optdist, self.kappa)
                dps = IMP.core.DistancePairScore(hu)
            else:  # default
                # 3.6 angstroms per residue of gap (plus one linking bond)
                optdist = (0.0 + (float(residuegap) + 1.0) * 3.6) * scale
                if self.upperharmonic:  # default
                    hu = IMP.core.HarmonicUpperBound(optdist, self.kappa)
                else:
                    hu = IMP.core.Harmonic(optdist, self.kappa)
                dps = IMP.core.SphereDistancePairScore(hu)
            pt0 = last.get_particle()
            pt1 = first.get_particle()
            r = IMP.core.PairRestraint(self.m, dps, (pt0.get_index(), pt1.get_index()))
            print("Adding sequence connectivity restraint between", pt0.get_name(), " and ", pt1.get_name(), 'of distance', optdist)
            sortedsegments_cr.add_restraint(r)
            self.linker_restraints_dict[
                "LinkerRestraint-" + pt0.get_name() + "-" + pt1.get_name()] = r
            self.connected_intra_pairs.append((pt0, pt1))
            self.connected_intra_pairs.append((pt1, pt0))
        self.linker_restraints.add_restraint(sortedsegments_cr)
        self.linker_restraints.add_restraint(unmodeledregions_cr)
        IMP.pmi.tools.add_restraint_to_model(self.m, self.linker_restraints)
        self.sortedsegments_cr_dict[name] = sortedsegments_cr
        self.unmodeledregions_cr_dict[name] = unmodeledregions_cr
def optimize_floppy_bodies(self, nsteps, temperature=1.0):
import IMP.pmi.samplers
pts = IMP.pmi.tools.ParticleToSampleList()
for n, fb in enumerate(self.floppy_bodies):
pts.add_particle(fb, "Floppy_Bodies", 1.0, "Floppy_Body_" + str(n))
if len(pts.get_particles_to_sample()) > 0:
mc = IMP.pmi.samplers.MonteCarlo(self.m, [pts], temperature)
print("optimize_floppy_bodies: optimizing %i floppy bodies" % len(self.floppy_bodies))
mc.optimize(nsteps)
else:
print("optimize_floppy_bodies: no particle to optimize")
    def create_rotational_symmetry(self, maincopy, copies, rotational_axis=IMP.algebra.Vector3D(0, 0, 1.0),
                                   nSymmetry=None, skip_gaussian_in_clones=False):
        '''
        Constrain the `copies` components to be rotated images of
        `maincopy` about `rotational_axis`.

        The copies must not contain rigid bodies.
        The symmetry restraints are applied at each leaf.

        @param maincopy name of the reference component
        @param copies list of component names to constrain
        @param rotational_axis axis of rotation (default z)
        @param nSymmetry when given, copies alternate on the +/- side of the
               reference in steps of 2*pi/nSymmetry instead of being spread
               evenly over the full circle
        @param skip_gaussian_in_clones do not constrain pure Gaussian
               (density) particles
        '''
        from math import pi
        self.representation_is_modified = True
        ncopies = len(copies) + 1
        main_hiers = IMP.atom.get_leaves(self.hier_dict[maincopy])
        for k in range(len(copies)):
            if (nSymmetry is None):
                # evenly spaced copies around the full circle
                rotation_angle = 2.0 * pi / float(ncopies) * float(k + 1)
            else:
                # alternate positive/negative multiples of 2*pi/nSymmetry
                # NOTE(review): (k + 2) / 2 is integer division under
                # Python 2 and true division under Python 3 — confirm which
                # is intended.
                if ( k % 2 == 0 ):
                    rotation_angle = 2.0 * pi / float(nSymmetry) * float((k + 2) / 2)
                else:
                    rotation_angle = -2.0 * pi / float(nSymmetry) * float((k + 1) / 2)
            rotation3D = IMP.algebra.get_rotation_about_axis(rotational_axis, rotation_angle)
            sm = IMP.core.TransformationSymmetry(rotation3D)
            clone_hiers = IMP.atom.get_leaves(self.hier_dict[copies[k]])
            lc = IMP.container.ListSingletonContainer(self.m)
            for n, p in enumerate(main_hiers):
                if (skip_gaussian_in_clones):
                    # pure density particles: Gaussian but neither Fragment
                    # nor Residue
                    if (IMP.core.Gaussian.get_is_setup(p)) and not (IMP.atom.Fragment.get_is_setup(p) or IMP.atom.Residue.get_is_setup(p)):
                        continue
                pc = clone_hiers[n]
                #print("setting " + p.get_name() + " as reference for " + pc.get_name())
                # each clone leaf references the matching main leaf
                IMP.core.Reference.setup_particle(pc.get_particle(), p.get_particle())
                lc.add(pc.get_particle().get_index())
            # the constraint keeps all clone leaves at the rotated positions
            c = IMP.container.SingletonsConstraint(sm, None, lc)
            self.m.add_score_state(c)
            print("Completed setting " + str(maincopy) + " as a reference for " + str(copies[k]) \
                  + " by rotating it in " + str(rotation_angle / 2.0 / pi * 360) + " degree around the " + str(rotational_axis) + " axis.")
        self.m.update()
def create_rigid_body_symmetry(self, particles_reference, particles_copy,label="None",
initial_transformation=IMP.algebra.get_identity_transformation_3d()):
from math import pi
self.representation_is_modified = True
mainparticles = particles_reference
t=initial_transformation
p=IMP.Particle(self.m)
p.set_name("RigidBody_Symmetry")
rb=IMP.core.RigidBody.setup_particle(p,IMP.algebra.ReferenceFrame3D(t))
sm = IMP.core.TransformationSymmetry(rb)
copyparticles = particles_copy
mainpurged = []
copypurged = []
for n, p in enumerate(mainparticles):
print(p.get_name())
pc = copyparticles[n]
mainpurged.append(p)
if not IMP.pmi.Symmetric.get_is_setup(p):
IMP.pmi.Symmetric.setup_particle(p, 0)
else:
IMP.pmi.Symmetric(p).set_symmetric(0)
copypurged.append(pc)
if not IMP.pmi.Symmetric.get_is_setup(pc):
IMP.pmi.Symmetric.setup_particle(pc, 1)
else:
IMP.pmi.Symmetric(pc).set_symmetric(1)
lc = IMP.container.ListSingletonContainer(self.m)
for n, p in enumerate(mainpurged):
pc = copypurged[n]
print("setting " + p.get_name() + " as reference for " + pc.get_name())
IMP.core.Reference.setup_particle(pc, p)
lc.add(pc.get_index())
c = IMP.container.SingletonsConstraint(sm, None, lc)
self.m.add_score_state(c)
self.m.update()
self.rigid_bodies.append(rb)
self.rigid_body_symmetries.append(rb)
rb.set_name(label+".rigid_body_symmetry."+str(len(self.rigid_body_symmetries)))
    def create_amyloid_fibril_symmetry(self, maincopy, axial_copies,
                                       longitudinal_copies, axis=(0, 0, 1), translation_value=4.8):
        """Clone `maincopy` into an amyloid-fibril arrangement and constrain
        the clones to the main copy.

        For every longitudinal index in [-longitudinal_copies,
        longitudinal_copies] and every axial index in [0, axial_copies), a
        clone component is created and constrained to the main copy rotated
        by 2*pi/axial_copies per axial step and translated by
        `translation_value` per longitudinal step along `axis`.

        @return list of the cloned component hierarchies
        """
        from math import pi
        self.representation_is_modified = True
        outhiers = []
        protein_h = self.hier_dict[maincopy]
        mainparts = IMP.atom.get_leaves(protein_h)
        for ilc in range(-longitudinal_copies, longitudinal_copies + 1):
            for iac in range(axial_copies):
                # NOTE(review): "_a" carries the longitudinal index and "_l"
                # the axial one — the letters look swapped; left untouched
                # because the names may be relied upon elsewhere.
                copyname = maincopy + "_a" + str(ilc) + "_l" + str(iac)
                self.create_component(copyname, 0.0)
                for hier in protein_h.get_children():
                    self.add_component_hierarchy_clone(copyname, hier)
                copyhier = self.hier_dict[copyname]
                outhiers.append(copyhier)
                copyparts = IMP.atom.get_leaves(copyhier)
                rotation3D = IMP.algebra.get_rotation_about_axis(
                    IMP.algebra.Vector3D(axis),
                    2 * pi / axial_copies * (float(iac)))
                translation_vector = tuple(
                    [translation_value * float(ilc) * x for x in axis])
                print(translation_vector)
                translation = IMP.algebra.Vector3D(translation_vector)
                sm = IMP.core.TransformationSymmetry(
                    IMP.algebra.Transformation3D(rotation3D, translation))
                lc = IMP.container.ListSingletonContainer(self.m)
                # main particles are Symmetric=0, clones Symmetric=1, and
                # every clone leaf references the matching main leaf
                for n, p in enumerate(mainparts):
                    pc = copyparts[n]
                    if not IMP.pmi.Symmetric.get_is_setup(p):
                        IMP.pmi.Symmetric.setup_particle(p, 0)
                    if not IMP.pmi.Symmetric.get_is_setup(pc):
                        IMP.pmi.Symmetric.setup_particle(pc, 1)
                    IMP.core.Reference.setup_particle(pc, p)
                    lc.add(pc.get_index())
                c = IMP.container.SingletonsConstraint(sm, None, lc)
                self.m.add_score_state(c)
                self.m.update()
        return outhiers
    def link_components_to_rmf(self, rmfname, frameindex):
        '''
        Load coordinates in the current representation.
        This should be done only if the hierarchy self.prot is identical
        to the one as stored in the rmf i.e. all components were added.

        @param rmfname path of the RMF file to read
        @param frameindex frame number whose coordinates are loaded
        '''
        import IMP.rmf
        import RMF
        rh = RMF.open_rmf_file_read_only(rmfname)
        IMP.rmf.link_hierarchies(rh, [self.prot])
        IMP.rmf.load_frame(rh, RMF.FrameID(frameindex))
        # drop the handle explicitly so the RMF file is released promptly
        del rh
def create_components_from_rmf(self, rmfname, frameindex):
'''
still not working.
create the representation (i.e. hierarchies) from the rmf file.
it will be stored in self.prot, which will be overwritten.
load the coordinates from the rmf file at frameindex.
'''
rh = RMF.open_rmf_file_read_only(rmfname)
self.prot = IMP.rmf.create_hierarchies(rh, self.m)[0]
IMP.atom.show_molecular_hierarchy(self.prot)
IMP.rmf.link_hierarchies(rh, [self.prot])
IMP.rmf.load_frame(rh, RMF.FrameID(frameindex))
del rh
for p in self.prot.get_children():
self.create_component(p.get_name())
self.hier_dict[p.get_name()] = p
'''
still missing: save rigid bodies contained in the rmf in self.rigid_bodies
save floppy bodies in self.floppy_bodies
get the connectivity restraints
'''
def set_rigid_body_from_hierarchies(self, hiers, particles=None):
'''
Construct a rigid body from hierarchies (and optionally particles).
@param hiers list of hierarchies to make rigid
@param particles list of particles to add to the rigid body
'''
if particles is None:
rigid_parts = set()
else:
rigid_parts = set(particles)
name = ""
print("set_rigid_body_from_hierarchies> setting up a new rigid body")
for hier in hiers:
ps = IMP.atom.get_leaves(hier)
for p in ps:
if IMP.core.RigidMember.get_is_setup(p):
rb = IMP.core.RigidMember(p).get_rigid_body()
print("set_rigid_body_from_hierarchies> WARNING particle %s already belongs to rigid body %s" % (p.get_name(), rb.get_name()))
else:
rigid_parts.add(p)
name += hier.get_name() + "-"
print("set_rigid_body_from_hierarchies> adding %s to the rigid body" % hier.get_name())
if len(list(rigid_parts)) != 0:
rb = IMP.atom.create_rigid_body(list(rigid_parts))
rb.set_coordinates_are_optimized(True)
rb.set_name(name + "rigid_body")
self.rigid_bodies.append(rb)
return rb
else:
print("set_rigid_body_from_hierarchies> rigid body could not be setup")
def set_rigid_bodies(self, subunits):
'''
Construct a rigid body from a list of subunits.
Each subunit is a tuple that identifies the residue ranges and the
component name (as used in create_component()).
subunits: [(name_1,(first_residue_1,last_residue_1)),(name_2,(first_residue_2,last_residue_2)),.....]
or
[name_1,name_2,(name_3,(first_residue_3,last_residue_3)),.....]
example: ["prot1","prot2",("prot3",(1,10))]
sometimes, we know about structure of an interaction
and here we make such PPIs rigid
'''
rigid_parts = set()
for s in subunits:
if type(s) == type(tuple()) and len(s) == 2:
sel = IMP.atom.Selection(
self.prot,
molecule=s[0],
residue_indexes=list(range(s[1][0],
s[1][1] + 1)))
if len(sel.get_selected_particles()) == 0:
print("set_rigid_bodies: selected particle does not exist")
for p in sel.get_selected_particles():
# if not p in self.floppy_bodies:
if IMP.core.RigidMember.get_is_setup(p):
rb = IMP.core.RigidMember(p).get_rigid_body()
print("set_rigid_body_from_hierarchies> WARNING particle %s already belongs to rigid body %s" % (p.get_name(), rb.get_name()))
else:
rigid_parts.add(p)
elif type(s) == type(str()):
sel = IMP.atom.Selection(self.prot, molecule=s)
if len(sel.get_selected_particles()) == 0:
print("set_rigid_bodies: selected particle does not exist")
for p in sel.get_selected_particles():
# if not p in self.floppy_bodies:
if IMP.core.RigidMember.get_is_setup(p):
rb = IMP.core.RigidMember(p).get_rigid_body()
print("set_rigid_body_from_hierarchies> WARNING particle %s already belongs to rigid body %s" % (p.get_name(), rb.get_name()))
else:
rigid_parts.add(p)
rb = IMP.atom.create_rigid_body(list(rigid_parts))
rb.set_coordinates_are_optimized(True)
rb.set_name(''.join(str(subunits)) + "_rigid_body")
self.rigid_bodies.append(rb)
return rb
    def set_super_rigid_body_from_hierarchies(
            self,
            hiers,
            axis=None,
            min_size=1):
        """Create a "super rigid body" (a collective mover over existing
        rigid bodies plus loose particles) from the given hierarchies.

        @param hiers hierarchies whose leaves are collected
        @param axis when given, the SRB is stored with the axis as a third
               tuple element (a 2D rotation / bond rotamer mover); when None
               a plain 3D SRB is appended
        @param min_size skip creation when fewer members were collected
        """
        # axis is the rotation axis for 2D rotation
        super_rigid_xyzs = set()
        super_rigid_rbs = set()
        # NOTE(review): `name` is never used in this method (unlike in
        # set_rigid_body_from_hierarchies); it looks like leftover code.
        name = ""
        print("set_super_rigid_body_from_hierarchies> setting up a new SUPER rigid body")
        for hier in hiers:
            ps = IMP.atom.get_leaves(hier)
            for p in ps:
                # leaves already inside a rigid body move via that body;
                # everything else moves as an individual XYZ particle
                if IMP.core.RigidMember.get_is_setup(p):
                    rb = IMP.core.RigidMember(p).get_rigid_body()
                    super_rigid_rbs.add(rb)
                else:
                    super_rigid_xyzs.add(p)
            print("set_rigid_body_from_hierarchies> adding %s to the rigid body" % hier.get_name())
        if len(super_rigid_rbs|super_rigid_xyzs) < min_size:
            return
        if axis is None:
            self.super_rigid_bodies.append((super_rigid_xyzs, super_rigid_rbs))
        else:
            # these will be 2D rotation SRB or a bond rotamer (axis can be a IMP.algebra.Vector3D or particle Pair)
            self.super_rigid_bodies.append(
                (super_rigid_xyzs, super_rigid_rbs, axis))
def fix_rigid_bodies(self, rigid_bodies):
self.fixed_rigid_bodies += rigid_bodies
    def set_chain_of_super_rigid_bodies(
            self, hiers, min_length=None, max_length=None, axis=None):
        '''
        Make a chain of super rigid bodies from a list of hierarchies.

        Takes a linear list of hierarchies (they are supposed to be
        sequence-contiguous) and produces a chain of super rigid bodies
        with given length range, specified by min_length and max_length.

        @param hiers (possibly nested) list of hierarchies
        @param min_length minimum sublist length; also forwarded as min_size
        @param max_length maximum sublist length
        @param axis optional rotation axis, forwarded to
               set_super_rigid_body_from_hierarchies()
        '''
        try:
            hiers = IMP.pmi.tools.flatten_list(hiers)
        except:
            # NOTE(review): bare except keeps already-flat input unchanged,
            # but it would also hide genuine errors from flatten_list.
            pass
        for hs in IMP.pmi.tools.sublist_iterator(hiers, min_length, max_length):
            self.set_super_rigid_body_from_hierarchies(hs, axis, min_length)
    def set_super_rigid_bodies(self, subunits, coords=None):
        """Create one super rigid body from a list of subunits.

        Each subunit is either a component name (string) or a
        (first_residue, last_residue, name) triple.

        NOTE(review): the tuple layout differs from set_rigid_bodies(),
        which expects (name, (first, last)); confirm before unifying.
        NOTE(review): `coords` is accepted but never used.
        """
        super_rigid_xyzs = set()
        super_rigid_rbs = set()
        for s in subunits:
            if type(s) == type(tuple()) and len(s) == 3:
                sel = IMP.atom.Selection(
                    self.prot,
                    molecule=s[2],
                    residue_indexes=list(range(s[0],
                                               s[1] + 1)))
                if len(sel.get_selected_particles()) == 0:
                    print("set_rigid_bodies: selected particle does not exist")
                for p in sel.get_selected_particles():
                    # existing rigid members are moved through their body
                    if IMP.core.RigidMember.get_is_setup(p):
                        rb = IMP.core.RigidMember(p).get_rigid_body()
                        super_rigid_rbs.add(rb)
                    else:
                        super_rigid_xyzs.add(p)
            elif type(s) == type(str()):
                sel = IMP.atom.Selection(self.prot, molecule=s)
                if len(sel.get_selected_particles()) == 0:
                    print("set_rigid_bodies: selected particle does not exist")
                for p in sel.get_selected_particles():
                    # if not p in self.floppy_bodies:
                    if IMP.core.RigidMember.get_is_setup(p):
                        rb = IMP.core.RigidMember(p).get_rigid_body()
                        super_rigid_rbs.add(rb)
                    else:
                        super_rigid_xyzs.add(p)
        self.super_rigid_bodies.append((super_rigid_xyzs, super_rigid_rbs))
def remove_floppy_bodies_from_component(self, component_name):
'''
Remove leaves of hierarchies from the floppy bodies list based
on the component name
'''
hs=IMP.atom.get_leaves(self.hier_dict[component_name])
ps=[h.get_particle() for h in hs]
for p in self.floppy_bodies:
try:
if p in ps: self.floppy_bodies.remove(p)
if p in hs: self.floppy_bodies.remove(p)
except:
continue
    def remove_floppy_bodies(self, hierarchies):
        '''
        Remove leaves of hierarchies from the floppy bodies list.

        Given a list of hierarchies, get the leaves and remove the
        corresponding particles from the floppy bodies list. We need this
        function because sometimes
        we want to constrain the floppy bodies in a rigid body - for instance
        when you want to associate a bead with a density particle.

        @param hierarchies hierarchies whose leaves should stop being floppy
        '''
        for h in hierarchies:
            ps = IMP.atom.get_leaves(h)
            for p in ps:
                # here we iterate the leaves (not self.floppy_bodies), so the
                # in-place remove() is safe
                if p in self.floppy_bodies:
                    print("remove_floppy_bodies: removing %s from floppy body list" % p.get_name())
                    self.floppy_bodies.remove(p)
    def set_floppy_bodies(self):
        """Finalize the floppy-body list: tag each particle's name and detach
        any floppy particle from the rigid body it currently belongs to."""
        for p in self.floppy_bodies:
            name = p.get_name()
            p.set_name(name + "_floppy_body")
            if IMP.core.RigidMember.get_is_setup(p):
                print("I'm trying to make this particle flexible although it was assigned to a rigid body", p.get_name())
                rb = IMP.core.RigidMember(p).get_rigid_body()
                try:
                    rb.set_is_rigid_member(p.get_index(), False)
                except:
                    # some IMP versions still work with that
                    rb.set_is_rigid_member(p.get_particle_index(), False)
                p.set_name(p.get_name() + "_rigid_body_member")
    def set_floppy_bodies_from_hierarchies(self, hiers):
        """Add all leaf particles of the given hierarchies to the floppy
        (individually movable) bodies list, marking their coordinates as
        optimizable."""
        for hier in hiers:
            ps = IMP.atom.get_leaves(hier)
            for p in ps:
                IMP.core.XYZ(p).set_coordinates_are_optimized(True)
                self.floppy_bodies.append(p)
def get_particles_from_selection_tuples(
self,
selection_tuples,
resolution=None):
'''
selection tuples must be [(r1,r2,"name1"),(r1,r2,"name2"),....]
@return the particles
'''
particles = []
print(selection_tuples)
for s in selection_tuples:
ps = IMP.pmi.tools.select_by_tuple(
representation=self, tupleselection=tuple(s),
resolution=None, name_is_ambiguous=False)
particles += ps
return particles
def get_connected_intra_pairs(self):
return self.connected_intra_pairs
def set_rigid_bodies_max_trans(self, maxtrans):
self.maxtrans_rb = maxtrans
def set_rigid_bodies_max_rot(self, maxrot):
self.maxrot_rb = maxrot
def set_super_rigid_bodies_max_trans(self, maxtrans):
self.maxtrans_srb = maxtrans
def set_super_rigid_bodies_max_rot(self, maxrot):
self.maxrot_srb = maxrot
def set_floppy_bodies_max_trans(self, maxtrans):
self.maxtrans_fb = maxtrans
def set_rigid_bodies_as_fixed(self, rigidbodiesarefixed=True):
'''
Fix rigid bodies in their actual position.
The get_particles_to_sample() function will return
just the floppy bodies (if they are not fixed).
'''
self.rigidbodiesarefixed = rigidbodiesarefixed
def set_floppy_bodies_as_fixed(self, floppybodiesarefixed=True):
'''
Fix floppy bodies in their actual position.
The get_particles_to_sample() function will return
just the rigid bodies (if they are not fixed).
'''
self.floppybodiesarefixed=floppybodiesarefixed
    def draw_hierarchy_graph(self):
        """Emit one hierarchy graph per direct child of self.prot by
        delegating to IMP.pmi.output.get_graph_from_hierarchy."""
        for c in IMP.atom.Hierarchy(self.prot).get_children():
            print("Drawing hierarchy graph for " + c.get_name())
            IMP.pmi.output.get_graph_from_hierarchy(c)
def get_geometries(self):
# create segments at the lowest resolution
seggeos = []
for name in self.hier_geometry_pairs:
for pt in self.hier_geometry_pairs[name]:
p1 = pt[0]
p2 = pt[1]
color = pt[2]
try:
clr = IMP.display.get_rgb_color(color)
except:
clr = IMP.display.get_rgb_color(1.0)
coor1 = IMP.core.XYZ(p1).get_coordinates()
coor2 = IMP.core.XYZ(p2).get_coordinates()
seg = IMP.algebra.Segment3D(coor1, coor2)
seggeos.append(IMP.display.SegmentGeometry(seg, clr))
return seggeos
def setup_bonds(self):
# create segments at the lowest resolution
seggeos = []
for name in self.hier_geometry_pairs:
for pt in self.hier_geometry_pairs[name]:
p1 = pt[0]
p2 = pt[1]
if not IMP.atom.Bonded.get_is_setup(p1):
IMP.atom.Bonded.setup_particle(p1)
if not IMP.atom.Bonded.get_is_setup(p2):
IMP.atom.Bonded.setup_particle(p2)
if not IMP.atom.get_bond(IMP.atom.Bonded(p1),IMP.atom.Bonded(p2)):
IMP.atom.create_bond(
IMP.atom.Bonded(p1),
IMP.atom.Bonded(p2),
1)
    def show_component_table(self, name):
        """Print, for every residue of component `name`, the particles that
        represent it at each available resolution.

        Residue bounds come from the FASTA sequence when available,
        otherwise from the residue numbers present in the hierarchy database.
        """
        if name in self.sequence_dict:
            lastresn = len(self.sequence_dict[name])
            firstresn = 1
        else:
            residues = self.hier_db.get_residue_numbers(name)
            firstresn = min(residues)
            lastresn = max(residues)
        for nres in range(firstresn, lastresn + 1):
            try:
                resolutions = self.hier_db.get_residue_resolutions(name, nres)
                ps = []
                for r in resolutions:
                    ps += self.hier_db.get_particles(name, nres, r)
                print("%20s %7s" % (name, nres), " ".join(["%20s %7s" % (str(p.get_name()),
                                                                         str(IMP.pmi.Resolution(p).get_resolution())) for p in ps]))
            except:
                # NOTE(review): bare except treats any lookup failure as
                # "not represented"; it would also mask unrelated errors.
                print("%20s %20s" % (name, nres), "**** not represented ****")
def draw_hierarchy_composition(self):
ks = list(self.elements.keys())
ks.sort()
max = 0
for k in self.elements:
for l in self.elements[k]:
if l[1] > max:
max = l[1]
for k in ks:
self.draw_component_composition(k, max)
    def draw_component_composition(self, name, max=1000, draw_pdb_names=False):
        """Render a colored composition bar for component `name`
        (blue = pdb, yellow = bead, teal = helix, white = gap) and save it
        to "<name>structure.pdf".

        @param name component name (key of self.elements)
        @param max residue count used to scale the figure width
               (NOTE(review): shadows the builtin `max` inside this method)
        @param draw_pdb_names annotate pdb segments with their labels
        """
        from matplotlib import pyplot
        import matplotlib as mpl
        k = name
        # segments sorted by first residue: (first, last, label, kind)
        tmplist = sorted(self.elements[k], key=itemgetter(0))
        try:
            endres = tmplist[-1][1]
        except:
            print("draw_component_composition: missing information for component %s" % name)
            return
        fig = pyplot.figure(figsize=(26.0 * float(endres) / max + 2, 2))
        ax = fig.add_axes([0.05, 0.475, 0.9, 0.15])
        # Set the colormap and norm to correspond to the data for which
        # the colorbar will be used.
        cmap = mpl.cm.cool
        norm = mpl.colors.Normalize(vmin=5, vmax=10)
        # Build color-band boundaries from the segments; white bands fill
        # the gaps between non-contiguous segments.
        bounds = [1]
        colors = []
        for n, l in enumerate(tmplist):
            # NOTE(review): firstres/lastres are assigned but never used
            firstres = l[0]
            lastres = l[1]
            if l[3] != "end":
                if bounds[-1] != l[0]:
                    colors.append("white")
                    bounds.append(l[0])
                    if l[3] == "pdb":
                        colors.append("#99CCFF")
                    if l[3] == "bead":
                        colors.append("#FFFF99")
                    if l[3] == "helix":
                        colors.append("#33CCCC")
                    if l[3] != "end":
                        bounds.append(l[1] + 1)
                else:
                    if l[3] == "pdb":
                        colors.append("#99CCFF")
                    if l[3] == "bead":
                        colors.append("#FFFF99")
                    if l[3] == "helix":
                        colors.append("#33CCCC")
                    if l[3] != "end":
                        bounds.append(l[1] + 1)
            else:
                if bounds[-1] - 1 == l[0]:
                    bounds.pop()
                    bounds.append(l[0])
                else:
                    colors.append("white")
                    bounds.append(l[0])
        bounds.append(bounds[-1])
        colors.append("white")
        cmap = mpl.colors.ListedColormap(colors)
        cmap.set_over('0.25')
        cmap.set_under('0.75')
        norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
        cb2 = mpl.colorbar.ColorbarBase(ax, cmap=cmap,
                                        norm=norm,
                                        # to use 'extend', you must
                                        # specify two extra boundaries:
                                        boundaries=bounds,
                                        ticks=bounds,  # optional
                                        spacing='proportional',
                                        orientation='horizontal')
        extra_artists = []
        npdb = 0
        if draw_pdb_names:
            # stack the pdb labels above the bar, connected with arrows
            for l in tmplist:
                if l[3] == "pdb":
                    npdb += 1
                    mid = 1.0 / endres * float(l[0])
                    # t =ax.text(mid, float(npdb-1)/2.0+1.5, l[2], ha="left", va="center", rotation=0,
                    # size=10)
                    # t=ax.annotate(l[0],2)
                    t = ax.annotate(
                        l[2], xy=(mid, 1), xycoords='axes fraction',
                        xytext=(mid + 0.025, float(npdb - 1) / 2.0 + 1.5), textcoords='axes fraction',
                        arrowprops=dict(arrowstyle="->",
                                        connectionstyle="angle,angleA=0,angleB=90,rad=10"),
                    )
                    extra_artists.append(t)
        # set the title of the bar
        title = ax.text(-0.005, 0.5, k, ha="right", va="center", rotation=90,
                        size=20)
        extra_artists.append(title)
        # changing the xticks labels
        labels = len(bounds) * [" "]
        ax.set_xticklabels(labels)
        # annotate residue numbers under the boundaries, cycling three
        # vertical offsets to keep close-together labels readable
        mid = 1.0 / endres * float(bounds[0])
        t = ax.annotate(bounds[0], xy=(mid, 0), xycoords='axes fraction',
                        xytext=(mid - 0.01, -0.5), textcoords='axes fraction',)
        extra_artists.append(t)
        offsets = [0, -0.5, -1.0]
        nclashes = 0
        for n in range(1, len(bounds)):
            if bounds[n] == bounds[n - 1]:
                continue
            mid = 1.0 / endres * float(bounds[n])
            if (float(bounds[n]) - float(bounds[n - 1])) / max <= 0.01:
                nclashes += 1
                offset = offsets[nclashes % 3]
            else:
                nclashes = 0
                offset = offsets[0]
            if offset > -0.75:
                t = ax.annotate(
                    bounds[n], xy=(mid, 0), xycoords='axes fraction',
                    xytext=(mid, -0.5 + offset), textcoords='axes fraction')
            else:
                t = ax.annotate(
                    bounds[n], xy=(mid, 0), xycoords='axes fraction',
                    xytext=(mid, -0.5 + offset), textcoords='axes fraction', arrowprops=dict(arrowstyle="-"))
            extra_artists.append(t)
        cb2.add_lines(bounds, ["black"] * len(bounds), [1] * len(bounds))
        # cb2.set_label(k)
        pyplot.savefig(
            k + "structure.pdf",
            dpi=150,
            transparent="True",
            bbox_extra_artists=(extra_artists),
            bbox_inches='tight')
        pyplot.show()
def draw_coordinates_projection(self):
import matplotlib.pyplot as pp
xpairs = []
ypairs = []
for name in self.hier_geometry_pairs:
for pt in self.hier_geometry_pairs[name]:
p1 = pt[0]
p2 = pt[1]
color = pt[2]
coor1 = IMP.core.XYZ(p1).get_coordinates()
coor2 = IMP.core.XYZ(p2).get_coordinates()
x1 = coor1[0]
x2 = coor2[0]
y1 = coor1[1]
y2 = coor2[1]
xpairs.append([x1, x2])
ypairs.append([y1, y2])
xlist = []
ylist = []
for xends, yends in zip(xpairs, ypairs):
xlist.extend(xends)
xlist.append(None)
ylist.extend(yends)
ylist.append(None)
pp.plot(xlist, ylist, 'b-', alpha=0.1)
pp.show()
    def get_prot_name_from_particle(self, particle):
        """Return the component name that `particle` belongs to by walking up
        the hierarchy until a node named after a known component is found.

        NOTE(review): if the particle is not below any component node, the
        walk runs past the root and fails; confirm callers only pass
        particles inside self.prot.
        """
        names = self.get_component_names()
        particle0 = particle
        name = None
        while not name in names:
            h = IMP.atom.Hierarchy(particle0).get_parent()
            name = h.get_name()
            particle0 = h.get_particle()
        return name
    def get_particles_to_sample(self):
        """Return the samplable degrees of freedom as a dictionary, as
        expected by the sampler modules.

        Keys:
          "Rigid_Bodies_SimplifiedModel"  -> (rigid bodies, max trans, max rot)
          "Floppy_Bodies_SimplifiedModel" -> (particles, max trans)
          "SR_Bodies_SimplifiedModel"     -> (super rigid bodies, max trans, max rot)

        Symmetric copies (IMP.pmi.Symmetric == 1) and fixed bodies are
        filtered out.
        """
        # get the list of samplable particles with their type
        # and the mover displacement. Everything wrapped in a dictionary,
        # to be used by samplers modules
        ps = {}
        # remove symmetric particles: they are not sampled
        rbtmp = []
        fbtmp = []
        srbtmp = []
        if not self.rigidbodiesarefixed:
            for rb in self.rigid_bodies:
                if IMP.pmi.Symmetric.get_is_setup(rb):
                    if IMP.pmi.Symmetric(rb).get_symmetric() != 1:
                        rbtmp.append(rb)
                else:
                    if rb not in self.fixed_rigid_bodies:
                        rbtmp.append(rb)
        if not self.floppybodiesarefixed:
            for fb in self.floppy_bodies:
                if IMP.pmi.Symmetric.get_is_setup(fb):
                    if IMP.pmi.Symmetric(fb).get_symmetric() != 1:
                        fbtmp.append(fb)
                else:
                    fbtmp.append(fb)
        for srb in self.super_rigid_bodies:
            # going to prune the fixed rigid bodies out
            # of the super rigid body list
            rigid_bodies = list(srb[1])
            filtered_rigid_bodies = []
            for rb in rigid_bodies:
                if rb not in self.fixed_rigid_bodies:
                    filtered_rigid_bodies.append(rb)
            # NOTE(review): SRBs stored as 3-tuples (with a rotation axis)
            # lose their third element here; confirm whether that is intended.
            srbtmp.append((srb[0], filtered_rigid_bodies))
        # NOTE(review): the pruned lists permanently replace the originals,
        # so calling this method is not side-effect free.
        self.rigid_bodies = rbtmp
        self.floppy_bodies = fbtmp
        self.super_rigid_bodies = srbtmp
        ps["Rigid_Bodies_SimplifiedModel"] = (
            self.rigid_bodies,
            self.maxtrans_rb,
            self.maxrot_rb)
        ps["Floppy_Bodies_SimplifiedModel"] = (
            self.floppy_bodies,
            self.maxtrans_fb)
        ps["SR_Bodies_SimplifiedModel"] = (
            self.super_rigid_bodies,
            self.maxtrans_srb,
            self.maxrot_srb)
        return ps
def set_output_level(self, level):
self.output_level = level
    def _evaluate(self, deriv):
        """Evaluate the total score of all added restraints.

        @param deriv whether to also compute first derivatives
        @return the total restraint-set score
        """
        r = IMP.pmi.tools.get_restraint_set(self.m)
        return r.evaluate(deriv)
    def get_output(self):
        """Collect scores (and, at output level "high", coordinates) into a
        stat dictionary; all values are stringified.

        NOTE(review): "_TotalScore" only sums the connectivity-restraint
        partial scores, not the full model score — the latter is under
        "SimplifiedModel_Total_Score_<label>".
        """
        output = {}
        score = 0.0

        output["SimplifiedModel_Total_Score_" +
               self.label] = str(self._evaluate(False))
        output["SimplifiedModel_Linker_Score_" +
               self.label] = str(self.linker_restraints.unprotected_evaluate(None))
        for name in self.sortedsegments_cr_dict:
            partialscore = self.sortedsegments_cr_dict[name].evaluate(False)
            score += partialscore
            output[
                "SimplifiedModel_Link_SortedSegments_" +
                name +
                "_" +
                self.label] = str(
                partialscore)
            partialscore = self.unmodeledregions_cr_dict[name].evaluate(False)
            score += partialscore
            output[
                "SimplifiedModel_Link_UnmodeledRegions_" +
                name +
                "_" +
                self.label] = str(
                partialscore)
        # one reference-frame transformation per symmetry rigid body
        for rb in self.rigid_body_symmetries:
            name=rb.get_name()
            output[name +"_" +self.label]=str(rb.get_reference_frame().get_transformation_to())
        for name in self.linker_restraints_dict:
            output[
                name +
                "_" +
                self.label] = str(
                self.linker_restraints_dict[
                    name].unprotected_evaluate(
                    None))
        if len(self.reference_structures.keys()) != 0:
            rmsds = self.get_all_rmsds()
            for label in rmsds:
                output[
                    "SimplifiedModel_" +
                    label +
                    "_" +
                    self.label] = rmsds[
                    label]
        if self.output_level == "high":
            # one XYZR string per leaf particle; can be very verbose
            for p in IMP.atom.get_leaves(self.prot):
                d = IMP.core.XYZR(p)
                output["Coordinates_" +
                       p.get_name() + "_" + self.label] = str(d)
        output["_TotalScore"] = str(score)
        return output
def get_test_output(self):
# this method is called by test functions and return an enriched output
output = self.get_output()
for n, p in enumerate(self.get_particles_to_sample()):
output["Particle_to_sample_" + str(n)] = str(p)
output["Number_of_particles"] = len(IMP.atom.get_leaves(self.prot))
output["Hierarchy_Dictionary"] = list(self.hier_dict.keys())
output["Number_of_floppy_bodies"] = len(self.floppy_bodies)
output["Number_of_rigid_bodies"] = len(self.rigid_bodies)
output["Number_of_super_bodies"] = len(self.super_rigid_bodies)
output["Selection_resolution_1"] = len(
IMP.pmi.tools.select(self, resolution=1))
output["Selection_resolution_5"] = len(
IMP.pmi.tools.select(self, resolution=5))
output["Selection_resolution_7"] = len(
IMP.pmi.tools.select(self, resolution=5))
output["Selection_resolution_10"] = len(
IMP.pmi.tools.select(self, resolution=10))
output["Selection_resolution_100"] = len(
IMP.pmi.tools.select(self, resolution=100))
output["Selection_All"] = len(IMP.pmi.tools.select(self))
output["Selection_resolution=1"] = len(
IMP.pmi.tools.select(self, resolution=1))
output["Selection_resolution=1,resid=10"] = len(
IMP.pmi.tools.select(self, resolution=1, residue=10))
for resolution in self.hier_resolution:
output["Hier_resolution_dictionary" +
str(resolution)] = len(self.hier_resolution[resolution])
for name in self.hier_dict:
output[
"Selection_resolution=1,resid=10,name=" +
name] = len(
IMP.pmi.tools.select(
self,
resolution=1,
name=name,
residue=10))
output[
"Selection_resolution=1,resid=10,name=" +
name +
",ambiguous"] = len(
IMP.pmi.tools.select(
self,
resolution=1,
name=name,
name_is_ambiguous=True,
residue=10))
output[
"Selection_resolution=1,resid=10,name=" +
name +
",ambiguous"] = len(
IMP.pmi.tools.select(
self,
resolution=1,
name=name,
name_is_ambiguous=True,
residue=10))
output[
"Selection_resolution=1,resrange=(10,20),name=" +
name] = len(
IMP.pmi.tools.select(
self,
resolution=1,
name=name,
first_residue=10,
last_residue=20))
output[
"Selection_resolution=1,resrange=(10,20),name=" +
name +
",ambiguous"] = len(
IMP.pmi.tools.select(
self,
resolution=1,
name=name,
name_is_ambiguous=True,
first_residue=10,
last_residue=20))
output[
"Selection_resolution=10,resrange=(10,20),name=" +
name] = len(
IMP.pmi.tools.select(
self,
resolution=10,
name=name,
first_residue=10,
last_residue=20))
output[
"Selection_resolution=10,resrange=(10,20),name=" +
name +
",ambiguous"] = len(
IMP.pmi.tools.select(
self,
resolution=10,
name=name,
name_is_ambiguous=True,
first_residue=10,
last_residue=20))
output[
"Selection_resolution=100,resrange=(10,20),name=" +
name] = len(
IMP.pmi.tools.select(
self,
resolution=100,
name=name,
first_residue=10,
last_residue=20))
output[
"Selection_resolution=100,resrange=(10,20),name=" +
name +
",ambiguous"] = len(
IMP.pmi.tools.select(
self,
resolution=100,
name=name,
name_is_ambiguous=True,
first_residue=10,
last_residue=20))
return output
| gpl-3.0 |
lewismc/topik | topik/models/model_base.py | 1 | 3158 | from abc import ABCMeta, abstractmethod
import logging
import json
import pandas as pd
from six import with_metaclass
# doctest-only imports
from topik.preprocessing import preprocess
from topik.readers import read_input
from topik.tests import test_data_path
from topik.intermediaries.persistence import Persistor
registered_models = {}


def register_model(cls):
    """Class decorator: record *cls* in the global model registry, keyed by
    class name, and return it unchanged."""
    if cls.__name__ not in registered_models:
        registered_models[cls.__name__] = cls
    return cls
class TopicModelBase(with_metaclass(ABCMeta)):
    """Abstract base class for topic models.

    Subclasses must set ``corpus`` (a digested document collection exposing
    ``save`` and ``persistor``) and implement :meth:`get_top_words`,
    :meth:`save` and :meth:`get_model_name_with_parameters`.
    """
    # corpus the model was trained on; set by concrete subclasses
    corpus = None

    @abstractmethod
    def get_top_words(self, topn):
        """Method should collect top n words per topic, translate indices/ids to words.

        Return a list of lists of tuples:
        - outer list: topics
        - inner lists: length topn collection of (weight, word) tuples
        """
        pass

    @abstractmethod
    def save(self, filename, saved_data):
        """Persist the model metadata and the corpus.

        Concrete subclasses call this with their own ``saved_data`` payload;
        the class name is stored so load_model() can reconstruct the object.
        """
        self.persistor.store_model(self.get_model_name_with_parameters(),
                                   {"class": self.__class__.__name__,
                                    "saved_data": saved_data})
        self.corpus.save(filename)

    @abstractmethod
    def get_model_name_with_parameters(self):
        raise NotImplementedError

    def termite_data(self, filename=None, topn_words=15):
        """Generate the csv file input for the termite plot.

        Parameters
        ----------
        filename: string
            Desired name for the generated csv file; when given the frame is
            written to disk and None is returned, otherwise the DataFrame
            (columns: weight, word, topic) is returned.
        topn_words: int
            Number of top words collected per topic.
        """
        # Collect one frame per topic and concatenate once.  This avoids the
        # quadratic repeated DataFrame.append of the previous version (an
        # API removed in pandas >= 2.0) and the unbound `df` when the model
        # yields no topics.
        frames = []
        for topic_number, topic in enumerate(self.get_top_words(topn_words), start=1):
            frame = pd.DataFrame(topic, columns=['weight', 'word'])
            frame['topic'] = pd.Series(topic_number, index=frame.index)
            frames.append(frame)
        if frames:
            df = pd.concat(frames, ignore_index=True)
        else:
            df = pd.DataFrame(columns=['weight', 'word', 'topic'])
        if filename:
            logging.info("saving termite plot input csv file to %s " % filename)
            df.to_csv(filename, index=False, encoding='utf-8')
            return
        return df

    @property
    def persistor(self):
        # delegate persistence to the corpus' persistor
        return self.corpus.persistor
def load_model(filename, model_name):
    """Loads a JSON file containing instructions on how to load model data.

    Returns a TopicModelBase-derived object, reconstructed from the stored
    class name and saved constructor arguments."""
    persistor = Persistor(filename)
    if model_name not in persistor.list_available_models():
        raise NameError("Model name {} has not yet been created.".format(model_name))
    details = persistor.get_model_details(model_name)
    return registered_models[details['class']](**details["saved_data"])
| bsd-3-clause |
rosswhitfield/mantid | Framework/PythonInterface/mantid/plots/mantidimage.py | 3 | 2051 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import matplotlib.image as mimage
import numpy as np
from enum import Enum
# Threshold defining whether an image is light or dark ( x < Threshold = Dark)
# Mean greyscale (luma) values lie in [0, 255]; compared against in
# MantidImage.calculate_greyscale_intensity.
THRESHOLD = 100
class ImageIntensity(Enum):
    """Classification of an image's overall brightness; produced by
    MantidImage.calculate_greyscale_intensity."""
    LIGHT = 1
    DARK = 2
class MantidImage(mimage.AxesImage):
    """AxesImage subclass used by Mantid plots; adds a helper to classify
    the displayed image as light or dark."""

    def __init__(self,
                 ax,
                 cmap=None,
                 norm=None,
                 interpolation=None,
                 origin=None,
                 extent=None,
                 filternorm=1,
                 filterrad=4.0,
                 resample=False,
                 **kwargs):
        # Pure pass-through constructor: every argument is forwarded
        # unchanged to matplotlib.image.AxesImage.
        super().__init__(ax,
                         cmap=cmap,
                         norm=norm,
                         interpolation=interpolation,
                         origin=origin,
                         extent=extent,
                         filternorm=filternorm,
                         filterrad=filterrad,
                         resample=resample,
                         **kwargs)

    def calculate_greyscale_intensity(self) -> ImageIntensity:
        """
        Calculate the intensity of the image in greyscale.
        The intensity is given in the range [0, 255] where:
        - 0 is black - i.e. a dark image and
        - 255 is white, a light image.

        Returns ImageIntensity.LIGHT when the mean luma exceeds THRESHOLD,
        ImageIntensity.DARK otherwise.
        """
        # bytes=True yields uint8 RGBA channels from the image data array
        rgb = self.to_rgba(self._A, alpha=None, bytes=True, norm=True)
        r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
        # CCIR 601 conversion from rgb to luma/greyscale
        # see https://en.wikipedia.org/wiki/Luma_(video)
        grey = 0.2989 * r + 0.5870 * g + 0.1140 * b
        mean = np.mean(grey)
        if mean > THRESHOLD:
            return ImageIntensity.LIGHT
        else:
            return ImageIntensity.DARK
| gpl-3.0 |
dongjoon-hyun/spark | python/pyspark/pandas/tests/test_sql.py | 15 | 1979 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import pandas as ps
from pyspark.sql.utils import ParseException
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
class SQLTest(PandasOnSparkTestCase, SQLTestUtils):
    """Error-handling tests for the pandas-on-Spark ps.sql entry point."""

    def test_error_variable_not_exist(self):
        # referencing a substitution variable that was never supplied
        msg = "The key variable_foo in the SQL statement was not found.*"
        with self.assertRaisesRegex(ValueError, msg):
            ps.sql("select * from {variable_foo}")

    def test_error_unsupported_type(self):
        # dict values cannot be substituted into the SQL statement
        msg = "Unsupported variable type dict: {'a': 1}"
        with self.assertRaisesRegex(ValueError, msg):
            some_dict = {"a": 1}
            ps.sql("select * from {some_dict}")

    def test_error_bad_sql(self):
        # invalid SQL surfaces Spark's ParseException unchanged
        with self.assertRaises(ParseException):
            ps.sql("this is not valid sql")
if __name__ == "__main__":
    import unittest
    from pyspark.pandas.tests.test_sql import *  # noqa: F401

    try:
        # prefer XML test reports (for CI) when xmlrunner is available
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
DGrady/pandas | asv_bench/benchmarks/stat_ops.py | 7 | 6106 | from .pandas_vb_common import *
def _set_use_bottleneck_False():
try:
pd.options.compute.use_bottleneck = False
except:
from pandas.core import nanops
nanops._USE_BOTTLENECK = False
class FrameOps(object):
    """ASV benchmark: DataFrame reductions (mean/sum/median) across dtypes,
    axes, and with/without bottleneck acceleration."""
    goal_time = 0.2

    param_names = ['op', 'use_bottleneck', 'dtype', 'axis']
    params = [['mean', 'sum', 'median'],
              [True, False],
              ['float', 'int'],
              [0, 1]]

    def setup(self, op, use_bottleneck, dtype, axis):
        # dispatch table instead of the if/elif chain; unknown dtypes leave
        # self.df unset, exactly like the original
        makers = {
            'float': lambda: np.random.randn(100000, 4),
            'int': lambda: np.random.randint(1000, size=(100000, 4)),
        }
        if dtype in makers:
            self.df = DataFrame(makers[dtype]())
        if not use_bottleneck:
            _set_use_bottleneck_False()
        self.func = getattr(self.df, op)

    def time_op(self, op, use_bottleneck, dtype, axis):
        self.func(axis=axis)
class stat_ops_level_frame_sum(object):
    """ASV benchmark: DataFrame.sum over one level of a 10x100x100
    MultiIndex."""
    goal_time = 0.2

    def setup(self):
        # Written for older pandas; NOTE(review): the `labels=` keyword was
        # renamed `codes=` in pandas >= 0.24.
        self.index = MultiIndex(levels=[np.arange(10), np.arange(100), np.arange(100)], labels=[np.arange(10).repeat(10000), np.tile(np.arange(100).repeat(100), 10), np.tile(np.tile(np.arange(100), 100), 10)])
        # NOTE(review): shuffling `index.values` may not affect the index's
        # internal codes; confirm this actually randomizes row order.
        random.shuffle(self.index.values)
        self.df = DataFrame(np.random.randn(len(self.index), 4), index=self.index)
        self.df_level = DataFrame(np.random.randn(100, 4), index=self.index.levels[1])

    def time_stat_ops_level_frame_sum(self):
        self.df.sum(level=1)
class stat_ops_level_frame_sum_multiple(object):
    """ASV benchmark: DataFrame.sum over two MultiIndex levels at once."""
    goal_time = 0.2

    def setup(self):
        # NOTE(review): `labels=` became `codes=` in pandas >= 0.24, and
        # shuffling `index.values` may not reorder the index itself.
        self.index = MultiIndex(levels=[np.arange(10), np.arange(100), np.arange(100)], labels=[np.arange(10).repeat(10000), np.tile(np.arange(100).repeat(100), 10), np.tile(np.tile(np.arange(100), 100), 10)])
        random.shuffle(self.index.values)
        self.df = DataFrame(np.random.randn(len(self.index), 4), index=self.index)
        self.df_level = DataFrame(np.random.randn(100, 4), index=self.index.levels[1])

    def time_stat_ops_level_frame_sum_multiple(self):
        self.df.sum(level=[0, 1])
class stat_ops_level_series_sum(object):
    """ASV benchmark: Series.sum over one MultiIndex level (column 1 of the
    frame is used as the series)."""
    goal_time = 0.2

    def setup(self):
        # NOTE(review): `labels=` became `codes=` in pandas >= 0.24, and
        # shuffling `index.values` may not reorder the index itself.
        self.index = MultiIndex(levels=[np.arange(10), np.arange(100), np.arange(100)], labels=[np.arange(10).repeat(10000), np.tile(np.arange(100).repeat(100), 10), np.tile(np.tile(np.arange(100), 100), 10)])
        random.shuffle(self.index.values)
        self.df = DataFrame(np.random.randn(len(self.index), 4), index=self.index)
        self.df_level = DataFrame(np.random.randn(100, 4), index=self.index.levels[1])

    def time_stat_ops_level_series_sum(self):
        self.df[1].sum(level=1)
class stat_ops_level_series_sum_multiple(object):
    """ASV benchmark: Series.sum over two MultiIndex levels at once."""
    goal_time = 0.2

    def setup(self):
        # NOTE(review): `labels=` became `codes=` in pandas >= 0.24, and
        # shuffling `index.values` may not reorder the index itself.
        self.index = MultiIndex(levels=[np.arange(10), np.arange(100), np.arange(100)], labels=[np.arange(10).repeat(10000), np.tile(np.arange(100).repeat(100), 10), np.tile(np.tile(np.arange(100), 100), 10)])
        random.shuffle(self.index.values)
        self.df = DataFrame(np.random.randn(len(self.index), 4), index=self.index)
        self.df_level = DataFrame(np.random.randn(100, 4), index=self.index.levels[1])

    def time_stat_ops_level_series_sum_multiple(self):
        self.df[1].sum(level=[0, 1])
class stat_ops_series_std(object):
    """asv benchmark: Series.std() on 100k floats, half of them NaN."""

    goal_time = 0.2  # asv target runtime per measurement, in seconds

    def setup(self):
        values = np.random.randn(100000)
        self.s = Series(values, index=np.arange(100000))
        # Blank out every other entry so std() exercises its NaN-skipping path.
        self.s[::2] = np.nan

    def time_stat_ops_series_std(self):
        self.s.std()
class stats_corr_spearman(object):
    """asv benchmark: Spearman rank correlation on a 1000x30 frame."""

    goal_time = 0.2  # asv target runtime per measurement, in seconds

    def setup(self):
        data = np.random.randn(1000, 30)
        self.df = DataFrame(data)

    def time_stats_corr_spearman(self):
        self.df.corr(method='spearman')
class stats_rank2d_axis0_average(object):
    """asv benchmark: DataFrame.rank() down each column (default axis=0)."""

    goal_time = 0.2  # asv target runtime per measurement, in seconds

    def setup(self):
        shape = (5000, 50)
        self.df = DataFrame(np.random.randn(*shape))

    def time_stats_rank2d_axis0_average(self):
        self.df.rank()
class stats_rank2d_axis1_average(object):
    # asv benchmark: DataFrame.rank() across columns (axis=1).
    goal_time = 0.2  # asv target runtime per measurement, in seconds

    def setup(self):
        self.df = DataFrame(np.random.randn(5000, 50))

    def time_stats_rank2d_axis1_average(self):
        # Positional axis argument: rank within each row.
        self.df.rank(1)
class stats_rank_average(object):
    """asv benchmark: Series.rank() on 300k values with heavy ties
    (two identical arange blocks surrounding random noise)."""

    goal_time = 0.2  # asv target runtime per measurement, in seconds

    def setup(self):
        pieces = [np.arange(100000), np.random.randn(100000), np.arange(100000)]
        self.values = np.concatenate(pieces)
        self.s = Series(self.values)

    def time_stats_rank_average(self):
        self.s.rank()
class stats_rank_average_int(object):
    # asv benchmark: Series.rank() on 200k random ints (many ties).
    goal_time = 0.2  # asv target runtime per measurement, in seconds

    def setup(self):
        self.values = np.random.randint(0, 100000, size=200000)
        self.s = Series(self.values)

    def time_stats_rank_average_int(self):
        self.s.rank()
class stats_rank_pct_average(object):
    # asv benchmark: Series.rank(pct=True) — percentile ranks in one call.
    goal_time = 0.2  # asv target runtime per measurement, in seconds

    def setup(self):
        self.values = np.concatenate([np.arange(100000), np.random.randn(100000), np.arange(100000)])
        self.s = Series(self.values)

    def time_stats_rank_pct_average(self):
        self.s.rank(pct=True)
class stats_rank_pct_average_old(object):
    # asv benchmark: the pre-`pct=True` idiom — rank then divide by length —
    # kept for comparison against stats_rank_pct_average.
    goal_time = 0.2  # asv target runtime per measurement, in seconds

    def setup(self):
        self.values = np.concatenate([np.arange(100000), np.random.randn(100000), np.arange(100000)])
        self.s = Series(self.values)

    def time_stats_rank_pct_average_old(self):
        (self.s.rank() / len(self.s))
class stats_rolling_mean(object):
    # asv benchmarks for the legacy module-level rolling_* functions
    # (pandas < 0.18 API, superseded by Series/DataFrame.rolling()).
    # NOTE(review): rolling_mean etc. are presumably imported at the top of
    # this benchmark module — confirm; they are not defined in this block.
    goal_time = 0.2  # asv target runtime per measurement, in seconds

    def setup(self):
        self.arr = np.random.randn(100000)
        self.win = 100  # rolling window length shared by all benchmarks

    def time_rolling_mean(self):
        rolling_mean(self.arr, self.win)

    def time_rolling_median(self):
        rolling_median(self.arr, self.win)

    def time_rolling_min(self):
        rolling_min(self.arr, self.win)

    def time_rolling_max(self):
        rolling_max(self.arr, self.win)

    def time_rolling_sum(self):
        rolling_sum(self.arr, self.win)

    def time_rolling_std(self):
        rolling_std(self.arr, self.win)

    def time_rolling_var(self):
        rolling_var(self.arr, self.win)

    def time_rolling_skew(self):
        rolling_skew(self.arr, self.win)

    def time_rolling_kurt(self):
        rolling_kurt(self.arr, self.win)
| bsd-3-clause |
alongwithyou/auto-sklearn | test/models/test_cv_evaluator.py | 5 | 7929 | import copy
import functools
import os
import unittest
import numpy as np
from numpy.linalg import LinAlgError
from autosklearn.data.competition_data_manager import CompetitionDataManager
from autosklearn.models.cv_evaluator import CVEvaluator
from autosklearn.models.paramsklearn import get_configuration_space
from ParamSklearn.util import get_dataset
from autosklearn.constants import *
N_TEST_RUNS = 10
class Dummy(object):
    """Bare attribute container used to fake a DataManager in the tests."""
class CVEvaluator_Test(unittest.TestCase):
    """Smoke tests for CVEvaluator: sample random configurations, fit them
    under cross-validation and sanity-check the resulting predictions.

    NOTE(review): Python 2 syntax throughout (print statements, e.message).
    """

    def test_evaluate_multiclass_classification(self):
        # Split the iris test half into a 25-row validation set and the rest.
        X_train, Y_train, X_test, Y_test = get_dataset('iris')
        X_valid = X_test[:25, ]
        Y_valid = Y_test[:25, ]
        X_test = X_test[25:, ]
        Y_test = Y_test[25:, ]
        # Fake DataManager exposing only the attributes CVEvaluator reads.
        D = Dummy()
        D.info = {'metric': 'bac_metric', 'task': MULTICLASS_CLASSIFICATION,
                  'is_sparse': False, 'target_num': 3}
        D.data = {'X_train': X_train, 'Y_train': Y_train,
                  'X_valid': X_valid, 'X_test': X_test}
        D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']
        configuration_space = get_configuration_space(D.info,
                                                      include_estimators=['ridge'],
                                                      include_preprocessors=['select_rates'])
        err = np.zeros([N_TEST_RUNS])
        num_models_better_than_random = 0
        for i in range(N_TEST_RUNS):
            print "Evaluate configuration: %d; result:" % i,
            configuration = configuration_space.sample_configuration()
            # Deep-copy D: the evaluator may mutate the data it is given.
            D_ = copy.deepcopy(D)
            evaluator = CVEvaluator(D_, configuration,
                                    with_predictions=True)
            # _fit returns falsy for known/benign failures -> skip this run.
            if not self._fit(evaluator):
                print
                continue
            e_, Y_optimization_pred, Y_valid_pred, Y_test_pred = \
                evaluator.predict()
            err[i] = e_
            print err[i], configuration['classifier']
            num_targets = len(np.unique(Y_train))
            self.assertTrue(np.isfinite(err[i]))
            self.assertGreaterEqual(err[i], 0.0)
            # Test that ten models were trained (one per CV fold)
            self.assertEqual(len(evaluator.models), 10)
            self.assertEqual(Y_optimization_pred.shape[0], Y_train.shape[0])
            self.assertEqual(Y_optimization_pred.shape[1], num_targets)
            self.assertEqual(Y_valid_pred.shape[0], Y_valid.shape[0])
            self.assertEqual(Y_valid_pred.shape[1], num_targets)
            self.assertEqual(Y_test_pred.shape[0], Y_test.shape[0])
            self.assertEqual(Y_test_pred.shape[1], num_targets)
            # Test some basic statistics of the predictions: a decent model's
            # mean class probability should sit near 1/3 for balanced iris.
            if err[i] < 0.5:
                self.assertTrue(0.3 < Y_valid_pred.mean() < 0.36666)
                self.assertGreaterEqual(Y_valid_pred.std(), 0.01)
                self.assertTrue(0.3 < Y_test_pred.mean() < 0.36666)
                self.assertGreaterEqual(Y_test_pred.std(), 0.01)
                num_models_better_than_random += 1
        self.assertGreater(num_models_better_than_random, 5)

    def test_evaluate_multiclass_classification_partial_fit(self):
        # Same fixture as above, but fitting only a single CV fold per run.
        X_train, Y_train, X_test, Y_test = get_dataset('iris')
        X_valid = X_test[:25, ]
        Y_valid = Y_test[:25, ]
        X_test = X_test[25:, ]
        Y_test = Y_test[25:, ]
        D = Dummy()
        D.info = {'metric': 'bac_metric', 'task': MULTICLASS_CLASSIFICATION,
                  'is_sparse': False, 'target_num': 3}
        D.data = {'X_train': X_train, 'Y_train': Y_train,
                  'X_valid': X_valid, 'X_test': X_test}
        D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']
        configuration_space = get_configuration_space(D.info,
                                                      include_estimators=['ridge'],
                                                      include_preprocessors=['select_rates'])
        err = np.zeros([N_TEST_RUNS])
        num_models_better_than_random = 0
        for i in range(N_TEST_RUNS):
            print "Evaluate configuration: %d; result:" % i,
            configuration = configuration_space.sample_configuration()
            D_ = copy.deepcopy(D)
            evaluator = CVEvaluator(D_, configuration,
                                    with_predictions=True)
            # Cycle through folds 0..9 across the runs.
            if not self._partial_fit(evaluator, fold=i % 10):
                print
                continue
            e_, Y_optimization_pred, Y_valid_pred, Y_test_pred = \
                evaluator.predict()
            err[i] = e_
            print err[i], configuration['classifier']
            self.assertTrue(np.isfinite(err[i]))
            self.assertGreaterEqual(err[i], 0.0)
            # Ten model slots exist, but only the selected fold was trained.
            self.assertEqual(len(evaluator.models), 10)
            self.assertEqual(1, np.sum([True if model is not None else False
                                        for model in evaluator.models]))
            # One fold of iris' 120 training rows is at most 12 samples.
            self.assertLess(Y_optimization_pred.shape[0], 13)
            self.assertEqual(Y_valid_pred.shape[0], Y_valid.shape[0])
            self.assertEqual(Y_test_pred.shape[0], Y_test.shape[0])
            # Test some basic statistics of the predictions
            if err[i] < 0.5:
                self.assertTrue(0.3 < Y_valid_pred.mean() < 0.36666)
                self.assertGreaterEqual(Y_valid_pred.std(), 0.01)
                self.assertTrue(0.3 < Y_test_pred.mean() < 0.36666)
                self.assertGreaterEqual(Y_test_pred.std(), 0.01)
                num_models_better_than_random += 1
        self.assertGreaterEqual(num_models_better_than_random, 5)

    def test_with_abalone(self):
        # End-to-end run on a real competition dataset stored next to the test.
        dataset = "abalone"
        dataset_dir = os.path.join(os.path.dirname(__file__), ".datasets")
        D = CompetitionDataManager(dataset, dataset_dir)
        configuration_space = get_configuration_space(D.info,
                                                      include_estimators=['extra_trees'],
                                                      include_preprocessors=['no_preprocessing'])
        errors = []
        for i in range(N_TEST_RUNS):
            configuration = configuration_space.sample_configuration()
            D_ = copy.deepcopy(D)
            evaluator = CVEvaluator(D_, configuration, cv_folds=5)
            if not self._fit(evaluator):
                print
                continue
            err = evaluator.predict()
            self.assertLess(err, 0.99)
            self.assertTrue(np.isfinite(err))
            errors.append(err)
        # This is a reasonable bound
        self.assertEqual(10, len(errors))
        self.assertLess(min(errors), 0.77)

    def _fit(self, evaluator):
        # Full CV fit with benign-failure filtering; see __fit.
        return self.__fit(evaluator.fit)

    def _partial_fit(self, evaluator, fold):
        # Single-fold fit with benign-failure filtering; see __fit.
        partial_fit = functools.partial(evaluator.partial_fit, fold=fold)
        return self.__fit(partial_fit)

    def __fit(self, function_handle):
        """Allow us to catch known and valid exceptions for all evaluate
        scripts."""
        # Returns True on success; falls through (returning None, falsy) when
        # a known/benign failure was swallowed, so callers skip that run.
        try:
            function_handle()
            return True
        except ValueError as e:
            if "Floating-point under-/overflow occurred at epoch" in e.message or \
                    "removed all features" in e.message or \
                    "failed to create intent" in e.message:
                pass
            else:
                raise e
        except LinAlgError as e:
            # e.g. Cholesky failures in GP-based models
            if "not positive definite, even with jitter" in e.message:
                pass
            else:
                raise e
        except AttributeError as e:
            # Some error in QDA
            if "log" == e.message:
                pass
            else:
                raise e
        except RuntimeWarning as e:
            if "invalid value encountered in sqrt" in e.message:
                pass
            elif "divide by zero encountered in divide" in e.message:
                pass
            else:
                raise e
        except UserWarning as e:
            if "FastICA did not converge" in e.message:
                pass
            else:
                raise e
dolphyin/cs194-16-data_manatees | classificationSpecific/run_train2.py | 4 | 4651 | import numpy as np
from sklearn import svm
from sklearn import linear_model
from sklearn import cluster
from sklearn import neighbors
from sklearn import preprocessing
from sklearn import tree
from sklearn import ensemble
from sklearn import cross_validation
import time, pdb
import train
# Load the training matrix via the project-local `train` helper module.
data_path = "./joined_matrix.txt"
features, output = train.get_training(data_path)
# NOTE(review): debugger breakpoint left in — execution stops here until the
# debugger is resumed; remove before running unattended.
pdb.set_trace()
# Scale each sample (row, axis=1) to zero mean / unit variance.
unit_features = preprocessing.scale(features, axis=1)
"""
print("Testing SVR on first 10,000 samples")
model1 = svm.SVR()
start = time.time()
result = cross_validation.cross_val_score(model1, features[0:100], output[0:100], verbose=True)
end = time.time()
print("Time to cross_validate on SVR model: %f"%(end - start))
print("Accuracies from cross validation %s"%result)
print("Average Accuracy %f"%np.average(result))
"""
# model2 = cluster.KMeans()
# model2 = linear_model.Ridge()
#### SVR OPTIMIZATION ###
"""
print("\n")
print("Testing SVR on first 10,000 samples")
# best value was kernel ='rbf', C=10, gamma=0.01
start = time.time()
model2 = svm.SVR()
features = unit_features
params_grid = [{'C': [1, 10, 100, 1000], 'gamma':[0.0001, 0.0001, 0.001, 0.01],'kernel':['rbf', 'poly']}]
opt_model = train.fine_tune(features[0:10000], output[0:10000], model2, params_grid=params_grid, verbose=5)
score = opt_model.score(features[10000:20000], output[10000:20000])
end = time.time()
print("Time to cross_validate on SVR model: %f"%(end - start))
print("Accuracy of model on test set: %f, %s"%(score, opt_model))
"""
"""
### MORE FINE TUNE SVR OPTIMIZATION ###
start = time.time()
model2 = svm.SVR()
features = unit_features
params_grid = [{'C': np.arange(9, 11, 0.25), 'gamma':[0.0001, 0.0001, 0.001, 0.01],'kernel':['rbf', 'poly']}]
opt_model = train.fine_tune(features[0:10000], output[0:10000], model2, params_grid=params_grid, verbose=5)
score = opt_model.score(features[10000:20000], output[10000:20000])
end = time.time()
print("Time to cross_validate on SVR model: %f"%(end - start))
print("Accuracy of model on test set: %f, %s"%(score, opt_model))
"""
"""
### KNN REGRESSION ###
def better_inv_dist(dist):
c=1
return 1. / (c + dist)
start = time.time()
model = neighbors.KNeighborsRegressor()
features = unit_features
params_grid = [{'n_neighbors': [1, 5, 10, 100, 500, 1000], 'weights': ['uniform', better_inv_dist]}]
opt_model = train.fine_tune(features[0:60000], output[0:60000], model, params_grid=params_grid, verbose=5)
score = opt_model.score(features[100000:110000], output[100000:110000])
end = time.time()
print("Time to cross_validate on SVR model: %f"%(end - start))
print("Accuracy of model on test set: %f, %s"%(score, opt_model))
"""
"""
### DECISION TREE REGRESSION ###
start = time.time()
model = tree.DecisionTreeRegressor()
features = unit_features
params_grid = [{'splitter': ['best', 'random'], 'min_samples_leaf': np.arange(200, 500, 5)}]
opt_model = train.fine_tune(features[0:60000], output[0:60000], model, params_grid=params_grid, verbose=5)
score = opt_model.score(features[100000:110000], output[100000:110000])
end = time.time()
print("Time to fine-tune on SVR model: %f"%(end - start))
print("Optimal model parameters: %s"%opt_model.best_estimator_)
print("Best fine tune score: %f"%(opt_model.best_score_))
print("Accuracy of model on test set: %f"%(score))
"""
"""
### RANDOM FORESTS ###
start = time.time()
model = ensemble.RandomForestRegressor()
features = unit_features
params_grid = [{'n_estimators': [10, 50, 100, 300], 'min_samples_leaf': [2, 5, 10, 50, 100, 500, 1000]}]
opt_model = train.fine_tune(features[0:60000], output[0:60000], model, params_grid=params_grid, verbose=5)
score = opt_model.score(features[100000:110000], output[100000:110000])
end = time.time()
print("Time to fine-tune on SVR model: %f"%(end - start))
print("Optimal model parameters: %s"%opt_model.best_estimator_)
print("Best fine tune score: %f"%(opt_model.best_score_))
print("Accuracy of model on test set: %f"%(score))
"""
### ADABOOST FOREST REGRESSOR ###
# Grid-search an AdaBoost regressor on the first 60k samples and score the
# best estimator on a held-out slice (rows 100k-110k).
start = time.time()
model = ensemble.AdaBoostRegressor()
features = unit_features
params_grid = [{'n_estimators': [10, 50, 100, 300],
                'learning_rate': [0.1, 0.3, 0.5, 0.7, 1],
                'loss': ['linear', 'square', 'exponential']}]
opt_model = train.fine_tune(features[0:60000], output[0:60000], model, params_grid=params_grid, verbose=5)
score = opt_model.score(features[100000:110000], output[100000:110000])
end = time.time()
# NOTE(review): the messages below say "SVR model" but this section tunes
# AdaBoost — copy/paste leftover from the earlier (commented-out) experiments.
print("Time to fine-tune on SVR model: %f"%(end - start))
print("Optimal model parameters: %s"%opt_model.best_estimator_)
print("Best fine tune score: %f"%(opt_model.best_score_))
print("Accuracy of model on test set: %f"%(score))
| apache-2.0 |
gjermv/potato | sccs/gpx/tcxtricks.py | 1 | 4297 | '''
Created on 7 Apr 2016
@author: gjermund.vingerhagen
'''
from lxml import etree as etree
import utmconverter as utm
import algos as algos
import gpxtricks
import pandas as pd
from datetime import datetime as dt
from datetime import timedelta as dtt
from matplotlib import pyplot as plt
import time
import googlemaps
import glob
import json
import numpy as np
def TCXtoDataFrame(filename):
    """Parse a Garmin TCX activity file into a pandas DataFrame.

    One row per trackpoint with columns: name (activity Id), desc (sport),
    segno (lap number), time, duration (seconds since first <Time>), lat,
    lon, ele, dist, heartrate, and a derived speed column (m/s between
    consecutive points). Trackpoints missing position data get NaN values.
    """
    f = open(filename, encoding='utf-8')
    # Namespace in Clark notation, e.g. '{http://...}', for qualified lookups.
    ns = findNamespace(f)
    xml = etree.parse(f)
    f.close()
    TCXlist = list()
    stime = xml.iter(ns+"Time")
    segno = 0
    name = xml.find(ns+"Activities/"+ns+"Activity/"+ns+"Id").text
    desc = xml.find(ns+"Activities/"+ns+"Activity").attrib['Sport']
    # First <Time> element in document order is taken as the activity start.
    # NOTE(review): despite its name, gpxtricks.gpxtimeToStr appears to return
    # a datetime (it is subtracted from below) — confirm.
    for item in stime:
        startTime = gpxtricks.gpxtimeToStr(item.text)
        break
    for lap in xml.iter(ns+"Lap"):
        segno += 1
        for trkPoint in lap.iter(ns+"Trackpoint"):
            trkpoint = dict()
            trkpoint['name'] = name
            trkpoint['desc'] = desc
            trkpoint['segno'] = segno
            trkpoint['time'] = trkPoint.find(ns+"Time").text
            trkpoint['duration'] = (gpxtricks.gpxtimeToStr(trkpoint['time'])-startTime).total_seconds()
            # Position/elevation/distance may be absent (e.g. GPS dropout).
            try:
                trkpoint['lat'] = trkPoint.find(ns+"Position/"+ns+"LatitudeDegrees").text
                trkpoint['lon'] = trkPoint.find(ns+"Position/"+ns+"LongitudeDegrees").text
                trkpoint['ele'] = trkPoint.find(ns+"AltitudeMeters").text
                trkpoint['dist'] = trkPoint.find(ns+"DistanceMeters").text
            except:
                trkpoint['lat'] = np.NAN
                trkpoint['lon'] = np.NAN
                trkpoint['ele'] = np.NAN
                trkpoint['dist'] = np.NAN
            try:
                trkpoint['heartrate']= int(trkPoint.find(ns+"HeartRateBpm/"+ns+"Value").text)
            except:
                trkpoint['heartrate']= np.NAN
            TCXlist.append(trkpoint)
    df = pd.DataFrame(TCXlist)
    # Convert text fields parsed from XML into proper dtypes.
    df['time'] = pd.to_datetime(df['time'])
    df['lat'] = df['lat'].astype(float)
    df['lon'] = df['lon'].astype(float)
    df['ele'] = df['ele'].astype(float)
    df['dist'] = df['dist'].astype(float)
    # Speed between consecutive points; shift(1) aligns it to the later point.
    df['speed'] = (df['dist'].shift(-1)-df['dist'])/(df['duration'].shift(-1)-df['duration'])
    df['speed'] = df['speed'].shift(1)
    return df
def getTCXheartzone(dataframe):
    """Return seconds spent in each of five heart-rate zones.

    Zone boundaries (bpm): <135, 135-155, 155-165, 165-175, >=175.
    Returns a tuple (zone1, ..., zone5) of summed inter-sample durations.

    NOTE(review): mutates the caller's frame by adding a 'timediff' column.
    NOTE(review): the ``[1:]`` slice drops the first row of each *filtered*
    subset (not just the global NaT from diff()) — presumably intentional to
    skip the transition sample into each zone, but confirm.
    """
    df = dataframe
    # Time elapsed since the previous sample (first row is NaT).
    df['timediff'] = df['time'].diff()
    df1 = df[(df['heartrate']<135) & (df['heartrate']>=0)]
    df2 = df[(df['heartrate']<155) & (df['heartrate']>=135)]
    df3 = df[(df['heartrate']<165) & (df['heartrate']>=155)]
    df4 = df[(df['heartrate']<175) & (df['heartrate']>=165)]
    df5 = df[df['heartrate']>=175]
    hr5zone = sum(df5['timediff'].dt.seconds[1:])
    hr4zone = sum(df4['timediff'].dt.seconds[1:])
    hr3zone = sum(df3['timediff'].dt.seconds[1:])
    hr2zone = sum(df2['timediff'].dt.seconds[1:])
    hr1zone = sum(df1['timediff'].dt.seconds[1:])
    return (hr1zone,hr2zone,hr3zone,hr4zone,hr5zone)
def getmainInfoTCX(dataframe):
    """Summarize a TCXtoDataFrame frame into a dict of activity statistics.

    Keys: length (km), dateandtime, tottime, walk_time (moving time),
    avg_speed (km/h while moving), climbing (m), sone1..sone5 (heart-rate
    zone seconds), plus empty placeholder fields (activity/health/comment).
    """
    length = max(dataframe['dist'])
    tottime = dataframe['time'].max()-dataframe['time'].min()
    dateandtime = dataframe['time'][0]
    climbing = gpxtricks.getClimbingHeightGPS(dataframe)
    # Samples slower than 0.4167 m/s (1.5 km/h) count as standing still.
    stopframe = (dataframe[dataframe['speed']<0.4167][['duration']].index)
    stoptime = sum(dataframe['duration'].diff()[stopframe])
    walktime = tottime.total_seconds() - stoptime
    info = dict()
    info['length'] = round(length/1000,2) #km
    info['dateandtime'] = dateandtime
    info['tottime'] = tottime
    info['walk_time'] = dtt(seconds=walktime)
    # Average moving speed in km/h (length in m / moving seconds * 3.6).
    info['avg_speed'] = length/walktime*3.6
    info['climbing'] = round(climbing,1)
    # Placeholders filled in elsewhere (presumably by the caller/UI) — confirm.
    info['activity'] = ''
    info['health'] = ''
    info['comment'] = ''
    info['sone1'],info['sone2'],info['sone3'],info['sone4'],info['sone5'] = getTCXheartzone(dataframe)
    return info
def findNamespace(file):
    """Return the default XML namespace of *file* in Clark notation.

    Reads the first 1000 characters, extracts the value of the first
    ``xmlns="..."`` attribute and returns it wrapped in braces — the form
    lxml/ElementTree use for qualified tag names, e.g. ``'{http://...}'``.
    The file position is rewound so the caller can re-parse from the start.
    """
    # Renamed from `str`, which shadowed the builtin.
    head = file.read(1000)
    file.seek(0)
    # +7 skips the 6 chars of 'xmlns=' plus the opening quote.
    ind1 = head.find('xmlns=') + 7
    ind2 = head[ind1 + 1:].find('"')
    s = '{' + head[ind1:ind1 + ind2 + 1] + '}'
    return s
| gpl-2.0 |
kanhua/pypvcell | lab/SMARTS/smarts_df_util.py | 1 | 4419 | import numpy as np
import pandas as pd
import os
from SMARTS.smarts import get_clear_sky
from pvlib.tracking import singleaxis
def load_smarts_df(h5_data_files, add_dhi=True):
    """
    Load SMARTS generated DataFrames from several HDF5 files and combine
    them into a single DataFrame.

    :param h5_data_files: iterable of HDF5 file paths readable by pd.read_hdf
    :param add_dhi: if True, add a 'DHI' (diffuse horizontal irradiance)
        column computed as global tilt minus direct normal
    :return: the concatenated DataFrame
    """
    # Comprehension replaces the manual append loop; rows are stacked in
    # file order, exactly as before.
    df = pd.concat([pd.read_hdf(data_file) for data_file in h5_data_files])
    if add_dhi:
        # Diffuse component = global on the tilted plane minus the beam.
        df['DHI'] = df['GLOBL_TILT'] - df['BEAM_NORMAL']
    return df
def integrate_timestamp(df, wavelength_step):
    """Integrate spectral values over wavelength for each timestamp.

    Rows sharing the same index value (timestamp) are summed, and the sums
    are scaled by the wavelength sampling interval (rectangle rule).

    :param df: DataFrame of spectral samples indexed by timestamp
    :param wavelength_step: wavelength sampling interval (e.g. 2 for 2 nm)
    :return: DataFrame with one row per timestamp of integrated values
    """
    # Vectorized scaling replaces the deprecated per-element
    # DataFrame.applymap(lambda x: x * wavelength_step).
    return df.groupby(df.index).sum() * wavelength_step
def integrate_by_day(df, time_interval):
    """Integrate per-timestamp values over each calendar day.

    Expects a frame already reduced to one row per timestamp, as produced by
    integrate_timestamp (i.e. a single wavelength value remaining).

    :param df: DataFrame indexed by timestamps with a 'WVLGTH' column.
        NOTE: a 'date' column is added in place (mutates the caller's frame,
        as the original implementation did).
    :param time_interval: seconds between consecutive timestamps
    :return: DataFrame of daily integrals (rectangle rule)
    :raises ValueError: if more than one wavelength is present
    """
    wvl = pd.unique(df['WVLGTH'])
    if len(wvl) > 1:
        raise ValueError("it seems that this dataframe is not integrated at "
                         "each timestamp yet. Try integrate_timestamp first")
    df['date'] = df.index.date
    # Vectorized scaling replaces the deprecated per-element
    # DataFrame.applymap(lambda x: x * time_interval).
    return df.groupby(['date']).sum() * time_interval
def ideal_single_axis_tracker_tilt(azimuth, zenith):
    """
    Calculate the ideal tilt angle of a single axis tracker with its axis
    oriented towards north-south.
    The calculation uses Equation (1) in Ref.1.
    We follow the convention of azimuth angle in SMARTS model:
    north (0 deg), east (90 deg), south (180 deg), west (270 deg).
    Also note that Equation (1) in Ref.1 uses elevation, whereas we use the
    zenith angle here (zenith = 90 deg - elevation).

    Reference:
    [1] Lorenzo, Narvarte, and Muñoz, "Tracking and back-tracking,"
    Prog Photovoltaics Res Appl, vol. 19, no. 6, pp. 747-753, 2011.

    :param azimuth: solar azimuth angle in degrees. array-like.
    :param zenith: solar zenith angle in degrees. array-like.
    :return: (tilt, azimuth) of the tracker surface in degrees; tilt is
        non-negative, azimuth is 90 (east-facing) or 270 (west-facing)
    """
    # np.radians/np.degrees replace the hand-rolled */360*2*pi conversions.
    tracker_tilt = np.degrees(np.arctan(
        np.tan(np.radians(zenith)) * np.sin(np.radians(-azimuth - 180))))
    # Positive signed tilt -> face east (90 deg), otherwise west (270 deg).
    tracker_azim = (tracker_tilt <= 0) * 180 + 90
    return np.abs(tracker_tilt), tracker_azim
def single_axis_traker_angle(out_df: pd.DataFrame):
    """Return a copy of *out_df* with tracker surface angles added.

    'WAZIM' (tracker azimuth) and 'TILT' (tracker tilt) columns are computed
    from the frame's 'azimuth' and 'zenith' solar-position columns via
    ideal_single_axis_tracker_tilt; the input frame is left untouched.
    """
    azimuths = out_df['azimuth'].values
    zeniths = out_df['zenith'].values
    tracker_tilt, tracker_azim = ideal_single_axis_tracker_tilt(azimuths, zeniths)
    result = out_df.copy()
    result['WAZIM'] = tracker_azim
    result['TILT'] = tracker_tilt
    return result
def smarts_spectrum_with_single_axis_tracker(time_range,cache_1='cache1.h5',
                                             cache_2='cache2.h5',force_restart=False,
                                             norm_2pass=True):
    """
    Generate spectrum with single axis tracker.

    Runs SMARTS twice: a first pass to get solar positions, from which the
    pvlib single-axis tracker orientation is computed, and a second pass
    with the tracker's tilt/azimuth to get the plane-of-array spectrum.

    :param time_range: a datetime-like array
    :param cache_1: HDF5 cache file for the first-pass results
    :param cache_2: HDF5 cache file for the final (second-pass) results
    :param force_restart: if False and cache_2 exists, return cached results
    :param norm_2pass: if True, rescale the beam spectrum by the ratio of
        direct tilt to direct normal irradiance
    :return: (spectrum DataFrame, parameter DataFrame)
    """
    if force_restart==False and os.path.exists(cache_2)==True:
        print("Load data from cache.")
        df = pd.read_hdf(cache_2, key='spec_df')
        out_df = pd.read_hdf(cache_2, key='param_df')
        return df,out_df
    # Run the first pass to calculate the solar position
    # (TILT=-999 presumably tells SMARTS to skip the tilted-surface
    # calculation in this pass — confirm against the SMARTS wrapper).
    df,out_df=get_clear_sky(time_range,extend_dict={'TILT':-999})
    df.to_hdf(cache_1,key='spec_df',mode='a')
    out_df.to_hdf(cache_1,key='param_df',mode='a')
    tracker_angle = singleaxis(apparent_azimuth=out_df['azimuth'],
                               apparent_zenith=out_df['zenith'],
                               backtrack=False)
    # Add tracker angle into the parameter dataframe
    out_df = pd.concat([out_df, tracker_angle], axis=1)
    # Rename the tracker azimuth and tilt in order to feed them into 2nd pass SMARTS
    out_df = out_df.rename(index=str, columns={"surface_azimuth": "WAZIM", "surface_tilt": "TILT"})
    # Do the second pass to calculate the output spectrum by using single axis tracker
    df,n_out_df=get_clear_sky(time_range,extend_df=out_df[['TILT','WAZIM']])
    # Renormalize the direct normal incidence
    if norm_2pass==True:
        n_out_df['direct_norm_factor'] = n_out_df['direct_tilt'] / n_out_df['direct_normal']
        df = pd.merge(left=df, right=n_out_df, left_index=True, right_index=True)
        df['BEAM_NORMAL'] = df['BEAM_NORMAL'] * df['direct_norm_factor']
    df.to_hdf(cache_2,key='spec_df',mode='a')
    n_out_df.to_hdf(cache_2,key='param_df',mode='a')
    return df,n_out_df
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.