repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
qeedquan/misc_utilities | math/controls/simple-gain-1.py | 1 | 1866 | """
https://lpsa.swarthmore.edu/Root_Locus/RootLocusWhy.html
"""
import numpy as np
import matplotlib.pyplot as plt
from sympy.abc import *
from sympy.integrals import inverse_laplace_transform
from sympy.solvers import solve
from sympy import *
def eval_transfer_function(G, R, K0, t0, t1):
    """Sample the closed-loop time response C(t) = L^-1{G(s)*R(s)}.

    Parameters
    ----------
    G : sympy expression in s, may contain the gain symbol K
    R : sympy expression in s for the input (e.g. 1/s for a unit step)
    K0 : numeric value substituted for the symbol K
    t0, t1 : start and end of the time window to sample

    Returns
    -------
    (x, y) : 100 evenly spaced sample times and the real part of C(t)
    evaluated at each of them (suitable for plotting).
    """
    G = G.subs(K, K0)
    R = R.subs(K, K0)
    # Inverse Laplace transform gives the time-domain output C(t)
    C = inverse_laplace_transform(G*R, s, t)
    x = np.linspace(t0, t1, 100)
    # NOTE(review): the trailing .subs(0, 1) appears intended to replace a
    # leftover literal 0 (e.g. Heaviside(0)) with 1 in the evaluated
    # expression -- confirm against the sympy version in use
    y = np.array([C.subs(t, ts).evalf().subs(0, 1) for ts in x])
    # Keep only the real part; numerical evaluation can leave tiny
    # imaginary residue
    y = np.array([re(ys) for ys in y])
    return (x, y)
"""
Assume transfer function is
G(s) = 1/(s*(s+3))
In a closed-loop feedback system with gain K
we have the following expression
H(s) = C(s)/R(s) = K*G(s) / (1 + K*G(s))
where C(s) is the output and R(s) is the input
Evaluating H(s) gives us
H(s) = K/(s**2 + 3*s + K)
We can control the gain K so we can vary it and look at the output
The input we will feed in this case is a step response (laplace transform is 1/s)
Since it is time consuming to try various K to see what works best, we can find K
another way
Solve for the roots of the poles
For the function
s**2 + 3*s + K gives us the characteristic roots
s = -3/2 +- sqrt(9 - 4K)/2
9 > 4K is overdamped (real roots)
9 < 4K is underdamped (complex roots)
9 = 4K is critically damped
Say we want critical damping, we solve for K to be 4.5
"""
# Characteristic polynomial of the closed-loop system H(s) = K/(s**2 + 3*s + K)
D = s**2 + 3*s + K
G1 = K/D
# Unit-step input (its Laplace transform is 1/s)
R1 = 1/s
# Sample the step response for several gains, including the critically
# damped gain K = 4.5 derived in the comment block above
x1, y1 = eval_transfer_function(G1, R1, 1, 0, 10)
x2, y2 = eval_transfer_function(G1, R1, 10, 0, 10)
x3, y3 = eval_transfer_function(G1, R1, 100, 0, 10)
x4, y4 = eval_transfer_function(G1, R1, 4.5, 0, 10)
# Symbolic roots of the characteristic polynomial as a function of K
print("Roots", solve(D, s))
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.figure(1)
plt.plot(x1, y1, label='K=1')
plt.plot(x2, y2, label='K=10')
plt.plot(x3, y3, label='K=100')
plt.plot(x4, y4, label='K=4.5')
plt.legend(loc='best')
plt.savefig('simple-gain.png')
plt.show()
| mit |
scubamut/SimpleBacktester | backtest_helpers/backtest.py | 1 | 1659 | # THIS ONE MATCHES PV
# SEE PV backtest :https://goo.gl/lBR4K9
# AND spreadsheet : https://goo.gl/8KGp58
# and Quantopian backtest : https://goo.gl/xytT5L
def backtest(prices, weights, capital, offset=1, commission=0.):
    """Simulate a periodically rebalanced portfolio and return its history.

    The statement order here deliberately matches an externally validated
    backtest (see the PV/spreadsheet links above) -- do not reorder.

    Parameters
    ----------
    prices : pd.DataFrame of asset prices indexed by trading date
    weights : pd.DataFrame of target weights indexed by rebalance dates,
        which must also appear in ``prices.index``
    capital : float, cash deployed on the first buy date
    offset : int, number of bars after a rebalance date at which the
        trade actually executes
    commission : float
        # NOTE(review): accepted but never applied anywhere in the body

    Returns
    -------
    p_value : pd.Series of portfolio value from the first buy date onward
    p_holdings : pd.DataFrame of share holdings per date
    p_weights : pd.DataFrame of realized portfolio weights per date
    """
    import pandas as pd
    rebalance_dates = weights.index
    # Execution dates: `offset` bars after each rebalance signal date
    buy_dates = [prices.index[d + offset] for d in range(len(prices.index) - 1) if prices.index[d] in rebalance_dates]
    print('FIRST BUY DATE = {}\n'.format(buy_dates[0]))
    # Share holdings per asset, carried forward day by day
    p_holdings = pd.DataFrame(0, index=prices.index, columns=prices.columns)
    cash = 0.
    for i, date in enumerate(prices.index):
        if date in rebalance_dates:
            # print ('--------------------------------------------------------------------')
            # Capture the new target weights; holdings stay unchanged
            # until the corresponding buy date `offset` bars later
            new_weights = weights.loc[date]
            p_holdings.iloc[i] = p_holdings.iloc[i - 1]
        if date in buy_dates:
            if date == buy_dates[0]:
                # Initial deployment of capital at the first buy date
                p_holdings.loc[date] = (capital * weights.iloc[0] / prices.loc[date])
                # print ('INIT', cash, p_holdings.iloc[i-1],prices.loc[date], new_weights)
            else:
                # Mark the existing holdings to market, split the value by
                # the most recent target weights, and convert to shares
                portfolio_value = cash + (p_holdings.iloc[i - 1] * prices.loc[date]).sum() * new_weights
                p_holdings.iloc[i] = (portfolio_value / prices.loc[date]).fillna(0)
        else:
            # No trade today: carry holdings forward unchanged
            p_holdings.iloc[i] = p_holdings.iloc[i - 1]
            # print ('{} HOLDINGS UNCHANGED'.format(date))
    # Portfolio value only makes sense once invested (from first buy date)
    p_value = (p_holdings * prices).sum(1)[p_holdings.index >= buy_dates[0]]
    # print(p_holdings, )
    # Realized weights: per-asset market value over total market value
    p_weights = p_holdings.mul(prices).div(p_holdings.mul(prices).sum(axis=1), axis=0).fillna(0)
    return p_value, p_holdings, p_weights
abhishekkrthakur/scikit-learn | benchmarks/bench_covertype.py | 154 | 7296 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
    """Load the covertype data, then cache and memmap the train/test split.

    Parameters
    ----------
    dtype : dtype for the feature matrix (float32 halves memory use).
    order : 'C' or 'F' memory layout for the feature matrix.
    random_state : seed used when shuffling the dataset.

    Returns
    -------
    X_train, X_test, y_train, y_test : standardized train/test split
    using the first 522,911 shuffled samples for training (as in
    [Joachims, 2006]).
    """
    ######################################################################
    ## Load dataset
    print("Loading dataset...")
    data = fetch_covtype(download_if_missing=True, shuffle=True,
                         random_state=random_state)
    X = check_array(data['data'], dtype=dtype, order=order)
    # Binarize the 7-class target: class 1 (spruce/fir) vs. the rest.
    # Use the builtin `int` -- `np.int` is a deprecated alias removed in
    # NumPy 1.24 with identical behavior.
    y = (data['target'] != 1).astype(int)
    ## Create train-test split (as [Joachims, 2006])
    print("Creating train-test split...")
    n_train = 522911
    X_train = X[:n_train]
    y_train = y[:n_train]
    X_test = X[n_train:]
    y_test = y[n_train:]
    ## Standardize first 10 features (the numerical ones); the remaining
    ## columns are binary indicators, so zeroing mean and forcing std=1
    ## leaves them untouched.
    mean = X_train.mean(axis=0)
    std = X_train.std(axis=0)
    mean[10:] = 0.0
    std[10:] = 1.0
    X_train = (X_train - mean) / std
    X_test = (X_test - mean) / std
    return X_train, X_test, y_train, y_test
# Candidate classifiers, keyed by the name accepted on the command line
# via --classifiers. Only the selected subset is actually trained.
ESTIMATORS = {
    'GBRT': GradientBoostingClassifier(n_estimators=250),
    'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
    'RandomForest': RandomForestClassifier(n_estimators=20),
    'CART': DecisionTreeClassifier(min_samples_split=5),
    'SGD': SGDClassifier(alpha=0.001, n_iter=2),
    'GaussianNB': GaussianNB(),
    'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
                           tol=1e-3)
}
if __name__ == "__main__":
    # ------------------------------------------------------------------
    # Command-line interface
    # ------------------------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--classifiers', nargs="+",
                        choices=ESTIMATORS, type=str,
                        default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
                        help="list of classifiers to benchmark.")
    parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
                        help="Number of concurrently running workers for "
                             "models that support parallelism.")
    parser.add_argument('--order', nargs="?", default="C", type=str,
                        choices=["F", "C"],
                        help="Allow to choose between fortran and C ordered "
                             "data")
    parser.add_argument('--random-seed', nargs="?", default=13, type=int,
                        help="Common seed used by random number generator.")
    args = vars(parser.parse_args())
    print(__doc__)
    X_train, X_test, y_train, y_test = load_data(
        order=args["order"], random_state=args["random_seed"])
    # ------------------------------------------------------------------
    # Report dataset statistics before training
    # ------------------------------------------------------------------
    print("")
    print("Dataset statistics:")
    print("===================")
    print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
    print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
    print("%s %s" % ("data type:".ljust(25), X_train.dtype))
    print("%s %d (pos=%d, neg=%d, size=%dMB)"
          % ("number of train samples:".ljust(25),
             X_train.shape[0], np.sum(y_train == 1),
             np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
    print("%s %d (pos=%d, neg=%d, size=%dMB)"
          % ("number of test samples:".ljust(25),
             X_test.shape[0], np.sum(y_test == 1),
             np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
    print()
    print("Training Classifiers")
    print("====================")
    error, train_time, test_time = {}, {}, {}
    for name in sorted(args["classifiers"]):
        print("Training %s ... " % name, end="")
        estimator = ESTIMATORS[name]
        estimator_params = estimator.get_params()
        # Propagate the common seed to every *random_state parameter the
        # estimator exposes, so runs are reproducible
        estimator.set_params(**{p: args["random_seed"]
                                for p in estimator_params
                                if p.endswith("random_state")})
        if "n_jobs" in estimator_params:
            estimator.set_params(n_jobs=args["n_jobs"])
        # Time fit and predict separately
        time_start = time()
        estimator.fit(X_train, y_train)
        train_time[name] = time() - time_start
        time_start = time()
        y_pred = estimator.predict(X_test)
        test_time[name] = time() - time_start
        error[name] = zero_one_loss(y_test, y_pred)
        print("done")
    # ------------------------------------------------------------------
    # Final report, sorted best (lowest error) first
    # ------------------------------------------------------------------
    print()
    print("Classification performance:")
    print("===========================")
    print("%s %s %s %s"
          % ("Classifier ", "train-time", "test-time", "error-rate"))
    print("-" * 44)
    for name in sorted(args["classifiers"], key=error.get):
        print("%s %s %s %s" % (name.ljust(12),
                               ("%.4fs" % train_time[name]).center(10),
                               ("%.4fs" % test_time[name]).center(10),
                               ("%.4f" % error[name]).center(10)))
    print()
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/seaborn/categorical.py | 1 | 133468 | from __future__ import division
from textwrap import dedent
import colorsys
import numpy as np
from scipy import stats
import pandas as pd
from pandas.core.series import remove_na
import matplotlib as mpl
from matplotlib.collections import PatchCollection
import matplotlib.patches as Patches
import matplotlib.pyplot as plt
import warnings
from .external.six import string_types
from .external.six.moves import range
from . import utils
from .utils import iqr, categorical_order
from .algorithms import bootstrap
from .palettes import color_palette, husl_palette, light_palette
from .axisgrid import FacetGrid, _facet_docs
__all__ = ["boxplot", "violinplot", "stripplot", "swarmplot", "lvplot",
"pointplot", "barplot", "countplot", "factorplot"]
class _CategoricalPlotter(object):
    """Shared machinery for seaborn's categorical plots.

    Subclasses call ``establish_variables`` and ``establish_colors`` to
    normalize the many accepted input forms (wide/long data, arrays,
    DataFrames, with or without hue nesting) into a common representation
    stored on the instance (``plot_data``, ``group_names``, ``plot_hues``,
    ...), then draw onto a matplotlib Axes and use ``annotate_axes`` for
    the shared labeling/limits logic.
    """
    # Default width allotted to each group along the categorical axis
    width = .8
    def establish_variables(self, x=None, y=None, hue=None, data=None,
                            orient=None, order=None, hue_order=None,
                            units=None):
        """Convert input specification into a common representation.

        Sets ``self.plot_data`` (list of 1d float arrays, one per group)
        plus the group/hue names, labels, orientation, and optional
        statistical units used by the drawing methods.
        """
        # Option 1:
        # We are plotting a wide-form dataset
        # -----------------------------------
        if x is None and y is None:
            # Do a sanity check on the inputs
            if hue is not None:
                error = "Cannot use `hue` without `x` or `y`"
                raise ValueError(error)
            # No hue grouping with wide inputs
            plot_hues = None
            hue_title = None
            hue_names = None
            # No statistical units with wide inputs
            plot_units = None
            # We also won't get a axes labels here
            value_label = None
            group_label = None
            # Option 1a:
            # The input data is a Pandas DataFrame
            # ------------------------------------
            if isinstance(data, pd.DataFrame):
                # Order the data correctly
                if order is None:
                    order = []
                    # Reduce to just numeric columns
                    for col in data:
                        try:
                            data[col].astype(np.float)
                            order.append(col)
                        except ValueError:
                            pass
                plot_data = data[order]
                group_names = order
                group_label = data.columns.name
                # Convert to a list of arrays, the common representation
                iter_data = plot_data.iteritems()
                plot_data = [np.asarray(s, np.float) for k, s in iter_data]
            # Option 1b:
            # The input data is an array or list
            # ----------------------------------
            else:
                # We can't reorder the data
                if order is not None:
                    error = "Input data must be a pandas object to reorder"
                    raise ValueError(error)
                # The input data is an array
                if hasattr(data, "shape"):
                    if len(data.shape) == 1:
                        if np.isscalar(data[0]):
                            plot_data = [data]
                        else:
                            plot_data = list(data)
                    elif len(data.shape) == 2:
                        nr, nc = data.shape
                        if nr == 1 or nc == 1:
                            plot_data = [data.ravel()]
                        else:
                            plot_data = [data[:, i] for i in range(nc)]
                    else:
                        error = ("Input `data` can have no "
                                 "more than 2 dimensions")
                        raise ValueError(error)
                # Check if `data` is None to let us bail out here (for testing)
                elif data is None:
                    plot_data = [[]]
                # The input data is a flat list
                elif np.isscalar(data[0]):
                    plot_data = [data]
                # The input data is a nested list
                # This will catch some things that might fail later
                # but exhaustive checks are hard
                else:
                    plot_data = data
                # Convert to a list of arrays, the common representation
                plot_data = [np.asarray(d, np.float) for d in plot_data]
                # The group names will just be numeric indices
                group_names = list(range((len(plot_data))))
            # Figure out the plotting orientation
            orient = "h" if str(orient).startswith("h") else "v"
        # Option 2:
        # We are plotting a long-form dataset
        # -----------------------------------
        else:
            # See if we need to get variables from `data`
            if data is not None:
                x = data.get(x, x)
                y = data.get(y, y)
                hue = data.get(hue, hue)
                units = data.get(units, units)
            # Validate the inputs
            for input in [x, y, hue, units]:
                if isinstance(input, string_types):
                    err = "Could not interpret input '{}'".format(input)
                    raise ValueError(err)
            # Figure out the plotting orientation
            orient = self.infer_orient(x, y, orient)
            # Option 2a:
            # We are plotting a single set of data
            # ------------------------------------
            if x is None or y is None:
                # Determine where the data are
                vals = y if x is None else x
                # Put them into the common representation
                plot_data = [np.asarray(vals)]
                # Get a label for the value axis
                if hasattr(vals, "name"):
                    value_label = vals.name
                else:
                    value_label = None
                # This plot will not have group labels or hue nesting
                groups = None
                group_label = None
                group_names = []
                plot_hues = None
                hue_names = None
                hue_title = None
                plot_units = None
            # Option 2b:
            # We are grouping the data values by another variable
            # ---------------------------------------------------
            else:
                # Determine which role each variable will play
                if orient == "v":
                    vals, groups = y, x
                else:
                    vals, groups = x, y
                # Get the categorical axis label
                group_label = None
                if hasattr(groups, "name"):
                    group_label = groups.name
                # Get the order on the categorical axis
                group_names = categorical_order(groups, order)
                # Group the numeric data
                plot_data, value_label = self._group_longform(vals, groups,
                                                              group_names)
                # Now handle the hue levels for nested ordering
                if hue is None:
                    plot_hues = None
                    hue_title = None
                    hue_names = None
                else:
                    # Get the order of the hue levels
                    hue_names = categorical_order(hue, hue_order)
                    # Group the hue data
                    plot_hues, hue_title = self._group_longform(hue, groups,
                                                                group_names)
                # Now handle the units for nested observations
                if units is None:
                    plot_units = None
                else:
                    plot_units, _ = self._group_longform(units, groups,
                                                         group_names)
        # Assign object attributes
        # ------------------------
        self.orient = orient
        self.plot_data = plot_data
        self.group_label = group_label
        self.value_label = value_label
        self.group_names = group_names
        self.plot_hues = plot_hues
        self.hue_title = hue_title
        self.hue_names = hue_names
        self.plot_units = plot_units
    def _group_longform(self, vals, grouper, order):
        """Group a long-form variable by another with correct order.

        Returns a list of arrays (one per level in ``order``; empty array
        for missing levels) and the value-axis label.
        """
        # Ensure that the groupby will work
        if not isinstance(vals, pd.Series):
            vals = pd.Series(vals)
        # Group the val data
        grouped_vals = vals.groupby(grouper)
        out_data = []
        for g in order:
            try:
                g_vals = np.asarray(grouped_vals.get_group(g))
            except KeyError:
                # Level present in `order` but absent from the data
                g_vals = np.array([])
            out_data.append(g_vals)
        # Get the vals axis label
        label = vals.name
        return out_data, label
    def establish_colors(self, color, palette, saturation):
        """Get a list of colors for the main component of the plots."""
        # One color per hue level if nesting, otherwise one per group
        if self.hue_names is None:
            n_colors = len(self.plot_data)
        else:
            n_colors = len(self.hue_names)
        # Determine the main colors
        if color is None and palette is None:
            # Determine whether the current palette will have enough values
            # If not, we'll default to the husl palette so each is distinct
            current_palette = utils.get_color_cycle()
            if n_colors <= len(current_palette):
                colors = color_palette(n_colors=n_colors)
            else:
                colors = husl_palette(n_colors, l=.7)
        elif palette is None:
            # When passing a specific color, the interpretation depends
            # on whether there is a hue variable or not.
            # If so, we will make a blend palette so that the different
            # levels have some amount of variation.
            if self.hue_names is None:
                colors = [color] * n_colors
            else:
                colors = light_palette(color, n_colors)
        else:
            # Let `palette` be a dict mapping level to color
            if isinstance(palette, dict):
                if self.hue_names is None:
                    levels = self.group_names
                else:
                    levels = self.hue_names
                palette = [palette[l] for l in levels]
            colors = color_palette(palette, n_colors)
        # Desaturate a bit because these are patches
        if saturation < 1:
            colors = color_palette(colors, desat=saturation)
        # Convert the colors to a common representation
        rgb_colors = color_palette(colors)
        # Determine the gray color to use for the lines framing the plot
        # (60% of the lightness of the darkest fill color)
        light_vals = [colorsys.rgb_to_hls(*c)[1] for c in rgb_colors]
        l = min(light_vals) * .6
        gray = mpl.colors.rgb2hex((l, l, l))
        # Assign object attributes
        self.colors = rgb_colors
        self.gray = gray
    def infer_orient(self, x, y, orient=None):
        """Determine how the plot should be oriented based on the data."""
        orient = str(orient)
        def is_categorical(s):
            try:
                # Correct way, but doesn't exist in older Pandas
                return pd.core.common.is_categorical_dtype(s)
            except AttributeError:
                # Also works, but feels hackier
                return str(s.dtype) == "categorical"
        def is_not_numeric(s):
            try:
                np.asarray(s, dtype=np.float)
            except ValueError:
                return True
            return False
        no_numeric = "Neither the `x` nor `y` variable appears to be numeric."
        # Explicit orient wins; otherwise put the categorical/non-numeric
        # variable on the categorical axis, defaulting to vertical
        if orient.startswith("v"):
            return "v"
        elif orient.startswith("h"):
            return "h"
        elif x is None:
            return "v"
        elif y is None:
            return "h"
        elif is_categorical(y):
            if is_categorical(x):
                raise ValueError(no_numeric)
            else:
                return "h"
        elif is_not_numeric(y):
            if is_not_numeric(x):
                raise ValueError(no_numeric)
            else:
                return "h"
        else:
            return "v"
    @property
    def hue_offsets(self):
        """A list of center positions for plots when hue nesting is used."""
        n_levels = len(self.hue_names)
        each_width = self.width / n_levels
        offsets = np.linspace(0, self.width - each_width, n_levels)
        # Center the offsets around the group position
        offsets -= offsets.mean()
        return offsets
    @property
    def nested_width(self):
        """A float with the width of plot elements when hue nesting is used."""
        # .98 leaves a small gap between adjacent hue elements
        return self.width / len(self.hue_names) * .98
    def annotate_axes(self, ax):
        """Add descriptive labels to an Axes object."""
        if self.orient == "v":
            xlabel, ylabel = self.group_label, self.value_label
        else:
            xlabel, ylabel = self.value_label, self.group_label
        if xlabel is not None:
            ax.set_xlabel(xlabel)
        if ylabel is not None:
            ax.set_ylabel(ylabel)
        if self.orient == "v":
            ax.set_xticks(np.arange(len(self.plot_data)))
            ax.set_xticklabels(self.group_names)
        else:
            ax.set_yticks(np.arange(len(self.plot_data)))
            ax.set_yticklabels(self.group_names)
        if self.orient == "v":
            ax.xaxis.grid(False)
            ax.set_xlim(-.5, len(self.plot_data) - .5)
        else:
            ax.yaxis.grid(False)
            ax.set_ylim(-.5, len(self.plot_data) - .5)
        if self.hue_names is not None:
            leg = ax.legend(loc="best")
            if self.hue_title is not None:
                leg.set_title(self.hue_title)
                # Set the title size a roundabout way to maintain
                # compatibility with matplotlib 1.1
                try:
                    title_size = mpl.rcParams["axes.labelsize"] * .85
                except TypeError:  # labelsize is something like "large"
                    title_size = mpl.rcParams["axes.labelsize"]
                prop = mpl.font_manager.FontProperties(size=title_size)
                leg._legend_title_box._text.set_font_properties(prop)
    def add_legend_data(self, ax, color, label):
        """Add a dummy patch object so we can get legend data."""
        rect = plt.Rectangle([0, 0], 0, 0,
                             linewidth=self.linewidth / 2,
                             edgecolor=self.gray,
                             facecolor=color,
                             label=label)
        ax.add_patch(rect)
class _BoxPlotter(_CategoricalPlotter):
    """Draw box-and-whisker plots, optionally nested by a hue variable.

    Delegates input wrangling and color selection to the base class,
    draws with matplotlib's ``Axes.boxplot``, then restyles the artists
    to match seaborn's look.
    """
    def __init__(self, x, y, hue, data, order, hue_order,
                 orient, color, palette, saturation,
                 width, fliersize, linewidth):
        self.establish_variables(x, y, hue, data, orient, order, hue_order)
        self.establish_colors(color, palette, saturation)
        self.width = width
        # Marker size for outlier ("flier") points
        self.fliersize = fliersize
        if linewidth is None:
            linewidth = mpl.rcParams["lines.linewidth"]
        self.linewidth = linewidth
    def draw_boxplot(self, ax, kws):
        """Use matplotlib to draw a boxplot on an Axes."""
        vert = self.orient == "v"
        # Pull per-artist style overrides (boxprops, whiskerprops, ...)
        # out of kws so they can be applied after restyling
        props = {}
        for obj in ["box", "whisker", "cap", "median", "flier"]:
            props[obj] = kws.pop(obj + "props", {})
        for i, group_data in enumerate(self.plot_data):
            if self.plot_hues is None:
                # Handle case where there is no data at this level
                if group_data.size == 0:
                    continue
                # Draw a single box or a set of boxes
                # with a single level of grouping
                box_data = remove_na(group_data)
                # Handle case where there is no non-null data
                if box_data.size == 0:
                    continue
                artist_dict = ax.boxplot(box_data,
                                         vert=vert,
                                         patch_artist=True,
                                         positions=[i],
                                         widths=self.width,
                                         **kws)
                color = self.colors[i]
                self.restyle_boxplot(artist_dict, color, props)
            else:
                # Draw nested groups of boxes
                offsets = self.hue_offsets
                for j, hue_level in enumerate(self.hue_names):
                    # Add a legend for this hue level
                    # (only for the first group, to avoid duplicates)
                    if not i:
                        self.add_legend_data(ax, self.colors[j], hue_level)
                    # Handle case where there is no data at this level
                    if group_data.size == 0:
                        continue
                    hue_mask = self.plot_hues[i] == hue_level
                    box_data = remove_na(group_data[hue_mask])
                    # Handle case where there is no non-null data
                    if box_data.size == 0:
                        continue
                    # Shift this box within the group by its hue offset
                    center = i + offsets[j]
                    artist_dict = ax.boxplot(box_data,
                                             vert=vert,
                                             patch_artist=True,
                                             positions=[center],
                                             widths=self.nested_width,
                                             **kws)
                    self.restyle_boxplot(artist_dict, self.colors[j], props)
                    # Add legend data, but just for one set of boxes
    def restyle_boxplot(self, artist_dict, color, props):
        """Take a drawn matplotlib boxplot and make it look nice."""
        # Apply seaborn defaults first, then any user-supplied *props
        # overrides on top
        for box in artist_dict["boxes"]:
            box.update(dict(facecolor=color,
                            zorder=.9,
                            edgecolor=self.gray,
                            linewidth=self.linewidth))
            box.update(props["box"])
        for whisk in artist_dict["whiskers"]:
            whisk.update(dict(color=self.gray,
                              linewidth=self.linewidth,
                              linestyle="-"))
            whisk.update(props["whisker"])
        for cap in artist_dict["caps"]:
            cap.update(dict(color=self.gray,
                            linewidth=self.linewidth))
            cap.update(props["cap"])
        for med in artist_dict["medians"]:
            med.update(dict(color=self.gray,
                            linewidth=self.linewidth))
            med.update(props["median"])
        for fly in artist_dict["fliers"]:
            fly.update(dict(markerfacecolor=self.gray,
                            marker="d",
                            markeredgecolor=self.gray,
                            markersize=self.fliersize))
            fly.update(props["flier"])
    def plot(self, ax, boxplot_kws):
        """Make the plot."""
        self.draw_boxplot(ax, boxplot_kws)
        self.annotate_axes(ax)
        # Horizontal plots list groups top-to-bottom
        if self.orient == "h":
            ax.invert_yaxis()
class _ViolinPlotter(_CategoricalPlotter):
def __init__(self, x, y, hue, data, order, hue_order,
bw, cut, scale, scale_hue, gridsize,
width, inner, split, orient, linewidth,
color, palette, saturation):
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, saturation)
self.estimate_densities(bw, cut, scale, scale_hue, gridsize)
self.gridsize = gridsize
self.width = width
if inner is not None:
if not any([inner.startswith("quart"),
inner.startswith("box"),
inner.startswith("stick"),
inner.startswith("point")]):
err = "Inner style '{}' not recognized".format(inner)
raise ValueError(err)
self.inner = inner
if split and self.hue_names is not None and len(self.hue_names) != 2:
raise ValueError("Cannot use `split` with more than 2 hue levels.")
self.split = split
if linewidth is None:
linewidth = mpl.rcParams["lines.linewidth"]
self.linewidth = linewidth
def estimate_densities(self, bw, cut, scale, scale_hue, gridsize):
"""Find the support and density for all of the data."""
# Initialize data structures to keep track of plotting data
if self.hue_names is None:
support = []
density = []
counts = np.zeros(len(self.plot_data))
max_density = np.zeros(len(self.plot_data))
else:
support = [[] for _ in self.plot_data]
density = [[] for _ in self.plot_data]
size = len(self.group_names), len(self.hue_names)
counts = np.zeros(size)
max_density = np.zeros(size)
for i, group_data in enumerate(self.plot_data):
# Option 1: we have a single level of grouping
# --------------------------------------------
if self.plot_hues is None:
# Strip missing datapoints
kde_data = remove_na(group_data)
# Handle special case of no data at this level
if kde_data.size == 0:
support.append(np.array([]))
density.append(np.array([1.]))
counts[i] = 0
max_density[i] = 0
continue
# Handle special case of a single unique datapoint
elif np.unique(kde_data).size == 1:
support.append(np.unique(kde_data))
density.append(np.array([1.]))
counts[i] = 1
max_density[i] = 0
continue
# Fit the KDE and get the used bandwidth size
kde, bw_used = self.fit_kde(kde_data, bw)
# Determine the support grid and get the density over it
support_i = self.kde_support(kde_data, bw_used, cut, gridsize)
density_i = kde.evaluate(support_i)
# Update the data structures with these results
support.append(support_i)
density.append(density_i)
counts[i] = kde_data.size
max_density[i] = density_i.max()
# Option 2: we have nested grouping by a hue variable
# ---------------------------------------------------
else:
for j, hue_level in enumerate(self.hue_names):
# Handle special case of no data at this category level
if not group_data.size:
support[i].append(np.array([]))
density[i].append(np.array([1.]))
counts[i, j] = 0
max_density[i, j] = 0
continue
# Select out the observations for this hue level
hue_mask = self.plot_hues[i] == hue_level
# Strip missing datapoints
kde_data = remove_na(group_data[hue_mask])
# Handle special case of no data at this level
if kde_data.size == 0:
support[i].append(np.array([]))
density[i].append(np.array([1.]))
counts[i, j] = 0
max_density[i, j] = 0
continue
# Handle special case of a single unique datapoint
elif np.unique(kde_data).size == 1:
support[i].append(np.unique(kde_data))
density[i].append(np.array([1.]))
counts[i, j] = 1
max_density[i, j] = 0
continue
# Fit the KDE and get the used bandwidth size
kde, bw_used = self.fit_kde(kde_data, bw)
# Determine the support grid and get the density over it
support_ij = self.kde_support(kde_data, bw_used,
cut, gridsize)
density_ij = kde.evaluate(support_ij)
# Update the data structures with these results
support[i].append(support_ij)
density[i].append(density_ij)
counts[i, j] = kde_data.size
max_density[i, j] = density_ij.max()
# Scale the height of the density curve.
# For a violinplot the density is non-quantitative.
# The objective here is to scale the curves relative to 1 so that
# they can be multiplied by the width parameter during plotting.
if scale == "area":
self.scale_area(density, max_density, scale_hue)
elif scale == "width":
self.scale_width(density)
elif scale == "count":
self.scale_count(density, counts, scale_hue)
else:
raise ValueError("scale method '{}' not recognized".format(scale))
# Set object attributes that will be used while plotting
self.support = support
self.density = density
def fit_kde(self, x, bw):
"""Estimate a KDE for a vector of data with flexible bandwidth."""
# Allow for the use of old scipy where `bw` is fixed
try:
kde = stats.gaussian_kde(x, bw)
except TypeError:
kde = stats.gaussian_kde(x)
if bw != "scott": # scipy default
msg = ("Ignoring bandwidth choice, "
"please upgrade scipy to use a different bandwidth.")
warnings.warn(msg, UserWarning)
# Extract the numeric bandwidth from the KDE object
bw_used = kde.factor
# At this point, bw will be a numeric scale factor.
# To get the actual bandwidth of the kernel, we multiple by the
# unbiased standard deviation of the data, which we will use
# elsewhere to compute the range of the support.
bw_used = bw_used * x.std(ddof=1)
return kde, bw_used
def kde_support(self, x, bw, cut, gridsize):
"""Define a grid of support for the violin."""
support_min = x.min() - bw * cut
support_max = x.max() + bw * cut
return np.linspace(support_min, support_max, gridsize)
def scale_area(self, density, max_density, scale_hue):
"""Scale the relative area under the KDE curve.
This essentially preserves the "standard" KDE scaling, but the
resulting maximum density will be 1 so that the curve can be
properly multiplied by the violin width.
"""
if self.hue_names is None:
for d in density:
if d.size > 1:
d /= max_density.max()
else:
for i, group in enumerate(density):
for d in group:
if scale_hue:
max = max_density[i].max()
else:
max = max_density.max()
if d.size > 1:
d /= max
def scale_width(self, density):
"""Scale each density curve to the same height."""
if self.hue_names is None:
for d in density:
d /= d.max()
else:
for group in density:
for d in group:
d /= d.max()
def scale_count(self, density, counts, scale_hue):
"""Scale each density curve by the number of observations."""
if self.hue_names is None:
for count, d in zip(counts, density):
d /= d.max()
d *= count / counts.max()
else:
for i, group in enumerate(density):
for j, d in enumerate(group):
count = counts[i, j]
if scale_hue:
scaler = count / counts[i].max()
else:
scaler = count / counts.max()
d /= d.max()
d *= scaler
@property
def dwidth(self):
if self.hue_names is None:
return self.width / 2
elif self.split:
return self.width / 2
else:
return self.width / (2 * len(self.hue_names))
def draw_violins(self, ax):
"""Draw the violins onto `ax`."""
fill_func = ax.fill_betweenx if self.orient == "v" else ax.fill_between
for i, group_data in enumerate(self.plot_data):
kws = dict(edgecolor=self.gray, linewidth=self.linewidth)
# Option 1: we have a single level of grouping
# --------------------------------------------
if self.plot_hues is None:
support, density = self.support[i], self.density[i]
# Handle special case of no observations in this bin
if support.size == 0:
continue
# Handle special case of a single observation
elif support.size == 1:
val = np.asscalar(support)
d = np.asscalar(density)
self.draw_single_observation(ax, i, val, d)
continue
# Draw the violin for this group
grid = np.ones(self.gridsize) * i
fill_func(support,
grid - density * self.dwidth,
grid + density * self.dwidth,
facecolor=self.colors[i],
**kws)
# Draw the interior representation of the data
if self.inner is None:
continue
# Get a nan-free vector of datapoints
violin_data = remove_na(group_data)
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data, support, density, i)
# Draw quartile lines
elif self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data, support, density, i)
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data, support, density, i)
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i)
# Option 2: we have nested grouping by a hue variable
# ---------------------------------------------------
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
support, density = self.support[i][j], self.density[i][j]
kws["color"] = self.colors[j]
# Add legend data, but just for one set of violins
if not i:
self.add_legend_data(ax, self.colors[j], hue_level)
# Handle the special case where we have no observations
if support.size == 0:
continue
# Handle the special case where we have one observation
elif support.size == 1:
val = np.asscalar(support)
d = np.asscalar(density)
if self.split:
d = d / 2
at_group = i + offsets[j]
self.draw_single_observation(ax, at_group, val, d)
continue
# Option 2a: we are drawing a single split violin
# -----------------------------------------------
if self.split:
grid = np.ones(self.gridsize) * i
if j:
fill_func(support,
grid,
grid + density * self.dwidth,
**kws)
else:
fill_func(support,
grid - density * self.dwidth,
grid,
**kws)
# Draw the interior representation of the data
if self.inner is None:
continue
# Get a nan-free vector of datapoints
hue_mask = self.plot_hues[i] == hue_level
violin_data = remove_na(group_data[hue_mask])
# Draw quartile lines
if self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data,
support, density, i,
["left", "right"][j])
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data,
support, density, i,
["left", "right"][j])
# The box and point interior plots are drawn for
# all data at the group level, so we just do that once
if not j:
continue
# Get the whole vector for this group level
violin_data = remove_na(group_data)
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data,
support, density, i)
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i)
# Option 2b: we are drawing full nested violins
# -----------------------------------------------
else:
grid = np.ones(self.gridsize) * (i + offsets[j])
fill_func(support,
grid - density * self.dwidth,
grid + density * self.dwidth,
**kws)
# Draw the interior representation
if self.inner is None:
continue
# Get a nan-free vector of datapoints
hue_mask = self.plot_hues[i] == hue_level
violin_data = remove_na(group_data[hue_mask])
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data,
support, density,
i + offsets[j])
# Draw quartile lines
elif self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data,
support, density,
i + offsets[j])
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data,
support, density,
i + offsets[j])
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i + offsets[j])
def draw_single_observation(self, ax, at_group, at_quant, density):
"""Draw a line to mark a single observation."""
d_width = density * self.dwidth
if self.orient == "v":
ax.plot([at_group - d_width, at_group + d_width],
[at_quant, at_quant],
color=self.gray,
linewidth=self.linewidth)
else:
ax.plot([at_quant, at_quant],
[at_group - d_width, at_group + d_width],
color=self.gray,
linewidth=self.linewidth)
def draw_box_lines(self, ax, data, support, density, center):
"""Draw boxplot information at center of the density."""
# Compute the boxplot statistics
q25, q50, q75 = np.percentile(data, [25, 50, 75])
whisker_lim = 1.5 * iqr(data)
h1 = np.min(data[data >= (q25 - whisker_lim)])
h2 = np.max(data[data <= (q75 + whisker_lim)])
# Draw a boxplot using lines and a point
if self.orient == "v":
ax.plot([center, center], [h1, h2],
linewidth=self.linewidth,
color=self.gray)
ax.plot([center, center], [q25, q75],
linewidth=self.linewidth * 3,
color=self.gray)
ax.scatter(center, q50,
zorder=3,
color="white",
edgecolor=self.gray,
s=np.square(self.linewidth * 2))
else:
ax.plot([h1, h2], [center, center],
linewidth=self.linewidth,
color=self.gray)
ax.plot([q25, q75], [center, center],
linewidth=self.linewidth * 3,
color=self.gray)
ax.scatter(q50, center,
zorder=3,
color="white",
edgecolor=self.gray,
s=np.square(self.linewidth * 2))
def draw_quartiles(self, ax, data, support, density, center, split=False):
"""Draw the quartiles as lines at width of density."""
q25, q50, q75 = np.percentile(data, [25, 50, 75])
self.draw_to_density(ax, center, q25, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 1.5] * 2)
self.draw_to_density(ax, center, q50, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 3] * 2)
self.draw_to_density(ax, center, q75, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 1.5] * 2)
def draw_points(self, ax, data, center):
"""Draw individual observations as points at middle of the violin."""
kws = dict(s=np.square(self.linewidth * 2),
color=self.gray,
edgecolor=self.gray)
grid = np.ones(len(data)) * center
if self.orient == "v":
ax.scatter(grid, data, **kws)
else:
ax.scatter(data, grid, **kws)
def draw_stick_lines(self, ax, data, support, density,
center, split=False):
"""Draw individual observations as sticks at width of density."""
for val in data:
self.draw_to_density(ax, center, val, support, density, split,
linewidth=self.linewidth * .5)
def draw_to_density(self, ax, center, val, support, density, split, **kws):
"""Draw a line orthogonal to the value axis at width of density."""
idx = np.argmin(np.abs(support - val))
width = self.dwidth * density[idx] * .99
kws["color"] = self.gray
if self.orient == "v":
if split == "left":
ax.plot([center - width, center], [val, val], **kws)
elif split == "right":
ax.plot([center, center + width], [val, val], **kws)
else:
ax.plot([center - width, center + width], [val, val], **kws)
else:
if split == "left":
ax.plot([val, val], [center - width, center], **kws)
elif split == "right":
ax.plot([val, val], [center, center + width], **kws)
else:
ax.plot([val, val], [center - width, center + width], **kws)
    def plot(self, ax):
        """Make the violin plot."""
        self.draw_violins(ax)
        self.annotate_axes(ax)
        # Flip the value axis so horizontal plots read top to bottom
        if self.orient == "h":
            ax.invert_yaxis()
class _CategoricalScatterPlotter(_CategoricalPlotter):
    """Shared color/legend logic for categorical scatter plots."""

    @property
    def point_colors(self):
        """Return a color for each scatter point based on group and hue."""
        point_colors = []
        for i, group_data in enumerate(self.plot_data):

            # One RGB triple per observation in this group.
            # NOTE(review): rows that match no hue level keep uninitialized
            # values from np.empty — confirm callers guarantee full coverage.
            rgb = np.empty((group_data.size, 3))

            if self.plot_hues is None:
                # Single level of grouping: one color for the whole group
                rgb[:] = self.colors[i]
            else:
                # Color each observation according to its hue level
                for j, level in enumerate(self.hue_names):
                    if group_data.size:
                        rgb[self.plot_hues[i] == level] = self.colors[j]

            point_colors.append(rgb)
        return point_colors

    def add_legend_data(self, ax):
        """Add empty scatterplot artists with labels for the legend."""
        if self.hue_names is None:
            return
        for rgb, label in zip(self.colors, self.hue_names):
            ax.scatter([], [],
                       color=mpl.colors.rgb2hex(rgb),
                       label=label,
                       s=60)
class _StripPlotter(_CategoricalScatterPlotter):
    """1-d scatterplot with categorical organization."""
    def __init__(self, x, y, hue, data, order, hue_order,
                 jitter, split, orient, color, palette):
        """Initialize the plotter."""
        self.establish_variables(x, y, hue, data, orient, order, hue_order)
        self.establish_colors(color, palette, 1)

        # Set object attributes
        self.split = split
        self.width = .8

        if jitter == 1:  # Use a good default for `jitter = True`
            jlim = 0.1
        else:
            jlim = float(jitter)
        if self.hue_names is not None and split:
            jlim /= len(self.hue_names)
        # Sampler for random categorical-axis displacement of each point
        self.jitterer = stats.uniform(-jlim, jlim * 2).rvs

    def draw_stripplot(self, ax, kws):
        """Draw the points onto `ax`."""
        # Set the default zorder to 2.1, so that the points
        # will be drawn on top of line elements (like in a boxplot)
        for i, group_data in enumerate(self.plot_data):
            if self.plot_hues is None or not self.split:

                if self.hue_names is None:
                    # Fixed: the `np.bool` alias was removed from numpy;
                    # the builtin `bool` is the dtype it aliased.
                    hue_mask = np.ones(group_data.size, bool)
                else:
                    hue_mask = np.array([h in self.hue_names
                                         for h in self.plot_hues[i]], bool)
                    # Broken on older numpys
                    # hue_mask = np.in1d(self.plot_hues[i], self.hue_names)

                strip_data = group_data[hue_mask]

                # Plot the points in centered positions
                cat_pos = np.ones(strip_data.size) * i
                cat_pos += self.jitterer(len(strip_data))
                kws.update(c=self.point_colors[i][hue_mask])
                if self.orient == "v":
                    ax.scatter(cat_pos, strip_data, **kws)
                else:
                    ax.scatter(strip_data, cat_pos, **kws)

            else:
                offsets = self.hue_offsets
                for j, hue_level in enumerate(self.hue_names):
                    hue_mask = self.plot_hues[i] == hue_level
                    strip_data = group_data[hue_mask]

                    # Plot the points in centered positions
                    center = i + offsets[j]
                    cat_pos = np.ones(strip_data.size) * center
                    cat_pos += self.jitterer(len(strip_data))
                    kws.update(c=self.point_colors[i][hue_mask])
                    if self.orient == "v":
                        ax.scatter(cat_pos, strip_data, **kws)
                    else:
                        ax.scatter(strip_data, cat_pos, **kws)

    def plot(self, ax, kws):
        """Make the plot."""
        self.draw_stripplot(ax, kws)
        self.add_legend_data(ax)
        self.annotate_axes(ax)
        if self.orient == "h":
            ax.invert_yaxis()
class _SwarmPlotter(_CategoricalScatterPlotter):
    """Categorical scatterplot that shifts points to avoid overlap."""

    def __init__(self, x, y, hue, data, order, hue_order,
                 split, orient, color, palette):
        """Initialize the plotter."""
        self.establish_variables(x, y, hue, data, orient, order, hue_order)
        self.establish_colors(color, palette, 1)

        # Set object attributes
        self.split = split
        self.width = .8

    def overlap(self, xy_i, xy_j, d):
        """Return True if two circles with the same diameter will overlap."""
        x_i, y_i = xy_i
        x_j, y_j = xy_j
        return ((x_i - x_j) ** 2 + (y_i - y_j) ** 2) < (d ** 2)

    def could_overlap(self, xy_i, swarm, d):
        """Return a list of all swarm points that could overlap with target.

        Assumes that swarm is a sorted list of all points below xy_i.
        """
        _, y_i = xy_i
        neighbors = []
        # Walk down from the top of the swarm; once a point is more than
        # one diameter away, no earlier point can overlap either.
        for xy_j in reversed(swarm):
            _, y_j = xy_j
            if (y_i - y_j) < d:
                neighbors.append(xy_j)
            else:
                break
        return list(reversed(neighbors))

    def position_candidates(self, xy_i, neighbors, d):
        """Return a list of (x, y) coordinates that might be valid."""
        candidates = [xy_i]
        x_i, y_i = xy_i
        left_first = True
        for x_j, y_j in neighbors:
            dy = y_i - y_j
            # Offset that puts the point just tangent to this neighbor
            # (1.05 adds a little padding)
            dx = np.sqrt(d ** 2 - dy ** 2) * 1.05
            cl, cr = (x_j - dx, y_i), (x_j + dx, y_i)
            if left_first:
                new_candidates = [cl, cr]
            else:
                new_candidates = [cr, cl]
            candidates.extend(new_candidates)
            # Alternate sides so the swarm grows symmetrically
            left_first = not left_first
        return candidates

    def prune_candidates(self, candidates, neighbors, d):
        """Remove candidates from the list if they overlap with the swarm."""
        good_candidates = []
        for xy_i in candidates:
            good_candidate = True
            for xy_j in neighbors:
                if self.overlap(xy_i, xy_j, d):
                    good_candidate = False
            if good_candidate:
                good_candidates.append(xy_i)
        return np.array(good_candidates)

    def beeswarm(self, orig_xy, d):
        """Adjust x position of points to avoid overlaps."""
        # In this method, ``x`` is always the categorical axis
        # Center of the swarm, in point coordinates
        midline = orig_xy[0, 0]

        # Start the swarm with the first point
        swarm = [orig_xy[0]]

        # Loop over the remaining points
        for xy_i in orig_xy[1:]:

            # Find the points in the swarm that could possibly
            # overlap with the point we are currently placing
            neighbors = self.could_overlap(xy_i, swarm, d)

            # Find positions that would be valid individually
            # with respect to each of the swarm neighbors
            candidates = self.position_candidates(xy_i, neighbors, d)

            # Remove the positions that overlap with any of the
            # other neighbors
            candidates = self.prune_candidates(candidates, neighbors, d)

            # Find the most central of the remaining positions
            offsets = np.abs(candidates[:, 0] - midline)
            best_index = np.argmin(offsets)
            new_xy_i = candidates[best_index]
            swarm.append(new_xy_i)

        return np.array(swarm)

    def add_gutters(self, points, center, width):
        """Stop points from extending beyond their territory."""
        half_width = width / 2
        low_gutter = center - half_width
        off_low = points < low_gutter
        if off_low.any():
            points[off_low] = low_gutter
        high_gutter = center + half_width
        off_high = points > high_gutter
        if off_high.any():
            points[off_high] = high_gutter
        return points

    def swarm_points(self, ax, points, center, width, s, **kws):
        """Find new positions on the categorical axis for each point."""
        # Convert from point size (area) to diameter
        default_lw = mpl.rcParams["patch.linewidth"]
        lw = kws.get("linewidth", kws.get("lw", default_lw))
        d = np.sqrt(s) + lw

        # Transform the data coordinates to point coordinates.
        # We'll figure out the swarm positions in the latter
        # and then convert back to data coordinates and replot
        orig_xy = ax.transData.transform(points.get_offsets())

        # Order the variables so that x is the categorical axis
        if self.orient == "h":
            orig_xy = orig_xy[:, [1, 0]]

        # Do the beeswarm in point coordinates
        new_xy = self.beeswarm(orig_xy, d)

        # Transform the point coordinates back to data coordinates
        if self.orient == "h":
            new_xy = new_xy[:, [1, 0]]
        new_x, new_y = ax.transData.inverted().transform(new_xy).T

        # Add gutters
        if self.orient == "v":
            self.add_gutters(new_x, center, width)
        else:
            self.add_gutters(new_y, center, width)

        # Reposition the points so they do not overlap
        points.set_offsets(np.c_[new_x, new_y])

    def draw_swarmplot(self, ax, kws):
        """Plot the data."""
        s = kws.pop("s")

        centers = []
        swarms = []

        # Set the categorical axes limits here for the swarm math
        if self.orient == "v":
            ax.set_xlim(-.5, len(self.plot_data) - .5)
        else:
            ax.set_ylim(-.5, len(self.plot_data) - .5)

        # Plot each swarm
        for i, group_data in enumerate(self.plot_data):

            if self.plot_hues is None or not self.split:

                width = self.width

                if self.hue_names is None:
                    # Fixed: the `np.bool` alias was removed from numpy;
                    # the builtin `bool` is the dtype it aliased.
                    hue_mask = np.ones(group_data.size, bool)
                else:
                    hue_mask = np.array([h in self.hue_names
                                         for h in self.plot_hues[i]], bool)
                    # Broken on older numpys
                    # hue_mask = np.in1d(self.plot_hues[i], self.hue_names)

                swarm_data = group_data[hue_mask]

                # Sort the points for the beeswarm algorithm
                sorter = np.argsort(swarm_data)
                swarm_data = swarm_data[sorter]
                point_colors = self.point_colors[i][hue_mask][sorter]

                # Plot the points in centered positions
                cat_pos = np.ones(swarm_data.size) * i
                kws.update(c=point_colors)
                if self.orient == "v":
                    points = ax.scatter(cat_pos, swarm_data, s=s, **kws)
                else:
                    points = ax.scatter(swarm_data, cat_pos, s=s, **kws)

                centers.append(i)
                swarms.append(points)

            else:
                offsets = self.hue_offsets
                width = self.nested_width

                for j, hue_level in enumerate(self.hue_names):
                    hue_mask = self.plot_hues[i] == hue_level
                    swarm_data = group_data[hue_mask]

                    # Sort the points for the beeswarm algorithm
                    sorter = np.argsort(swarm_data)
                    swarm_data = swarm_data[sorter]
                    point_colors = self.point_colors[i][hue_mask][sorter]

                    # Plot the points in centered positions
                    center = i + offsets[j]
                    cat_pos = np.ones(swarm_data.size) * center
                    kws.update(c=point_colors)
                    if self.orient == "v":
                        points = ax.scatter(cat_pos, swarm_data, s=s, **kws)
                    else:
                        points = ax.scatter(swarm_data, cat_pos, s=s, **kws)

                    centers.append(center)
                    swarms.append(points)

        # Update the position of each point on the categorical axis
        # Do this after plotting so that the numerical axis limits are correct
        for center, swarm in zip(centers, swarms):
            if swarm.get_offsets().size:
                self.swarm_points(ax, swarm, center, width, s, **kws)

    def plot(self, ax, kws):
        """Make the full plot."""
        self.draw_swarmplot(ax, kws)
        self.add_legend_data(ax)
        self.annotate_axes(ax)
        if self.orient == "h":
            ax.invert_yaxis()
class _CategoricalStatPlotter(_CategoricalPlotter):
    """Shared logic for plotters that show an estimate with a CI."""

    @property
    def nested_width(self):
        """A float with the width of plot elements when hue nesting is used."""
        return self.width / len(self.hue_names)

    def estimate_statistic(self, estimator, ci, n_boot):
        """Apply `estimator` per group (and hue level) with bootstrap CIs.

        Stores results as ``self.statistic`` and ``self.confint`` and
        renames ``self.value_label`` to include the estimator name.
        Empty groups get NaN placeholders so the arrays stay rectangular.
        """
        if self.hue_names is None:
            statistic = []
            confint = []
        else:
            statistic = [[] for _ in self.plot_data]
            confint = [[] for _ in self.plot_data]
        for i, group_data in enumerate(self.plot_data):
            # Option 1: we have a single layer of grouping
            # --------------------------------------------
            if self.plot_hues is None:
                if self.plot_units is None:
                    stat_data = remove_na(group_data)
                    unit_data = None
                else:
                    # Keep only rows where both the value and unit are non-null
                    unit_data = self.plot_units[i]
                    have = pd.notnull(np.c_[group_data, unit_data]).all(axis=1)
                    stat_data = group_data[have]
                    unit_data = unit_data[have]
                # Estimate a statistic from the vector of data
                if not stat_data.size:
                    statistic.append(np.nan)
                else:
                    statistic.append(estimator(stat_data))
                # Get a confidence interval for this estimate
                if ci is not None:
                    # A CI is meaningless for fewer than two observations
                    if stat_data.size < 2:
                        confint.append([np.nan, np.nan])
                        continue
                    boots = bootstrap(stat_data, func=estimator,
                                      n_boot=n_boot,
                                      units=unit_data)
                    confint.append(utils.ci(boots, ci))
            # Option 2: we are grouping by a hue layer
            # ----------------------------------------
            else:
                for j, hue_level in enumerate(self.hue_names):
                    # No hue data at all for this group: NaN placeholders
                    if not self.plot_hues[i].size:
                        statistic[i].append(np.nan)
                        if ci is not None:
                            confint[i].append((np.nan, np.nan))
                        continue
                    hue_mask = self.plot_hues[i] == hue_level
                    if self.plot_units is None:
                        stat_data = remove_na(group_data[hue_mask])
                        unit_data = None
                    else:
                        group_units = self.plot_units[i]
                        have = pd.notnull(
                            np.c_[group_data, group_units]
                            ).all(axis=1)
                        stat_data = group_data[hue_mask & have]
                        unit_data = group_units[hue_mask & have]
                    # Estimate a statistic from the vector of data
                    if not stat_data.size:
                        statistic[i].append(np.nan)
                    else:
                        statistic[i].append(estimator(stat_data))
                    # Get a confidence interval for this estimate
                    if ci is not None:
                        if stat_data.size < 2:
                            confint[i].append([np.nan, np.nan])
                            continue
                        boots = bootstrap(stat_data, func=estimator,
                                          n_boot=n_boot,
                                          units=unit_data)
                        confint[i].append(utils.ci(boots, ci))
        # Save the resulting values for plotting
        self.statistic = np.array(statistic)
        self.confint = np.array(confint)
        # Rename the value label to reflect the estimation
        if self.value_label is not None:
            self.value_label = "{}({})".format(estimator.__name__,
                                               self.value_label)

    def draw_confints(self, ax, at_group, confint, colors,
                      errwidth=None, capsize=None, **kws):
        """Draw an error bar (with optional caps) at each group position."""
        if errwidth is not None:
            kws.setdefault("lw", errwidth)
        else:
            kws.setdefault("lw", mpl.rcParams["lines.linewidth"] * 1.8)
        for at, (ci_low, ci_high), color in zip(at_group,
                                                confint,
                                                colors):
            if self.orient == "v":
                ax.plot([at, at], [ci_low, ci_high], color=color, **kws)
                if capsize is not None:
                    ax.plot([at - capsize / 2, at + capsize / 2],
                            [ci_low, ci_low], color=color, **kws)
                    ax.plot([at - capsize / 2, at + capsize / 2],
                            [ci_high, ci_high], color=color, **kws)
            else:
                ax.plot([ci_low, ci_high], [at, at], color=color, **kws)
                if capsize is not None:
                    ax.plot([ci_low, ci_low],
                            [at - capsize / 2, at + capsize / 2],
                            color=color, **kws)
                    ax.plot([ci_high, ci_high],
                            [at - capsize / 2, at + capsize / 2],
                            color=color, **kws)
class _BarPlotter(_CategoricalStatPlotter):
    """Show point estimates and confidence intervals with bars."""

    def __init__(self, x, y, hue, data, order, hue_order,
                 estimator, ci, n_boot, units,
                 orient, color, palette, saturation, errcolor, errwidth=None,
                 capsize=None):
        """Initialize the plotter."""
        self.establish_variables(x, y, hue, data, orient,
                                 order, hue_order, units)
        self.establish_colors(color, palette, saturation)
        self.estimate_statistic(estimator, ci, n_boot)

        self.errcolor = errcolor
        self.errwidth = errwidth
        self.capsize = capsize

    def draw_bars(self, ax, kws):
        """Draw the bars onto `ax`."""
        # Vertical and horizontal bars use different matplotlib methods
        draw_bar = ax.bar if self.orient == "v" else ax.barh
        positions = np.arange(len(self.statistic))

        if self.plot_hues is None:
            # Single level of grouping: one bar per category
            draw_bar(positions, self.statistic, self.width,
                     color=self.colors, align="center", **kws)
            # Error bars on top of the bars
            self.draw_confints(ax,
                               positions,
                               self.confint,
                               [self.errcolor] * len(positions),
                               self.errwidth,
                               self.capsize)
        else:
            # Nested grouping: a cluster of offset bars per category
            for j, hue_level in enumerate(self.hue_names):
                shifted = positions + self.hue_offsets[j]
                draw_bar(shifted, self.statistic[:, j], self.nested_width,
                         color=self.colors[j], align="center",
                         label=hue_level, **kws)
                if self.confint.size:
                    self.draw_confints(ax,
                                       shifted,
                                       self.confint[:, j],
                                       [self.errcolor] * len(shifted),
                                       self.errwidth,
                                       self.capsize)

    def plot(self, ax, bar_kws):
        """Make the plot."""
        self.draw_bars(ax, bar_kws)
        self.annotate_axes(ax)
        if self.orient == "h":
            ax.invert_yaxis()
class _PointPlotter(_CategoricalStatPlotter):
    """Show point estimates and confidence intervals with (joined) points."""
    def __init__(self, x, y, hue, data, order, hue_order,
                 estimator, ci, n_boot, units,
                 markers, linestyles, dodge, join, scale,
                 orient, color, palette, errwidth=None, capsize=None):
        """Initialize the plotter."""
        self.establish_variables(x, y, hue, data, orient,
                                 order, hue_order, units)
        self.establish_colors(color, palette, 1)
        self.estimate_statistic(estimator, ci, n_boot)
        # Override the default palette for single-color plots
        if hue is None and color is None and palette is None:
            self.colors = [color_palette()[0]] * len(self.colors)
        # Don't join single-layer plots with different colors
        if hue is None and palette is not None:
            join = False
        # Use a good default for `dodge=True`
        if dodge is True and self.hue_names is not None:
            dodge = .025 * len(self.hue_names)
        # Make sure we have a marker for each hue level
        # (string_types presumably comes from the module's py2/py3 compat
        # imports — likely six; not visible in this chunk)
        if isinstance(markers, string_types):
            markers = [markers] * len(self.colors)
        self.markers = markers
        # Make sure we have a line style for each hue level
        if isinstance(linestyles, string_types):
            linestyles = [linestyles] * len(self.colors)
        self.linestyles = linestyles
        # Set the other plot components
        self.dodge = dodge
        self.join = join
        self.scale = scale
        self.errwidth = errwidth
        self.capsize = capsize

    @property
    def hue_offsets(self):
        """Offsets relative to the center position for each hue level."""
        # Overrides the base-class behavior: spread is set by `dodge`
        offset = np.linspace(0, self.dodge, len(self.hue_names))
        offset -= offset.mean()
        return offset

    def draw_points(self, ax):
        """Draw the main data components of the plot."""
        # Get the center positions on the categorical axis
        pointpos = np.arange(len(self.statistic))
        # Get the size of the plot elements
        lw = mpl.rcParams["lines.linewidth"] * 1.8 * self.scale
        mew = lw * .75
        markersize = np.pi * np.square(lw) * 2
        if self.plot_hues is None:
            # Draw lines joining each estimate point
            if self.join:
                color = self.colors[0]
                ls = self.linestyles[0]
                if self.orient == "h":
                    ax.plot(self.statistic, pointpos,
                            color=color, ls=ls, lw=lw)
                else:
                    ax.plot(pointpos, self.statistic,
                            color=color, ls=ls, lw=lw)
            # Draw the confidence intervals
            self.draw_confints(ax, pointpos, self.confint, self.colors,
                               self.errwidth, self.capsize)
            # Draw the estimate points
            marker = self.markers[0]
            if self.orient == "h":
                ax.scatter(self.statistic, pointpos,
                           linewidth=mew, marker=marker, s=markersize,
                           c=self.colors, edgecolor=self.colors)
            else:
                ax.scatter(pointpos, self.statistic,
                           linewidth=mew, marker=marker, s=markersize,
                           c=self.colors, edgecolor=self.colors)
        else:
            offsets = self.hue_offsets
            for j, hue_level in enumerate(self.hue_names):
                # Determine the values to plot for this level
                statistic = self.statistic[:, j]
                # Determine the position on the categorical and z axes
                offpos = pointpos + offsets[j]
                # Later hue levels draw on top of earlier ones
                z = j + 1
                # Draw lines joining each estimate point
                if self.join:
                    color = self.colors[j]
                    ls = self.linestyles[j]
                    if self.orient == "h":
                        ax.plot(statistic, offpos, color=color,
                                zorder=z, ls=ls, lw=lw)
                    else:
                        ax.plot(offpos, statistic, color=color,
                                zorder=z, ls=ls, lw=lw)
                # Draw the confidence intervals
                if self.confint.size:
                    confint = self.confint[:, j]
                    errcolors = [self.colors[j]] * len(offpos)
                    self.draw_confints(ax, offpos, confint, errcolors,
                                       self.errwidth, self.capsize,
                                       zorder=z)
                # Draw the estimate points
                marker = self.markers[j]
                if self.orient == "h":
                    ax.scatter(statistic, offpos, label=hue_level,
                               c=[self.colors[j]] * len(offpos),
                               linewidth=mew, marker=marker, s=markersize,
                               edgecolor=self.colors[j], zorder=z)
                else:
                    ax.scatter(offpos, statistic, label=hue_level,
                               c=[self.colors[j]] * len(offpos),
                               linewidth=mew, marker=marker, s=markersize,
                               edgecolor=self.colors[j], zorder=z)

    def plot(self, ax):
        """Make the plot."""
        self.draw_points(ax)
        self.annotate_axes(ax)
        if self.orient == "h":
            ax.invert_yaxis()
class _LVPlotter(_CategoricalPlotter):
    """Letter-value ("boxen") plot of nested letter-value boxes."""

    def __init__(self, x, y, hue, data, order, hue_order,
                 orient, color, palette, saturation,
                 width, k_depth, linewidth, scale, outlier_prop):
        # Fill in defaults for any unspecified appearance parameters
        if width is None:
            width = .8
        self.width = width
        if saturation is None:
            saturation = .75
        self.saturation = saturation
        if k_depth is None:
            k_depth = 'proportion'
        self.k_depth = k_depth
        if linewidth is None:
            linewidth = mpl.rcParams["lines.linewidth"]
        self.linewidth = linewidth
        if scale is None:
            scale = 'exponential'
        self.scale = scale
        self.outlier_prop = outlier_prop
        self.establish_variables(x, y, hue, data, orient, order, hue_order)
        self.establish_colors(color, palette, saturation)

    def _lv_box_ends(self, vals, k_depth='proportion', outlier_prop=None):
        """Get the number of data points and calculate `depth` of
        letter-value plot."""
        vals = np.asarray(vals)
        # Drop NaN/inf values before computing percentiles
        vals = vals[np.isfinite(vals)]
        n = len(vals)
        # If p is not set, calculate it so that 8 points are outliers
        if not outlier_prop:
            # Conventional boxplots assume this proportion of the data are
            # outliers.
            p = 0.007
        else:
            if ((outlier_prop > 1.) or (outlier_prop < 0.)):
                raise ValueError('outlier_prop not in range [0, 1]!')
            p = outlier_prop
        # Select the depth, i.e. number of boxes to draw, based on the method
        k_dict = {'proportion': (np.log2(n)) - int(np.log2(n*p)) + 1,
                  'tukey': (np.log2(n)) - 3,
                  'trustworthy': (np.log2(n) -
                                  np.log2(2*stats.norm.ppf((1-p))**2)) + 1}
        k = k_dict[k_depth]
        try:
            k = int(k)
        except ValueError:
            k = 1
        # If the number happens to be less than 0, set k to 0
        if k < 1.:
            k = 1
        # Calculate the upper box ends
        upper = [100*(1 - 0.5**(i+2)) for i in range(k, -1, -1)]
        # Calculate the lower box ends
        lower = [100*(0.5**(i+2)) for i in range(k, -1, -1)]
        # Stitch the box ends together
        percentile_ends = [(i, j) for i, j in zip(lower, upper)]
        box_ends = [np.percentile(vals, q) for q in percentile_ends]
        return box_ends, k

    def _lv_outliers(self, vals, k):
        """Find the outliers based on the letter value depth."""
        # Outliers fall outside the innermost pair of letter-value ends
        perc_ends = (100*(0.5**(k+2)), 100*(1 - 0.5**(k+2)))
        edges = np.percentile(vals, perc_ends)
        lower_out = vals[np.where(vals < edges[0])[0]]
        upper_out = vals[np.where(vals > edges[1])[0]]
        return np.concatenate((lower_out, upper_out))

    def _width_functions(self, width_func):
        # Dictionary of functions for computing the width of the boxes
        width_functions = {'linear': lambda h, i, k: (i + 1.) / k,
                           'exponential': lambda h, i, k: 2**(-k+i-1),
                           'area': lambda h, i, k: (1 - 2**(-k+i-2)) / h}
        return width_functions[width_func]

    # NOTE(review): the list default for `color` is a mutable default
    # argument; it is never mutated here, but a tuple would be safer.
    def _lvplot(self, box_data, positions,
                color=[255. / 256., 185. / 256., 0.],
                vert=True, widths=1, k_depth='proportion',
                ax=None, outlier_prop=None, scale='exponential',
                **kws):
        x = positions[0]
        box_data = np.asarray(box_data)
        # If we only have one data point, plot a line
        if len(box_data) == 1:
            kws.update({'color': self.gray, 'linestyle': '-'})
            ys = [box_data[0], box_data[0]]
            xs = [x - widths / 2, x + widths / 2]
            if vert:
                xx, yy = xs, ys
            else:
                xx, yy = ys, xs
            ax.plot(xx, yy, **kws)
        else:
            # Get the number of data points and calculate "depth" of
            # letter-value plot
            box_ends, k = self._lv_box_ends(box_data, k_depth=k_depth,
                                            outlier_prop=outlier_prop)
            # Anonymous functions for calculating the width and height
            # of the letter value boxes
            width = self._width_functions(scale)

            # Function to find height of boxes
            def height(b):
                return b[1] - b[0]

            # Functions to construct the letter value boxes
            # (Patches is presumably matplotlib.patches — the import is
            # outside this chunk)
            def vert_perc_box(x, b, i, k, w):
                rect = Patches.Rectangle((x - widths*w / 2, b[0]),
                                         widths*w,
                                         height(b), fill=True)
                return rect

            def horz_perc_box(x, b, i, k, w):
                rect = Patches.Rectangle((b[0], x - widths*w / 2),
                                         height(b), widths*w,
                                         fill=True)
                return rect

            # Scale the width of the boxes so the biggest starts at 1
            w_area = np.array([width(height(b), i, k)
                               for i, b in enumerate(box_ends)])
            w_area = w_area / np.max(w_area)
            # Calculate the medians
            y = np.median(box_data)
            # Calculate the outliers and plot
            outliers = self._lv_outliers(box_data, k)
            if vert:
                boxes = [vert_perc_box(x, b[0], i, k, b[1])
                         for i, b in enumerate(zip(box_ends, w_area))]
                # Plot the medians
                ax.plot([x - widths / 2, x + widths / 2], [y, y],
                        c='.15', alpha=.45, **kws)
                ax.scatter(np.repeat(x, len(outliers)), outliers,
                           marker='d', c=mpl.colors.rgb2hex(color), **kws)
            else:
                boxes = [horz_perc_box(x, b[0], i, k, b[1])
                         for i, b in enumerate(zip(box_ends, w_area))]
                # Plot the medians
                ax.plot([y, y], [x - widths / 2, x + widths / 2],
                        c='.15', alpha=.45, **kws)
                ax.scatter(outliers, np.repeat(x, len(outliers)),
                           marker='d', c=color, **kws)
            # Construct a color map from the input color
            rgb = [[1, 1, 1], list(color)]
            cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)
            collection = PatchCollection(boxes, cmap=cmap)
            # Set the color gradation
            collection.set_array(np.array(np.linspace(0, 1, len(boxes))))
            # Plot the boxes
            ax.add_collection(collection)

    def draw_letter_value_plot(self, ax, kws):
        """Use matplotlib to draw a letter value plot on an Axes."""
        vert = self.orient == "v"
        for i, group_data in enumerate(self.plot_data):
            if self.plot_hues is None:
                # Handle case where there is data at this level
                if group_data.size == 0:
                    continue
                # Draw a single box or a set of boxes
                # with a single level of grouping
                box_data = remove_na(group_data)
                # Handle case where there is no non-null data
                if box_data.size == 0:
                    continue
                color = self.colors[i]
                # NOTE(review): _lvplot has no return statement, so
                # artist_dict is always None
                artist_dict = self._lvplot(box_data,
                                           positions=[i],
                                           color=color,
                                           vert=vert,
                                           widths=self.width,
                                           k_depth=self.k_depth,
                                           ax=ax,
                                           scale=self.scale,
                                           outlier_prop=self.outlier_prop,
                                           **kws)
            else:
                # Draw nested groups of boxes
                offsets = self.hue_offsets
                for j, hue_level in enumerate(self.hue_names):
                    # Add a legend for this hue level
                    # (only for the first group, so each level appears once)
                    if not i:
                        self.add_legend_data(ax, self.colors[j], hue_level)
                    # Handle case where there is data at this level
                    if group_data.size == 0:
                        continue
                    hue_mask = self.plot_hues[i] == hue_level
                    box_data = remove_na(group_data[hue_mask])
                    # Handle case where there is no non-null data
                    if box_data.size == 0:
                        continue
                    color = self.colors[j]
                    center = i + offsets[j]
                    artist_dict = self._lvplot(box_data,
                                               positions=[center],
                                               color=color,
                                               vert=vert,
                                               widths=self.nested_width,
                                               k_depth=self.k_depth,
                                               ax=ax,
                                               scale=self.scale,
                                               outlier_prop=self.outlier_prop,
                                               **kws)

    def plot(self, ax, boxplot_kws):
        """Make the plot."""
        self.draw_letter_value_plot(ax, boxplot_kws)
        self.annotate_axes(ax)
        if self.orient == "h":
            ax.invert_yaxis()
# Shared docstring fragments for the categorical plotting functions; each
# public function builds its __doc__ after its definition by interpolating
# these pieces with ``.format(**_categorical_docs)``.
_categorical_docs = dict(
    # Shared narrative docs
    main_api_narrative=dedent("""\
    Input data can be passed in a variety of formats, including:
    - Vectors of data represented as lists, numpy arrays, or pandas Series
      objects passed directly to the ``x``, ``y``, and/or ``hue`` parameters.
    - A "long-form" DataFrame, in which case the ``x``, ``y``, and ``hue``
      variables will determine how the data are plotted.
    - A "wide-form" DataFrame, such that each numeric column will be plotted.
    - Anything accepted by ``plt.boxplot`` (e.g. a 2d array or list of vectors)
    In most cases, it is possible to use numpy or Python objects, but pandas
    objects are preferable because the associated names will be used to
    annotate the axes. Additionally, you can use Categorical types for the
    grouping variables to control the order of plot elements.\
    """),
    # Shared function parameters
    input_params=dedent("""\
    x, y, hue : names of variables in ``data`` or vector data, optional
        Inputs for plotting long-form data. See examples for interpretation.\
    """),
    string_input_params=dedent("""\
    x, y, hue : names of variables in ``data``
        Inputs for plotting long-form data. See examples for interpretation.\
    """),
    categorical_data=dedent("""\
    data : DataFrame, array, or list of arrays, optional
        Dataset for plotting. If ``x`` and ``y`` are absent, this is
        interpreted as wide-form. Otherwise it is expected to be long-form.\
    """),
    long_form_data=dedent("""\
    data : DataFrame
        Long-form (tidy) dataset for plotting. Each column should correspond
        to a variable, and each row should correspond to an observation.\
    """),
    order_vars=dedent("""\
    order, hue_order : lists of strings, optional
        Order to plot the categorical levels in, otherwise the levels are
        inferred from the data objects.\
    """),
    stat_api_params=dedent("""\
    estimator : callable that maps vector -> scalar, optional
        Statistical function to estimate within each categorical bin.
    ci : float or None, optional
        Size of confidence intervals to draw around estimated values. If
        ``None``, no bootstrapping will be performed, and error bars will
        not be drawn.
    n_boot : int, optional
        Number of bootstrap iterations to use when computing confidence
        intervals.
    units : name of variable in ``data`` or vector data, optional
        Identifier of sampling units, which will be used to perform a
        multilevel bootstrap and account for repeated measures design.\
    """),
    orient=dedent("""\
    orient : "v" | "h", optional
        Orientation of the plot (vertical or horizontal). This is usually
        inferred from the dtype of the input variables, but can be used to
        specify when the "categorical" variable is a numeric or when plotting
        wide-form data.\
    """),
    color=dedent("""\
    color : matplotlib color, optional
        Color for all of the elements, or seed for :func:`light_palette` when
        using hue nesting.\
    """),
    palette=dedent("""\
    palette : palette name, list, or dict, optional
        Color palette that maps either the grouping variable or the hue
        variable. If the palette is a dictionary, keys should be names of
        levels and values should be matplotlib colors.\
    """),
    saturation=dedent("""\
    saturation : float, optional
        Proportion of the original saturation to draw colors at. Large patches
        often look better with slightly desaturated colors, but set this to
        ``1`` if you want the plot colors to perfectly match the input color
        spec.\
    """),
    capsize=dedent("""\
    capsize : float, optional
        Width of the "caps" on error bars.\
    """),
    errwidth=dedent("""\
    errwidth : float, optional
        Thickness of error bar lines (and caps).\
    """),
    width=dedent("""\
    width : float, optional
        Width of a full element when not using hue nesting, or width of all the
        elements for one level of the major grouping variable.\
    """),
    linewidth=dedent("""\
    linewidth : float, optional
        Width of the gray lines that frame the plot elements.\
    """),
    ax_in=dedent("""\
    ax : matplotlib Axes, optional
        Axes object to draw the plot onto, otherwise uses the current Axes.\
    """),
    ax_out=dedent("""\
    ax : matplotlib Axes
        Returns the Axes object with the boxplot drawn onto it.\
    """),
    # Shared see also
    boxplot=dedent("""\
    boxplot : A traditional box-and-whisker plot with a similar API.\
    """),
    violinplot=dedent("""\
    violinplot : A combination of boxplot and kernel density estimation.\
    """),
    stripplot=dedent("""\
    stripplot : A scatterplot where one variable is categorical. Can be used
                in conjunction with other plots to show each observation.\
    """),
    swarmplot=dedent("""\
    swarmplot : A categorical scatterplot where the points do not overlap. Can
                be used with other plots to show each observation.\
    """),
    barplot=dedent("""\
    barplot : Show point estimates and confidence intervals using bars.\
    """),
    countplot=dedent("""\
    countplot : Show the counts of observations in each categorical bin.\
    """),
    pointplot=dedent("""\
    pointplot : Show point estimates and confidence intervals using scatterplot
                glyphs.\
    """),
    factorplot=dedent("""\
    factorplot : Combine categorical plots and a :class:`FacetGrid`.\
    """),
    lvplot=dedent("""\
    lvplot : An extension of the boxplot for long-tailed and large data sets.\
    """),
    )
_categorical_docs.update(_facet_docs)
def boxplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
            orient=None, color=None, palette=None, saturation=.75,
            width=.8, fliersize=5, linewidth=None, whis=1.5, notch=False,
            ax=None, **kwargs):
    """Draw a box plot to show distributions with respect to categories."""
    # Translate pre-0.6 call signatures on a best-effort basis; any
    # adjustment triggers the deprecation warning below.
    warn = False
    if isinstance(x, pd.DataFrame):
        # The old API took the dataset as the first positional argument.
        data, x = x, None
        warn = True
    if "vals" in kwargs:
        x = kwargs.pop("vals")
        warn = True
    if "groupby" in kwargs:
        y, x = x, kwargs.pop("groupby")
        warn = True
    if "vert" in kwargs:
        vert = kwargs.pop("vert", True)
        if not vert:
            x, y = y, x
        orient = "v" if vert else "h"
        warn = True
    # These parameters no longer exist; drop them silently (but still warn).
    for dead_arg in ("names", "join_rm"):
        if dead_arg in kwargs:
            kwargs.pop(dead_arg)
            warn = True
    if warn:
        msg = ("The boxplot API has been changed. Attempting to adjust your "
               "arguments for the new API (which might not work). Please update "
               "your code. See the version 0.6 release notes for more info.")
        warnings.warn(msg, UserWarning)
    plotter = _BoxPlotter(x, y, hue, data, order, hue_order,
                          orient, color, palette, saturation,
                          width, fliersize, linewidth)
    if ax is None:
        ax = plt.gca()
    # Pass the whisker/notch options through to plt.boxplot with the rest.
    kwargs.update(dict(whis=whis, notch=notch))
    plotter.plot(ax, kwargs)
    return ax
boxplot.__doc__ = dedent("""\
Draw a box plot to show distributions with respect to categories.
A box plot (or box-and-whisker plot) shows the distribution of quantitative
data in a way that facilitates comparisons between variables or across
levels of a categorical variable. The box shows the quartiles of the
dataset while the whiskers extend to show the rest of the distribution,
except for points that are determined to be "outliers" using a method
that is a function of the inter-quartile range.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{orient}
{color}
{palette}
{saturation}
{width}
fliersize : float, optional
Size of the markers used to indicate outlier observations.
{linewidth}
whis : float, optional
Proportion of the IQR past the low and high quartiles to extend the
plot whiskers. Points outside this range will be identified as
outliers.
notch : boolean, optional
Whether to "notch" the box to indicate a confidence interval for the
median. There are several other parameters that can control how the
notches are drawn; see the ``plt.boxplot`` help for more information
on them.
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed through to ``plt.boxplot`` at draw
time.
Returns
-------
{ax_out}
See Also
--------
{violinplot}
{stripplot}
{swarmplot}
Examples
--------
Draw a single horizontal boxplot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.boxplot(x=tips["total_bill"])
Draw a vertical boxplot grouped by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", data=tips)
Draw a boxplot with nested grouping by two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="Set3")
Draw a boxplot with nested grouping when some bins are empty:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", hue="time",
... data=tips, linewidth=2.5)
Control box order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="time", y="tip", data=tips,
... order=["Dinner", "Lunch"])
Draw a boxplot for each numeric variable in a DataFrame:
.. plot::
:context: close-figs
>>> iris = sns.load_dataset("iris")
>>> ax = sns.boxplot(data=iris, orient="h", palette="Set2")
Use :func:`swarmplot` to show the datapoints on top of the boxes:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", data=tips)
>>> ax = sns.swarmplot(x="day", y="total_bill", data=tips, color=".25")
Draw a box plot on to a :class:`FacetGrid` to group within an additional
categorical variable:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", size=4, aspect=.7)
>>> (g.map(sns.boxplot, "sex", "total_bill", "smoker")
... .despine(left=True)
... .add_legend(title="smoker")) #doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
def violinplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
               bw="scott", cut=2, scale="area", scale_hue=True, gridsize=100,
               width=.8, inner="box", split=False, orient=None, linewidth=None,
               color=None, palette=None, saturation=.75, ax=None, **kwargs):
    """Draw a combination of boxplot and kernel density estimate."""
    # Translate pre-0.6 call signatures on a best-effort basis; any
    # adjustment triggers the deprecation warning below.
    warn = False
    if isinstance(x, pd.DataFrame):
        # The old API took the dataset as the first positional argument.
        data, x = x, None
        warn = True
    if "vals" in kwargs:
        x = kwargs.pop("vals")
        warn = True
    if "groupby" in kwargs:
        y, x = x, kwargs.pop("groupby")
        warn = True
    if "vert" in kwargs:
        vert = kwargs.pop("vert", True)
        if not vert:
            x, y = y, x
        orient = "v" if vert else "h"
        warn = True
    if warn:
        msg = ("The violinplot API has been changed. Attempting to adjust your "
               "arguments for the new API (which might not work). Please update "
               "your code. See the version 0.6 release notes for more info.")
        warnings.warn(msg, UserWarning)
    plotter = _ViolinPlotter(x, y, hue, data, order, hue_order,
                             bw, cut, scale, scale_hue, gridsize,
                             width, inner, split, orient, linewidth,
                             color, palette, saturation)
    if ax is None:
        ax = plt.gca()
    plotter.plot(ax)
    return ax
# Public docstring for ``violinplot``, assembled from the shared fragments
# in ``_categorical_docs`` defined earlier in this module.
violinplot.__doc__ = dedent("""\
    Draw a combination of boxplot and kernel density estimate.
    A violin plot plays a similar role as a box and whisker plot. It shows the
    distribution of quantitative data across several levels of one (or more)
    categorical variables such that those distributions can be compared. Unlike
    a box plot, in which all of the plot components correspond to actual
    datapoints, the violin plot features a kernel density estimation of the
    underlying distribution.
    This can be an effective and attractive way to show multiple distributions
    of data at once, but keep in mind that the estimation procedure is
    influenced by the sample size, and violins for relatively small samples
    might look misleadingly smooth.
    {main_api_narrative}
    Parameters
    ----------
    {input_params}
    {categorical_data}
    {order_vars}
    bw : {{'scott', 'silverman', float}}, optional
        Either the name of a reference rule or the scale factor to use when
        computing the kernel bandwidth. The actual kernel size will be
        determined by multiplying the scale factor by the standard deviation of
        the data within each bin.
    cut : float, optional
        Distance, in units of bandwidth size, to extend the density past the
        extreme datapoints. Set to 0 to limit the violin range within the range
        of the observed data (i.e., to have the same effect as ``trim=True`` in
        ``ggplot``).
    scale : {{"area", "count", "width"}}, optional
        The method used to scale the width of each violin. If ``area``, each
        violin will have the same area. If ``count``, the width of the violins
        will be scaled by the number of observations in that bin. If ``width``,
        each violin will have the same width.
    scale_hue : bool, optional
        When nesting violins using a ``hue`` variable, this parameter
        determines whether the scaling is computed within each level of the
        major grouping variable (``scale_hue=True``) or across all the violins
        on the plot (``scale_hue=False``).
    gridsize : int, optional
        Number of points in the discrete grid used to compute the kernel
        density estimate.
    {width}
    inner : {{"box", "quartile", "point", "stick", None}}, optional
        Representation of the datapoints in the violin interior. If ``box``,
        draw a miniature boxplot. If ``quartile``, draw the quartiles of the
        distribution. If ``point`` or ``stick``, show each underlying
        datapoint. Using ``None`` will draw unadorned violins.
    split : bool, optional
        When using hue nesting with a variable that takes two levels, setting
        ``split`` to True will draw half of a violin for each level. This can
        make it easier to directly compare the distributions.
    {orient}
    {linewidth}
    {color}
    {palette}
    {saturation}
    {ax_in}
    Returns
    -------
    {ax_out}
    See Also
    --------
    {boxplot}
    {stripplot}
    {swarmplot}
    Examples
    --------
    Draw a single horizontal violinplot:
    .. plot::
        :context: close-figs
        >>> import seaborn as sns
        >>> sns.set_style("whitegrid")
        >>> tips = sns.load_dataset("tips")
        >>> ax = sns.violinplot(x=tips["total_bill"])
    Draw a vertical violinplot grouped by a categorical variable:
    .. plot::
        :context: close-figs
        >>> ax = sns.violinplot(x="day", y="total_bill", data=tips)
    Draw a violinplot with nested grouping by two categorical variables:
    .. plot::
        :context: close-figs
        >>> ax = sns.violinplot(x="day", y="total_bill", hue="smoker",
        ...                     data=tips, palette="muted")
    Draw split violins to compare across the hue variable:
    .. plot::
        :context: close-figs
        >>> ax = sns.violinplot(x="day", y="total_bill", hue="smoker",
        ...                     data=tips, palette="muted", split=True)
    Control violin order by passing an explicit order:
    .. plot::
        :context: close-figs
        >>> ax = sns.violinplot(x="time", y="tip", data=tips,
        ...                     order=["Dinner", "Lunch"])
    Scale the violin width by the number of observations in each bin:
    .. plot::
        :context: close-figs
        >>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
        ...                     data=tips, palette="Set2", split=True,
        ...                     scale="count")
    Draw the quartiles as horizontal lines instead of a mini-box:
    .. plot::
        :context: close-figs
        >>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
        ...                     data=tips, palette="Set2", split=True,
        ...                     scale="count", inner="quartile")
    Show each observation with a stick inside the violin:
    .. plot::
        :context: close-figs
        >>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
        ...                     data=tips, palette="Set2", split=True,
        ...                     scale="count", inner="stick")
    Scale the density relative to the counts across all bins:
    .. plot::
        :context: close-figs
        >>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
        ...                     data=tips, palette="Set2", split=True,
        ...                     scale="count", inner="stick", scale_hue=False)
    Use a narrow bandwidth to reduce the amount of smoothing:
    .. plot::
        :context: close-figs
        >>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
        ...                     data=tips, palette="Set2", split=True,
        ...                     scale="count", inner="stick",
        ...                     scale_hue=False, bw=.2)
    Draw horizontal violins:
    .. plot::
        :context: close-figs
        >>> planets = sns.load_dataset("planets")
        >>> ax = sns.violinplot(x="orbital_period", y="method",
        ...                     data=planets[planets.orbital_period < 1000],
        ...                     scale="width", palette="Set3")
    Draw a violin plot on to a :class:`FacetGrid` to group within an additional
    categorical variable:
    .. plot::
        :context: close-figs
        >>> g = sns.FacetGrid(tips, col="time", size=4, aspect=.7)
        >>> (g.map(sns.violinplot, "sex", "total_bill", "smoker", split=True)
        ...   .despine(left=True)
        ...   .add_legend(title="smoker"))  # doctest: +ELLIPSIS
    <seaborn.axisgrid.FacetGrid object at 0x...>
    """).format(**_categorical_docs)
def stripplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
              jitter=False, split=False, orient=None, color=None, palette=None,
              size=5, edgecolor="gray", linewidth=0, ax=None, **kwargs):
    """Draw a scatterplot where one variable is categorical."""
    plotter = _StripPlotter(x, y, hue, data, order, hue_order,
                            jitter, split, orient, color, palette)
    if ax is None:
        ax = plt.gca()
    # Keep the points above gridlines and most other artists.
    kwargs.setdefault("zorder", 3)
    # An explicit ``s`` keyword takes precedence over the ``size`` parameter.
    size = kwargs.get("s", size)
    # "gray" is special-cased: derive the outline shade from the palette.
    if edgecolor == "gray":
        edgecolor = plotter.gray
    # Scale a default marker outline with the marker diameter.
    if linewidth is None:
        linewidth = size / 10
    scatter_kws = dict(s=size ** 2,
                       edgecolor=edgecolor,
                       linewidth=linewidth)
    kwargs.update(scatter_kws)
    plotter.plot(ax, kwargs)
    return ax
# Public docstring for ``stripplot``, assembled from the shared fragments
# in ``_categorical_docs`` defined earlier in this module.
stripplot.__doc__ = dedent("""\
    Draw a scatterplot where one variable is categorical.
    A strip plot can be drawn on its own, but it is also a good complement
    to a box or violin plot in cases where you want to show all observations
    along with some representation of the underlying distribution.
    {main_api_narrative}
    Parameters
    ----------
    {input_params}
    {categorical_data}
    {order_vars}
    jitter : float, ``True``/``1`` is special-cased, optional
        Amount of jitter (only along the categorical axis) to apply. This
        can be useful when you have many points and they overlap, so that
        it is easier to see the distribution. You can specify the amount
        of jitter (half the width of the uniform random variable support),
        or just use ``True`` for a good default.
    split : bool, optional
        When using ``hue`` nesting, setting this to ``True`` will separate
        the strips for different hue levels along the categorical axis.
        Otherwise, the points for each level will be plotted on top of
        each other.
    {orient}
    {color}
    {palette}
    size : float, optional
        Diameter of the markers, in points. (Although ``plt.scatter`` is used
        to draw the points, the ``size`` argument here takes a "normal"
        markersize and not size^2 like ``plt.scatter``.)
    edgecolor : matplotlib color, "gray" is special-cased, optional
        Color of the lines around each point. If you pass ``"gray"``, the
        brightness is determined by the color palette used for the body
        of the points.
    {linewidth}
    {ax_in}
    Returns
    -------
    {ax_out}
    See Also
    --------
    {swarmplot}
    {boxplot}
    {violinplot}
    Examples
    --------
    Draw a single horizontal strip plot:
    .. plot::
        :context: close-figs
        >>> import seaborn as sns
        >>> sns.set_style("whitegrid")
        >>> tips = sns.load_dataset("tips")
        >>> ax = sns.stripplot(x=tips["total_bill"])
    Group the strips by a categorical variable:
    .. plot::
        :context: close-figs
        >>> ax = sns.stripplot(x="day", y="total_bill", data=tips)
    Add jitter to bring out the distribution of values:
    .. plot::
        :context: close-figs
        >>> ax = sns.stripplot(x="day", y="total_bill", data=tips, jitter=True)
    Use a smaller amount of jitter:
    .. plot::
        :context: close-figs
        >>> ax = sns.stripplot(x="day", y="total_bill", data=tips, jitter=0.05)
    Draw horizontal strips:
    .. plot::
        :context: close-figs
        >>> ax = sns.stripplot(x="total_bill", y="day", data=tips,
        ...                    jitter=True)
    Draw outlines around the points:
    .. plot::
        :context: close-figs
        >>> ax = sns.stripplot(x="total_bill", y="day", data=tips,
        ...                    jitter=True, linewidth=1)
    Nest the strips within a second categorical variable:
    .. plot::
        :context: close-figs
        >>> ax = sns.stripplot(x="sex", y="total_bill", hue="day",
        ...                    data=tips, jitter=True)
    Draw each level of the ``hue`` variable at different locations on the
    major categorical axis:
    .. plot::
        :context: close-figs
        >>> ax = sns.stripplot(x="day", y="total_bill", hue="smoker",
        ...                    data=tips, jitter=True,
        ...                    palette="Set2", split=True)
    Control strip order by passing an explicit order:
    .. plot::
        :context: close-figs
        >>> ax = sns.stripplot(x="time", y="tip", data=tips,
        ...                    order=["Dinner", "Lunch"])
    Draw strips with large points and different aesthetics:
    .. plot::
        :context: close-figs
        >>> ax = sns.stripplot("day", "total_bill", "smoker", data=tips,
        ...                    palette="Set2", size=20, marker="D",
        ...                    edgecolor="gray", alpha=.25)
    Draw strips of observations on top of a box plot:
    .. plot::
        :context: close-figs
        >>> ax = sns.boxplot(x="tip", y="day", data=tips, whis=np.inf)
        >>> ax = sns.stripplot(x="tip", y="day", data=tips,
        ...                    jitter=True, color=".3")
    Draw strips of observations on top of a violin plot:
    .. plot::
        :context: close-figs
        >>> ax = sns.violinplot(x="day", y="total_bill", data=tips,
        ...                     inner=None, color=".8")
        >>> ax = sns.stripplot(x="day", y="total_bill", data=tips, jitter=True)
    """).format(**_categorical_docs)
def swarmplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
              split=False, orient=None, color=None, palette=None,
              size=5, edgecolor="gray", linewidth=0, ax=None, **kwargs):
    """Draw a categorical scatterplot with non-overlapping points."""
    plotter = _SwarmPlotter(x, y, hue, data, order, hue_order,
                            split, orient, color, palette)
    if ax is None:
        ax = plt.gca()
    # Keep the points above gridlines and most other artists.
    kwargs.setdefault("zorder", 3)
    # An explicit ``s`` keyword takes precedence over the ``size`` parameter.
    size = kwargs.get("s", size)
    # "gray" is special-cased: derive the outline shade from the palette.
    if edgecolor == "gray":
        edgecolor = plotter.gray
    # Scale a default marker outline with the marker diameter.
    if linewidth is None:
        linewidth = size / 10
    scatter_kws = dict(s=size ** 2,
                       edgecolor=edgecolor,
                       linewidth=linewidth)
    kwargs.update(scatter_kws)
    plotter.plot(ax, kwargs)
    return ax
# Public docstring for ``swarmplot``, assembled from the shared fragments
# in ``_categorical_docs`` defined earlier in this module.
swarmplot.__doc__ = dedent("""\
    Draw a categorical scatterplot with non-overlapping points.
    This function is similar to :func:`stripplot`, but the points are adjusted
    (only along the categorical axis) so that they don't overlap. This gives a
    better representation of the distribution of values, although it does not
    scale as well to large numbers of observations (both in terms of the
    ability to show all the points and in terms of the computation needed
    to arrange them).
    This style of plot is often called a "beeswarm".
    A swarm plot can be drawn on its own, but it is also a good complement
    to a box or violin plot in cases where you want to show all observations
    along with some representation of the underlying distribution.
    Note that arranging the points properly requires an accurate transformation
    between data and point coordinates. This means that non-default axis limits
    should be set *before* drawing the swarm plot.
    {main_api_narrative}
    Parameters
    ----------
    {input_params}
    {categorical_data}
    {order_vars}
    split : bool, optional
        When using ``hue`` nesting, setting this to ``True`` will separate
        the strips for different hue levels along the categorical axis.
        Otherwise, the points for each level will be plotted in one swarm.
    {orient}
    {color}
    {palette}
    size : float, optional
        Diameter of the markers, in points. (Although ``plt.scatter`` is used
        to draw the points, the ``size`` argument here takes a "normal"
        markersize and not size^2 like ``plt.scatter``.)
    edgecolor : matplotlib color, "gray" is special-cased, optional
        Color of the lines around each point. If you pass ``"gray"``, the
        brightness is determined by the color palette used for the body
        of the points.
    {linewidth}
    {ax_in}
    Returns
    -------
    {ax_out}
    See Also
    --------
    {boxplot}
    {violinplot}
    {stripplot}
    {factorplot}
    Examples
    --------
    Draw a single horizontal swarm plot:
    .. plot::
        :context: close-figs
        >>> import seaborn as sns
        >>> sns.set_style("whitegrid")
        >>> tips = sns.load_dataset("tips")
        >>> ax = sns.swarmplot(x=tips["total_bill"])
    Group the swarms by a categorical variable:
    .. plot::
        :context: close-figs
        >>> ax = sns.swarmplot(x="day", y="total_bill", data=tips)
    Draw horizontal swarms:
    .. plot::
        :context: close-figs
        >>> ax = sns.swarmplot(x="total_bill", y="day", data=tips)
    Color the points using a second categorical variable:
    .. plot::
        :context: close-figs
        >>> ax = sns.swarmplot(x="day", y="total_bill", hue="sex", data=tips)
    Split each level of the ``hue`` variable along the categorical axis:
    .. plot::
        :context: close-figs
        >>> ax = sns.swarmplot(x="day", y="total_bill", hue="smoker",
        ...                    data=tips, palette="Set2", split=True)
    Control swarm order by passing an explicit order:
    .. plot::
        :context: close-figs
        >>> ax = sns.swarmplot(x="time", y="tip", data=tips,
        ...                    order=["Dinner", "Lunch"])
    Plot using larger points:
    .. plot::
        :context: close-figs
        >>> ax = sns.swarmplot(x="time", y="tip", data=tips, size=6)
    Draw swarms of observations on top of a box plot:
    .. plot::
        :context: close-figs
        >>> ax = sns.boxplot(x="tip", y="day", data=tips, whis=np.inf)
        >>> ax = sns.swarmplot(x="tip", y="day", data=tips)
    Draw swarms of observations on top of a violin plot:
    .. plot::
        :context: close-figs
        >>> ax = sns.violinplot(x="day", y="total_bill", data=tips, inner=None)
        >>> ax = sns.swarmplot(x="day", y="total_bill", data=tips,
        ...                    color="white", edgecolor="gray")
    """).format(**_categorical_docs)
def barplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
            estimator=np.mean, ci=95, n_boot=1000, units=None,
            orient=None, color=None, palette=None, saturation=.75,
            errcolor=".26", errwidth=None, capsize=None, ax=None, **kwargs):
    """Show point estimates and confidence intervals as rectangular bars."""
    # Strip deprecated keyword arguments, warning about each one we see.
    for key, message in (
            ("hline", "The `hline` parameter has been removed"),
            ("dropna", "The `dropna` parameter has been removed")):
        if key in kwargs:
            kwargs.pop(key)
            warnings.warn(message, UserWarning)
    if "x_order" in kwargs:
        order = kwargs.pop("x_order")
        warnings.warn("The `x_order` parameter has been renamed `order`",
                      UserWarning)
    plotter = _BarPlotter(x, y, hue, data, order, hue_order,
                          estimator, ci, n_boot, units,
                          orient, color, palette, saturation,
                          errcolor, errwidth, capsize)
    if ax is None:
        ax = plt.gca()
    plotter.plot(ax, kwargs)
    return ax
# Public docstring for ``barplot``, assembled from the shared fragments
# in ``_categorical_docs`` defined earlier in this module.
barplot.__doc__ = dedent("""\
    Show point estimates and confidence intervals as rectangular bars.
    A bar plot represents an estimate of central tendency for a numeric
    variable with the height of each rectangle and provides some indication of
    the uncertainty around that estimate using error bars. Bar plots include 0
    in the quantitative axis range, and they are a good choice when 0 is a
    meaningful value for the quantitative variable, and you want to make
    comparisons against it.
    For datasets where 0 is not a meaningful value, a point plot will allow you
    to focus on differences between levels of one or more categorical
    variables.
    It is also important to keep in mind that a bar plot shows only the mean
    (or other estimator) value, but in many cases it may be more informative to
    show the distribution of values at each level of the categorical variables.
    In that case, other approaches such as a box or violin plot may be more
    appropriate.
    {main_api_narrative}
    Parameters
    ----------
    {input_params}
    {categorical_data}
    {order_vars}
    {stat_api_params}
    {orient}
    {color}
    {palette}
    {saturation}
    errcolor : matplotlib color
        Color for the lines that represent the confidence interval.
    {ax_in}
    {errwidth}
    {capsize}
    kwargs : key, value mappings
        Other keyword arguments are passed through to ``plt.bar`` at draw
        time.
    Returns
    -------
    {ax_out}
    See Also
    --------
    {countplot}
    {pointplot}
    {factorplot}
    Examples
    --------
    Draw a set of vertical bar plots grouped by a categorical variable:
    .. plot::
        :context: close-figs
        >>> import seaborn as sns
        >>> sns.set_style("whitegrid")
        >>> tips = sns.load_dataset("tips")
        >>> ax = sns.barplot(x="day", y="total_bill", data=tips)
    Draw a set of vertical bars with nested grouping by two variables:
    .. plot::
        :context: close-figs
        >>> ax = sns.barplot(x="day", y="total_bill", hue="sex", data=tips)
    Draw a set of horizontal bars:
    .. plot::
        :context: close-figs
        >>> ax = sns.barplot(x="tip", y="day", data=tips)
    Control bar order by passing an explicit order:
    .. plot::
        :context: close-figs
        >>> ax = sns.barplot(x="time", y="tip", data=tips,
        ...                  order=["Dinner", "Lunch"])
    Use median as the estimate of central tendency:
    .. plot::
        :context: close-figs
        >>> from numpy import median
        >>> ax = sns.barplot(x="day", y="tip", data=tips, estimator=median)
    Show the standard error of the mean with the error bars:
    .. plot::
        :context: close-figs
        >>> ax = sns.barplot(x="day", y="tip", data=tips, ci=68)
    Add "caps" to the error bars:
    .. plot::
        :context: close-figs
        >>> ax = sns.barplot(x="day", y="tip", data=tips, capsize=.2)
    Use a different color palette for the bars:
    .. plot::
        :context: close-figs
        >>> ax = sns.barplot("size", y="total_bill", data=tips,
        ...                  palette="Blues_d")
    Plot all bars in a single color:
    .. plot::
        :context: close-figs
        >>> ax = sns.barplot("size", y="total_bill", data=tips,
        ...                  color="salmon", saturation=.5)
    Use ``plt.bar`` keyword arguments to further change the aesthetic:
    .. plot::
        :context: close-figs
        >>> ax = sns.barplot("day", "total_bill", data=tips,
        ...                  linewidth=2.5, facecolor=(1, 1, 1, 0),
        ...                  errcolor=".2", edgecolor=".2")
    """).format(**_categorical_docs)
def pointplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
              estimator=np.mean, ci=95, n_boot=1000, units=None,
              markers="o", linestyles="-", dodge=False, join=True, scale=1,
              orient=None, color=None, palette=None, ax=None, errwidth=None,
              capsize=None, **kwargs):
    """Show point estimates and confidence intervals using point glyphs."""
    # Strip deprecated keyword arguments, warning about each one we see.
    for key, message in (
            ("hline", "The `hline` parameter has been removed"),
            ("dropna", "The `dropna` parameter has been removed")):
        if key in kwargs:
            kwargs.pop(key)
            warnings.warn(message, UserWarning)
    if "x_order" in kwargs:
        order = kwargs.pop("x_order")
        warnings.warn("The `x_order` parameter has been renamed `order`",
                      UserWarning)
    plotter = _PointPlotter(x, y, hue, data, order, hue_order,
                            estimator, ci, n_boot, units,
                            markers, linestyles, dodge, join, scale,
                            orient, color, palette, errwidth, capsize)
    if ax is None:
        ax = plt.gca()
    plotter.plot(ax)
    return ax
# Public docstring for ``pointplot``, assembled from the shared fragments
# in ``_categorical_docs`` defined earlier in this module.
pointplot.__doc__ = dedent("""\
    Show point estimates and confidence intervals using scatter plot glyphs.
    A point plot represents an estimate of central tendency for a numeric
    variable by the position of scatter plot points and provides some
    indication of the uncertainty around that estimate using error bars.
    Point plots can be more useful than bar plots for focusing comparisons
    between different levels of one or more categorical variables. They are
    particularly adept at showing interactions: how the relationship between
    levels of one categorical variable changes across levels of a second
    categorical variable. The lines that join each point from the same ``hue``
    level allow interactions to be judged by differences in slope, which is
    easier for the eyes than comparing the heights of several groups of points
    or bars.
    It is important to keep in mind that a point plot shows only the mean (or
    other estimator) value, but in many cases it may be more informative to
    show the distribution of values at each level of the categorical variables.
    In that case, other approaches such as a box or violin plot may be more
    appropriate.
    {main_api_narrative}
    Parameters
    ----------
    {input_params}
    {categorical_data}
    {order_vars}
    {stat_api_params}
    markers : string or list of strings, optional
        Markers to use for each of the ``hue`` levels.
    linestyles : string or list of strings, optional
        Line styles to use for each of the ``hue`` levels.
    dodge : bool or float, optional
        Amount to separate the points for each level of the ``hue`` variable
        along the categorical axis.
    join : bool, optional
        If ``True``, lines will be drawn between point estimates at the same
        ``hue`` level.
    scale : float, optional
        Scale factor for the plot elements.
    {orient}
    {color}
    {palette}
    {ax_in}
    Returns
    -------
    {ax_out}
    See Also
    --------
    {barplot}
    {factorplot}
    Examples
    --------
    Draw a set of vertical point plots grouped by a categorical variable:
    .. plot::
        :context: close-figs
        >>> import seaborn as sns
        >>> sns.set_style("darkgrid")
        >>> tips = sns.load_dataset("tips")
        >>> ax = sns.pointplot(x="time", y="total_bill", data=tips)
    Draw a set of vertical points with nested grouping by two variables:
    .. plot::
        :context: close-figs
        >>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
        ...                    data=tips)
    Separate the points for different hue levels along the categorical axis:
    .. plot::
        :context: close-figs
        >>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
        ...                    data=tips, dodge=True)
    Use a different marker and line style for the hue levels:
    .. plot::
        :context: close-figs
        >>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
        ...                    data=tips,
        ...                    markers=["o", "x"],
        ...                    linestyles=["-", "--"])
    Draw a set of horizontal points:
    .. plot::
        :context: close-figs
        >>> ax = sns.pointplot(x="tip", y="day", data=tips)
    Don't draw a line connecting each point:
    .. plot::
        :context: close-figs
        >>> ax = sns.pointplot(x="tip", y="day", data=tips, join=False)
    Use a different color for a single-layer plot:
    .. plot::
        :context: close-figs
        >>> ax = sns.pointplot("time", y="total_bill", data=tips,
        ...                    color="#bb3f3f")
    Use a different color palette for the points:
    .. plot::
        :context: close-figs
        >>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
        ...                    data=tips, palette="Set2")
    Control point order by passing an explicit order:
    .. plot::
        :context: close-figs
        >>> ax = sns.pointplot(x="time", y="tip", data=tips,
        ...                    order=["Dinner", "Lunch"])
    Use median as the estimate of central tendency:
    .. plot::
        :context: close-figs
        >>> from numpy import median
        >>> ax = sns.pointplot(x="day", y="tip", data=tips, estimator=median)
    Show the standard error of the mean with the error bars:
    .. plot::
        :context: close-figs
        >>> ax = sns.pointplot(x="day", y="tip", data=tips, ci=68)
    Add "caps" to the error bars:
    .. plot::
        :context: close-figs
        >>> ax = sns.pointplot(x="day", y="tip", data=tips, capsize=.2)
    """).format(**_categorical_docs)
def countplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
              orient=None, color=None, palette=None, saturation=.75,
              ax=None, **kwargs):
    """Show the counts of observations per categorical bin as bars.

    (The full user-facing documentation is attached to
    ``countplot.__doc__`` right after this definition.)
    """
    given_x = x is not None
    given_y = y is not None

    # Exactly one of ``x``/``y`` may be supplied; reject the other cases.
    if given_x and given_y:
        raise TypeError("Cannot pass values for both `x` and `y`")
    if not (given_x or given_y):
        raise TypeError("Must pass values for either `x` or `y`")

    # Mirror the single supplied variable onto the other axis so the
    # bar-plot machinery can be reused; orientation follows from which
    # variable was given.
    if given_y:
        orient, x = "h", y
    else:
        orient, y = "v", x

    # A count plot is a bar plot whose estimator is ``len`` with error
    # bars, bootstrapping, units, and error color all disabled.
    plotter = _BarPlotter(x, y, hue, data, order, hue_order,
                          len, None, 0, None,
                          orient, color, palette, saturation,
                          None)
    plotter.value_label = "count"

    if ax is None:
        ax = plt.gca()
    plotter.plot(ax, kwargs)
    return ax
countplot.__doc__ = dedent("""\
Show the counts of observations in each categorical bin using bars.
A count plot can be thought of as a histogram across a categorical, instead
of quantitative, variable. The basic API and options are identical to those
for :func:`barplot`, so you can compare counts across nested variables.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{orient}
{color}
{palette}
{saturation}
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed to ``plt.bar``.
Returns
-------
{ax_out}
See Also
--------
{barplot}
{factorplot}
Examples
--------
Show value counts for a single categorical variable:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set(style="darkgrid")
>>> titanic = sns.load_dataset("titanic")
>>> ax = sns.countplot(x="class", data=titanic)
Show value counts for two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="class", hue="who", data=titanic)
Plot the bars horizontally:
.. plot::
:context: close-figs
>>> ax = sns.countplot(y="class", hue="who", data=titanic)
Use a different color palette:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="who", data=titanic, palette="Set3")
Use ``plt.bar`` keyword arguments for a different look:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="who", data=titanic,
... facecolor=(0, 0, 0, 0),
... linewidth=5,
... edgecolor=sns.color_palette("dark", 3))
""").format(**_categorical_docs)
def factorplot(x=None, y=None, hue=None, data=None, row=None, col=None,
               col_wrap=None, estimator=np.mean, ci=95, n_boot=1000,
               units=None, order=None, hue_order=None, row_order=None,
               col_order=None, kind="point", size=4, aspect=1,
               orient=None, color=None, palette=None,
               legend=True, legend_out=True, sharex=True, sharey=True,
               margin_titles=False, facet_kws=None, **kwargs):
    # NOTE: the user-facing documentation is attached below via
    # ``factorplot.__doc__``; the comments here describe the implementation.
    # Handle some deprecated arguments
    if "hline" in kwargs:
        kwargs.pop("hline")
        warnings.warn("The `hline` parameter has been removed", UserWarning)
    if "dropna" in kwargs:
        kwargs.pop("dropna")
        warnings.warn("The `dropna` parameter has been removed", UserWarning)
    if "x_order" in kwargs:
        order = kwargs.pop("x_order")
        warnings.warn("The `x_order` parameter has been renamed `order`",
                      UserWarning)
    # Determine the plotting function
    # (resolves e.g. kind="box" to the module-level ``boxplot`` function)
    try:
        plot_func = globals()[kind + "plot"]
    except KeyError:
        err = "Plot kind '{}' is not recognized".format(kind)
        raise ValueError(err)
    # Alias the input variables to determine categorical order and palette
    # correctly in the case of a count plot
    # (count plots use the single given variable on both axes)
    if kind == "count":
        if x is None and y is not None:
            x_, y_, orient = y, y, "h"
        elif y is None and x is not None:
            x_, y_, orient = x, x, "v"
        else:
            raise ValueError("Either `x` or `y` must be None for count plots")
    else:
        x_, y_ = x, y
    # Determine the order for the whole dataset, which will be used in all
    # facets to ensure representation of all data in the final plot
    p = _CategoricalPlotter()
    p.establish_variables(x_, y_, hue, data, orient, order, hue_order)
    order = p.group_names
    hue_order = p.hue_names
    # Determine the palette to use
    # (FacetGrid will pass a value for ``color`` to the plotting function
    # so we need to define ``palette`` to get default behavior for the
    # categorical functions
    p.establish_colors(color, palette, 1)
    if kind != "point" or hue is not None:
        palette = p.colors
    # Determine keyword arguments for the facets
    facet_kws = {} if facet_kws is None else facet_kws
    facet_kws.update(
        data=data, row=row, col=col,
        row_order=row_order, col_order=col_order,
        col_wrap=col_wrap, size=size, aspect=aspect,
        sharex=sharex, sharey=sharey,
        legend_out=legend_out, margin_titles=margin_titles,
        dropna=False,
        )
    # Determine keyword arguments for the plotting function
    plot_kws = dict(
        order=order, hue_order=hue_order,
        orient=orient, color=color, palette=palette,
        )
    plot_kws.update(kwargs)
    # Only the statistical plot kinds understand the estimator/CI options.
    if kind in ["bar", "point"]:
        plot_kws.update(
            estimator=estimator, ci=ci, n_boot=n_boot, units=units,
            )
    # Initialize the facets
    g = FacetGrid(**facet_kws)
    # Draw the plot onto the facets
    g.map_dataframe(plot_func, x, y, hue, **plot_kws)
    # Special case axis labels for a count type plot
    if kind == "count":
        if x is None:
            g.set_axis_labels(x_var="count")
        if y is None:
            g.set_axis_labels(y_var="count")
    # Add a legend only when hue carries information not already shown on
    # an axis or facet, so nothing is labeled twice.
    if legend and (hue is not None) and (hue not in [x, row, col]):
        hue_order = list(map(str, hue_order))
        g.add_legend(title=hue, label_order=hue_order)
    return g
factorplot.__doc__ = dedent("""\
Draw a categorical plot onto a FacetGrid.
The default plot that is shown is a point plot, but other seaborn
categorical plots can be chosen with the ``kind`` parameter, including
box plots, violin plots, bar plots, or strip plots.
It is important to choose how variables get mapped to the plot structure
such that the most important comparisons are easiest to make. As a general
rule, it is easier to compare positions that are closer together, so the
``hue`` variable should be used for the most important comparisons. For
secondary comparisons, try to share the quantitative axis (so, use ``col``
for vertical plots and ``row`` for horizontal plots). Note that, although
it is possible to make rather complex plots using this function, in many
cases you may be better served by creating several smaller and more focused
plots than by trying to stuff many comparisons into one figure.
After plotting, the :class:`FacetGrid` with the plot is returned and can
be used directly to tweak supporting plot details or add other layers.
Note that, unlike when using the underlying plotting functions directly,
data must be passed in a long-form DataFrame with variables specified by
passing strings to ``x``, ``y``, ``hue``, and other parameters.
As in the case with the underlying plot functions, if variables have a
``categorical`` data type, the correct orientation of the plot elements,
the levels of the categorical variables, and their order will be inferred
from the objects. Otherwise you may have to use the function parameters
(``orient``, ``order``, ``hue_order``, etc.) to set up the plot correctly.
Parameters
----------
{string_input_params}
{long_form_data}
row, col : names of variables in ``data``, optional
Categorical variables that will determine the faceting of the grid.
{col_wrap}
{stat_api_params}
{order_vars}
row_order, col_order : lists of strings, optional
Order to organize the rows and/or columns of the grid in, otherwise the
orders are inferred from the data objects.
kind : {{``point``, ``bar``, ``count``, ``box``, ``violin``, ``strip``}}
The kind of plot to draw.
{size}
{aspect}
{orient}
{color}
{palette}
legend : bool, optional
If ``True`` and there is a ``hue`` variable, draw a legend on the plot.
{legend_out}
{share_xy}
{margin_titles}
facet_kws : dict, optional
Dictionary of other keyword arguments to pass to :class:`FacetGrid`.
kwargs : key, value pairings
Other keyword arguments are passed through to the underlying plotting
function.
Returns
-------
g : :class:`FacetGrid`
Returns the :class:`FacetGrid` object with the plot on it for further
tweaking.
Examples
--------
Draw a single facet to use the :class:`FacetGrid` legend placement:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set(style="ticks")
>>> exercise = sns.load_dataset("exercise")
>>> g = sns.factorplot(x="time", y="pulse", hue="kind", data=exercise)
Use a different plot kind to visualize the same data:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... data=exercise, kind="violin")
Facet along the columns to show a third categorical variable:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... col="diet", data=exercise)
Use a different size and aspect ratio for the facets:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... col="diet", data=exercise,
... size=5, aspect=.8)
Make many column facets and wrap them into the rows of the grid:
.. plot::
:context: close-figs
>>> titanic = sns.load_dataset("titanic")
>>> g = sns.factorplot("alive", col="deck", col_wrap=4,
... data=titanic[titanic.deck.notnull()],
... kind="count", size=2.5, aspect=.8)
Plot horizontally and pass other keyword arguments to the plot function:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="age", y="embark_town",
... hue="sex", row="class",
... data=titanic[titanic.embark_town.notnull()],
... orient="h", size=2, aspect=3.5, palette="Set3",
... kind="violin", split=True, cut=0, bw=.2)
Use methods on the returned :class:`FacetGrid` to tweak the presentation:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="who", y="survived", col="class",
... data=titanic, saturation=.5,
... kind="bar", ci=None, aspect=.6)
>>> (g.set_axis_labels("", "Survival Rate")
... .set_xticklabels(["Men", "Women", "Children"])
... .set_titles("{{col_name}} {{col_var}}")
... .set(ylim=(0, 1))
... .despine(left=True)) #doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
def lvplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
           orient=None, color=None, palette=None, saturation=.75,
           width=.8, k_depth='proportion', linewidth=None, scale='exponential',
           outlier_prop=None, ax=None, **kwargs):
    """Create a letter value plot.

    (The full user-facing documentation is attached to ``lvplot.__doc__``
    right after this definition.)
    """
    # All of the actual work lives in the plotter object; this function
    # only wires it up to an Axes.
    lv_plotter = _LVPlotter(x, y, hue, data, order, hue_order,
                            orient, color, palette, saturation,
                            width, k_depth, linewidth, scale, outlier_prop)

    if ax is None:
        ax = plt.gca()
    lv_plotter.plot(ax, kwargs)
    return ax
lvplot.__doc__ = dedent("""\
Create a letter value plot
Letter value (LV) plots are non-parametric estimates of the distribution of
a dataset, similar to boxplots. LV plots are also similar to violin plots
but without the need to fit a kernel density estimate. Thus, LV plots are
fast to generate, directly interpretable in terms of the distribution of
data, and easy to understand. For a more extensive explanation of letter
value plots and their properties, see Hadley Wickham's excellent paper on
the topic:
http://vita.had.co.nz/papers/letter-value-plot.html
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{orient}
{color}
{palette}
{saturation}
{width}
k_depth : "proportion" | "tukey" | "trustworthy", optional
The number of boxes, and by extension number of percentiles, to draw.
All methods are detailed in Wickham's paper. Each makes different
assumptions about the number of outliers and leverages different
statistical properties.
{linewidth}
scale : "linear" | "exonential" | "area"
Method to use for the width of the letter value boxes. All give similar
results visually. "linear" reduces the width by a constant linear
factor, "exponential" uses the proportion of data not covered, "area"
is proportional to the percentage of data covered.
outlier_prop : float, optional
Proportion of data believed to be outliers. Is used in conjunction with
k_depth to determine the number of percentiles to draw. Defaults to
0.007 as a proportion of outliers. Should be in range [0, 1].
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed through to ``plt.plot`` and
``plt.scatter`` at draw time.
Returns
-------
{ax_out}
See Also
--------
{violinplot}
{boxplot}
Examples
--------
Draw a single horizontal letter value plot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.lvplot(x=tips["total_bill"])
Draw a vertical letter value plot grouped by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.lvplot(x="day", y="total_bill", data=tips)
Draw a letter value plot with nested grouping by two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.lvplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="Set3")
Draw a letter value plot with nested grouping when some bins are empty:
.. plot::
:context: close-figs
>>> ax = sns.lvplot(x="day", y="total_bill", hue="time",
... data=tips, linewidth=2.5)
Control box order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.lvplot(x="time", y="tip", data=tips,
... order=["Dinner", "Lunch"])
Draw a letter value plot for each numeric variable in a DataFrame:
.. plot::
:context: close-figs
>>> iris = sns.load_dataset("iris")
>>> ax = sns.lvplot(data=iris, orient="h", palette="Set2")
Use :func:`stripplot` to show the datapoints on top of the boxes:
.. plot::
:context: close-figs
>>> ax = sns.lvplot(x="day", y="total_bill", data=tips)
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips,
... size=4, jitter=True, edgecolor="gray")
Draw a letter value plot on to a :class:`FacetGrid` to group within an
additional categorical variable:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", size=4, aspect=.7)
>>> (g.map(sns.lvplot, "sex", "total_bill", "smoker")
... .despine(left=True)
... .add_legend(title="smoker")) #doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
| mit |
sourabhdalvi/sourabhdalvi.github.io | markdown_generator/publications.py | 197 | 3887 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
# Load the publications table. Tab separation is used because citation
# strings routinely contain commas.
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications  # notebook-style echo of the dataframe; no effect as a script
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
# Mapping of characters that would break YAML front matter onto their
# HTML entity equivalents.
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;"
}


def html_escape(text):
    """Return *text* with &, double and single quotes replaced by entities."""
    # One C-level pass over the string instead of a per-character lookup loop.
    return text.translate(str.maketrans(html_escape_table))
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation"), remove the lines that append it.
# In[5]:
import os
# Emit one markdown file per publication row: YAML front matter first
# (title, permalink, excerpt, date, venue, paper URL, citation), then a
# short markdown body with the download link and recommended citation.
for row, item in publications.iterrows():
    md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
    html_filename = str(item.pub_date) + "-" + item.url_slug
    year = item.pub_date[:4]  # NOTE(review): currently unused
    ## YAML variables
    md = "---\ntitle: \"" + item.title + '"\n'
    md += """collection: publications"""
    md += """\npermalink: /publication/""" + html_filename
    # The ``len(...) > 5`` checks presumably skip empty/NaN cells (which
    # stringify to '' or 'nan') — TODO confirm against the TSV contents.
    if len(str(item.excerpt)) > 5:
        md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
    md += "\ndate: " + str(item.pub_date)
    md += "\nvenue: '" + html_escape(item.venue) + "'"
    if len(str(item.paper_url)) > 5:
        md += "\npaperurl: '" + item.paper_url + "'"
    md += "\ncitation: '" + html_escape(item.citation) + "'"
    md += "\n---"
    ## Markdown description for individual page
    if len(str(item.paper_url)) > 5:
        md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n"
    if len(str(item.excerpt)) > 5:
        md += "\n" + html_escape(item.excerpt) + "\n"
    md += "\nRecommended citation: " + item.citation
    # basename() guards against a url_slug containing path separators.
    md_filename = os.path.basename(md_filename)
    with open("../_publications/" + md_filename, 'w') as f:
        f.write(md)
| mit |
franzpl/sweep | peak_to_noise_ratio/pnr_depends_on_excitation_length.py | 2 | 1779 | #!/usr/bin/env python3
"""The influence of excitation length.
"""
import sys
sys.path.append('..')
import measurement_chain
import plotting
import calculation
import ir_imitation
import generation
import matplotlib.pyplot as plt
import windows
from scipy.signal import lfilter, fftconvolve
import numpy as np
# Parameters of the measuring system
fs = 44100  # sampling rate / Hz
fstart = 1  # sweep start frequency / Hz
fstop = 22050  # sweep stop frequency / Hz (Nyquist for fs)
duration = 1  # default sweep length / s (shadowed by the loop variable below)
pad = 4  # zero padding appended after the sweep / s
duration_list = np.arange(1, 50, 1)  # excitation lengths to evaluate / s
# Noise in measurement chain
noise_level_db = -13.5
noise = measurement_chain.additive_noise(noise_level_db)
# FIR-Filter-System (a unit impulse: the system under test is ideal,
# so only the additive noise remains in the response)
dirac_system = measurement_chain.convolution([1.0])
# Combine system elements
system = measurement_chain.chained(dirac_system, noise)
def get_results(duration):
    """Run one measurement with a sweep of the given length.

    Generates a log sweep, zero-pads it, sends it through the noisy
    measurement chain, and deconvolves the response to obtain the
    impulse response.
    """
    sweep = generation.log_sweep(fstart, fstop, duration, fs)
    padded_sweep = generation.zero_padding(sweep, duration + pad, fs)
    response = system(padded_sweep)
    return calculation.deconv_process(padded_sweep, response, fs)
# Measure the peak-to-noise ratio for each excitation length, logging the
# raw numbers to a text file, then plot PNR over excitation length.
pnr_array = []
with open("pnr_depends_on_excitation_length.txt", "w") as f:
    for duration in duration_list:
        ir = get_results(duration)
        # Peak at the impulse (sample 0) versus the noise floor after it.
        pnr = calculation.pnr_db(ir[0], ir[1:4 * fs])
        pnr_array.append(pnr)
        f.write(
            str(duration) + " " + str(pnr) + " \n")
plt.plot(duration_list, pnr_array)
plt.ticklabel_format(useOffset=False)
# FIX: the title/labels/output name said "Averaging Length" — copied from
# a sibling script. This experiment varies the *excitation* length (see
# the module docstring and the data file name above).
plt.title('PNR depends on Excitation Length')
plt.xlabel('Excitation Length / s')
plt.ylabel('PNR / dB')
plt.grid(True)
xticks = [1, 10, 20, 30, 40, 50]
plt.xticks(xticks)
plt.xlim([1, 50])
plt.savefig('pnr_depends_on_excitation_length')
wavelets/BayesDataAnalysisWithPyMC | BayesDataAnalysisWithPymc/plot_post.py | 2 | 2926 | # -*- coding: utf-8 -*-
'''Plot the histogram of the posterior distribution sample,
with the mean and the 95% HDI.
Adaptation of the R code from "Doing Bayesian Data Analysis",
by John K. Kruschke.
More info: http://doingbayesiandataanalysis.blogspot.com.br/
Histogram code based on (copied from!) 'Probabilistic Programming and
Bayesian Methods for Hackers', by Cameron Davidson-Pilon.
More info: https://github.com/CamDavidsonPilon/
Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
'''
from __future__ import division
from short_hdi import short_hdi
from matplotlib import pyplot as plot
def plot_post(sample, title='Posterior',
              cred=0.95, comp=None, *args, **kwargs):
    '''Plot the histogram of the posterior distribution sample,
    with the mean and the HDI.

    :Arguments:

        sample: array of sample values.

        cred: credible interval (default: 95%)
              NOTE(review): currently unused — the HDI mass is whatever
              ``short_hdi`` defaults to; confirm before relying on it.

        comp: value for comparison (default: None)

        title: String value for graph title.
    '''
    # First we compute the shortest HDI using Kruschke's algorithm.
    sample_hdi = short_hdi(sample)

    # Then we plot the histogram of the sample (normalized to a density).
    ax = plot.hist(sample,
                   bins=25,
                   alpha=0.85,
                   label='',
                   normed=True)

    # Force the y-axis to be limited to 1.1 times the max probability density.
    maxy = 1.1 * max(ax[0])
    plot.ylim(0.0, maxy)

    # No y-axis label, they are not important here.
    plot.yticks([])

    # Annotate the mean near the top of the plot.
    plot.text(sample.mean(), 0.9 * max(ax[0]), 'Mean: %0.3f' % sample.mean())
    plot.title(title)

    # Plot the HDI as a horizontal line along the x-axis with its bounds
    # labeled underneath each endpoint.
    plot.hlines(y=0, xmin=sample_hdi[0], xmax=sample_hdi[1], linewidth=6)
    plot.text(sample_hdi[0], max(ax[0]) / 20, '%0.3f' % sample_hdi[0],
              horizontalalignment='center')
    plot.text(sample_hdi[1], max(ax[0]) / 20, '%0.3f' % sample_hdi[1],
              horizontalalignment='center')

    # In case there is a comparison value, plot it and
    # compute how much of the posterior falls at each side.
    # FIX: identity comparison with None (was ``comp != None``, PEP 8 E711);
    # also removed a leftover Python-2 debug ``print less, more``.
    if comp is not None:
        loc = max(ax[0]) / 2.0
        plot.vlines(comp, 0, loc, color='green', linestyle='--')
        less = 100 * (sum(sample < comp)) / len(sample)
        more = 100 * (sum(sample > comp)) / len(sample)
        plot.text(comp, loc, '%0.1f%% < %0.1f < %0.1f%%' % (less, comp, more),
                  color='green', horizontalalignment='center')
| mit |
sergeykolychev/mxnet | example/reinforcement-learning/ddpg/strategies.py | 42 | 2473 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
class BaseStrategy(object):
    """
    Base class of exploration strategy.
    """

    def get_action(self, obs, policy):
        """Return an exploration-perturbed action for ``obs``.

        Subclasses must override this.
        """
        raise NotImplementedError

    def reset(self):
        """Reset any internal exploration state (no-op by default)."""
        pass
class OUStrategy(BaseStrategy):
    """
    Ornstein-Uhlenbeck exploration noise.

    The internal state follows dxt = theta * (mu - xt) * dt + sigma * dWt,
    where Wt denotes the Wiener process; it is added to the policy's
    action to produce temporally correlated exploration.
    """

    def __init__(self, env_spec, mu=0, theta=0.15, sigma=0.3):
        self.mu = mu
        self.theta = theta
        self.sigma = sigma
        self.action_space = env_spec.action_space
        # Start the process at its long-run mean.
        self.reset()

    def evolve_state(self):
        """Advance the OU process one step and return the new state."""
        current = self.state
        drift = self.theta * (self.mu - current)
        diffusion = self.sigma * np.random.randn(len(current))
        self.state = current + drift + diffusion
        return self.state

    def reset(self):
        """Re-center the process state at the long-run mean ``mu``."""
        self.state = np.ones(self.action_space.flat_dim) * self.mu

    def get_action(self, obs, policy):
        """Query the policy and add OU noise, clipped to the action bounds."""
        # get_action accepts a 2D tensor with one row
        obs = obs.reshape((1, -1))
        noisy_action = policy.get_action(obs) + self.evolve_state()
        return np.clip(noisy_action,
                       self.action_space.low,
                       self.action_space.high)
if __name__ == "__main__":
class Env1(object):
def __init__(self):
self.action_space = Env2()
class Env2(object):
def __init__(self):
self.flat_dim = 2
env_spec = Env1()
test = OUStrategy(env_spec)
states = []
for i in range(1000):
states.append(test.evolve_state()[0])
import matplotlib.pyplot as plt
plt.plot(states)
plt.show()
| apache-2.0 |
abinashpanda/pgmpy | pgmpy/models/NaiveBayes.py | 3 | 7053 | from pgmpy.independencies import Independencies
from pgmpy.models import BayesianModel
class NaiveBayes(BayesianModel):
    """
    Class to represent Naive Bayes.
    Subclass of Bayesian Model.
    Model holds directed edges from one parent node to multiple
    children nodes only.

    Parameters
    ----------
    data : input graph
        Data to initialize graph. If data=None (default) an empty
        graph is created. The data can be an edge list, or any
        NetworkX graph object.

    Examples
    --------
    Create an empty Naive Bayes Model with no nodes and no edges.

    >>> from pgmpy.models import NaiveBayes
    >>> G = NaiveBayes()

    G can be grown in several ways.

    **Nodes:**

    Add one node at a time:

    >>> G.add_node('a')

    Add the nodes from any container (a list, set or tuple or the nodes
    from another graph).

    >>> G.add_nodes_from(['a', 'b', 'c'])

    **Edges:**

    G can also be grown by adding edges.

    Add one edge,

    >>> G.add_edge('a', 'b')

    a list of edges,

    >>> G.add_edges_from([('a', 'b'), ('a', 'c')])

    If some edges connect nodes not yet in the model, the nodes
    are added automatically. There are no errors when adding
    nodes or edges that already exist.

    **Shortcuts:**

    Many common graph features allow python syntax for speed reporting.

    >>> 'a' in G  # check if node in graph
    True
    >>> len(G)  # number of nodes in graph
    3
    """

    def __init__(self, ebunch=None):
        # The single common parent (class) node and the set of its
        # children (feature) nodes; tracked so edges can be validated.
        self.parent_node = None
        self.children_nodes = set()
        super(NaiveBayes, self).__init__(ebunch)

    def add_edge(self, u, v, *kwargs):
        """
        Add an edge between u and v.

        The nodes u and v will be automatically added if they are
        not already in the graph. Only edges keeping the naive Bayes
        structure (a single common parent) are accepted.

        Parameters
        ----------
        u,v : nodes
            Nodes can be any hashable python object.

        Examples
        --------
        >>> from pgmpy.models import NaiveBayes
        >>> G = NaiveBayes()
        >>> G.add_nodes_from(['a', 'b', 'c'])
        >>> G.add_edge('a', 'b')
        >>> G.add_edge('a', 'c')
        >>> G.edges()
        [('a', 'c'), ('a', 'b')]
        """
        if self.parent_node and u != self.parent_node:
            raise ValueError("Model can have only one parent node.")
        self.parent_node = u
        self.children_nodes.add(v)
        # NOTE(review): ``*kwargs`` forwards extra *positional* args; the
        # name is misleading but kept for interface compatibility.
        super(NaiveBayes, self).add_edge(u, v, *kwargs)

    def _get_ancestors_of(self, obs_nodes_list):
        """
        Returns a set of all ancestors of all the observed nodes.

        In a naive Bayes model the only possible ancestor is the parent
        node, so this is the observed nodes plus the parent.

        Parameters
        ----------
        obs_nodes_list: string, list-type
            name of all the observed nodes
        """
        if not obs_nodes_list:
            return set()
        if isinstance(obs_nodes_list, str):
            obs_nodes_list = [obs_nodes_list]
        # BUG FIX: ``set(self.parent_node)`` split a multi-character node
        # name into its characters; wrap the parent in a singleton set.
        return set(obs_nodes_list) | {self.parent_node}

    def active_trail_nodes(self, start, observed=None):
        """
        Returns all the nodes reachable from start via an active trail.

        Parameters
        ----------
        start: Graph node

        observed : List of nodes (optional)
            If given the active trail would be computed assuming these nodes to be observed.

        Examples
        --------
        >>> from pgmpy.models import NaiveBayes
        >>> model = NaiveBayes()
        >>> model.add_nodes_from(['a', 'b', 'c', 'd'])
        >>> model.add_edges_from([('a', 'b'), ('a', 'c'), ('a', 'd')])
        >>> model.active_trail_nodes('a')
        {'a', 'b', 'c', 'd'}
        >>> model.active_trail_nodes('a', ['b', 'c'])
        {'a', 'd'}
        >>> model.active_trail_nodes('b', ['a'])
        {'b'}
        """
        if observed is None:
            observed = []
        elif isinstance(observed, str):
            # BUG FIX: a bare string previously fell through to
            # ``set(observed)``, which split the name into characters.
            observed = [observed]
        if self.parent_node in observed:
            # Observing the parent d-separates every child from the rest.
            # BUG FIX: was ``set(start)``, which split string node names.
            return {start}
        else:
            return set(self.nodes()) - set(observed)

    def local_independencies(self, variables):
        """
        Returns a list of independencies objects containing the local independencies
        of each of the variables. If local independencies does not exist for a variable
        it gives a None for that variable.

        Parameters
        ----------
        variables: str or array like
            variables whose local independencies are to found.

        Examples
        --------
        >>> from pgmpy.models import NaiveBayes
        >>> model = NaiveBayes()
        >>> model.add_edges_from([('a', 'b'), ('a', 'c'), ('a', 'd')])
        >>> ind = model.local_independencies('b')
        >>> ind
        [(b _|_ d, c | a)]
        """
        independencies = []
        for variable in [variables] if isinstance(variables, str) else variables:
            if variable != self.parent_node:
                # Each child is independent of its siblings given the parent.
                # BUG FIX: was ``set(variable)``, which split a
                # multi-character node name into characters.
                independencies.append(Independencies(
                    [variable, list(set(self.children_nodes) - {variable}), self.parent_node]))
            else:
                independencies.append(None)
        return independencies

    def fit(self, data, parent_node=None, estimator_type=None):
        """
        Computes the CPD for each node from a given data in the form of a pandas dataframe.
        If a variable from the data is not present in the model, it adds that node into the model.

        Parameters
        ----------
        data : pandas DataFrame object
            A DataFrame object with column names same as the variable names of network

        parent_node: any hashable python object (optional)
            Parent node of the model, if not specified it looks for a previously specified
            parent node.

        estimator: Estimator class
            Any pgmpy estimator. If nothing is specified, the default ``MaximumLikelihoodEstimator``
            would be used.

        Examples
        --------
        >>> import numpy as np
        >>> import pandas as pd
        >>> from pgmpy.models import NaiveBayes
        >>> model = NaiveBayes()
        >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
        ...                       columns=['A', 'B', 'C', 'D', 'E'])
        >>> model.fit(values, 'A')
        >>> model.get_cpds()
        [<TabularCPD representing P(D:2 | A:2) at 0x4b72870>,
         <TabularCPD representing P(E:2 | A:2) at 0x4bb2150>,
         <TabularCPD representing P(A:2) at 0x4bb23d0>,
         <TabularCPD representing P(B:2 | A:2) at 0x4bb24b0>,
         <TabularCPD representing P(C:2 | A:2) at 0x4bb2750>]
        >>> model.edges()
        [('A', 'D'), ('A', 'E'), ('A', 'B'), ('A', 'C')]
        """
        if not parent_node:
            if not self.parent_node:
                raise ValueError("parent node must be specified for the model")
            else:
                parent_node = self.parent_node
        if parent_node not in data.columns:
            raise ValueError("parent node: {node} is not present in the given data".format(node=parent_node))
        # Connect every data column to the parent, then delegate CPD
        # estimation to the BayesianModel machinery.
        for child_node in data.columns:
            if child_node != parent_node:
                self.add_edge(parent_node, child_node)
        super(NaiveBayes, self).fit(data, estimator_type)
| mit |
nikste/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/tensorflow_dataframe.py | 75 | 29377 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import batch
from tensorflow.contrib.learn.python.learn.dataframe.transforms import csv_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import example_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import reader_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import sparsify
from tensorflow.contrib.learn.python.learn.dataframe.transforms import split_mask
from tensorflow.python.client import session as sess
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner as qr
def _expand_file_names(filepatterns):
  """Resolve one glob pattern or a collection of patterns to file names.

  Args:
    filepatterns: a single glob pattern, or a list/tuple/set of patterns.

  Returns:
    A list of the matching file names with duplicates removed (order is
    unspecified, since matches are accumulated in a set).
  """
  if not isinstance(filepatterns, (list, tuple, set)):
    filepatterns = [filepatterns]
  matched = set()
  for pattern in filepatterns:
    matched.update(gfile.Glob(pattern))
  return list(matched)
def _dtype_to_nan(dtype):
  """Returns the filler value used to mark missing entries of `dtype`."""
  if dtype is dtypes.string:
    # Strings have no NaN; an empty byte string is the sentinel.
    return b""
  if dtype.is_integer or dtype.is_floating or dtype is dtypes.bool:
    # All numeric and boolean columns share the floating-point NaN sentinel.
    return np.nan
  raise ValueError("Can't parse type without NaN into sparse tensor: %s" %
                   dtype)
def _get_default_value(feature_spec):
  """Returns the default value implied by a parsing feature spec."""
  # Fixed-length features carry an explicit default; variable-length
  # features fall back to the NaN-like sentinel for their dtype.
  if isinstance(feature_spec, parsing_ops.FixedLenFeature):
    return feature_spec.default_value
  return _dtype_to_nan(feature_spec.dtype)
class TensorFlowDataFrame(df.DataFrame):
  """TensorFlowDataFrame implements convenience functions using TensorFlow."""

  def run(self,
          num_batches=None,
          graph=None,
          session=None,
          start_queues=True,
          initialize_variables=True,
          **kwargs):
    """Builds and runs the columns of the `DataFrame` and yields batches.

    This is a generator that yields a dictionary mapping column names to
    evaluated columns.

    Args:
      num_batches: the maximum number of batches to produce. If none specified,
        the returned value will iterate through infinite batches.
      graph: the `Graph` in which the `DataFrame` should be built.
      session: the `Session` in which to run the columns of the `DataFrame`.
      start_queues: if true, queues will be started before running and halted
        after producing `n` batches.
      initialize_variables: if true, variables will be initialized.
      **kwargs: Additional keyword arguments e.g. `num_epochs`.

    Yields:
      A dictionary, mapping column names to the values resulting from running
      each column for a single batch.
    """
    if graph is None:
      graph = ops.get_default_graph()
    with graph.as_default():
      if session is None:
        session = sess.Session()
      self_built = self.build(**kwargs)
      keys = list(self_built.keys())
      cols = list(self_built.values())
      if initialize_variables:
        # Local variables must be initialized before global ones in case
        # global initialization reads locals (e.g. epoch counters).
        if variables.local_variables():
          session.run(variables.local_variables_initializer())
        if variables.global_variables():
          session.run(variables.global_variables_initializer())
      if start_queues:
        coord = coordinator.Coordinator()
        threads = qr.start_queue_runners(sess=session, coord=coord)
      i = 0
      while num_batches is None or i < num_batches:
        i += 1
        try:
          values = session.run(cols)
          yield collections.OrderedDict(zip(keys, values))
        except errors.OutOfRangeError:
          # An input queue was exhausted (e.g. `num_epochs` was reached).
          break
      if start_queues:
        coord.request_stop()
        coord.join(threads)

  def select_rows(self, boolean_series):
    """Returns a `DataFrame` with only the rows indicated by `boolean_series`.

    Note that batches may no longer have consistent size after calling
    `select_rows`, so the new `DataFrame` may need to be rebatched.
    For example:
    '''
    filtered_df = df.select_rows(df["country"] == "jp").batch(64)
    '''

    Args:
      boolean_series: a `Series` that evaluates to a boolean `Tensor`.

    Returns:
      A new `DataFrame` with the same columns as `self`, but selecting only the
      rows where `boolean_series` evaluated to `True`.
    """
    result = type(self)()
    for key, col in self._columns.items():
      try:
        result[key] = col.select_rows(boolean_series)
      except AttributeError as e:
        # Not every Series type supports row selection; surface the
        # offending type so the caller knows what to fix.
        raise NotImplementedError((
            "The select_rows method is not implemented for Series type {}. "
            "Original error: {}").format(type(col), e))
    return result

  def split(self, index_series, proportion, batch_size=None):
    """Deterministically split a `DataFrame` into two `DataFrame`s.

    Note this split is only as deterministic as the underlying hash function;
    see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
    for a given binary, but may change occasionally. The only way to achieve
    an absolute guarantee that the split `DataFrame`s do not change across runs
    is to materialize them.

    Note too that the allocation of a row to one partition or the
    other is evaluated independently for each row, so the exact number of rows
    in each partition is binomially distributed.

    Args:
      index_series: a `Series` of unique strings, whose hash will determine the
        partitioning; or the name in this `DataFrame` of such a `Series`.
        (This `Series` must contain strings because TensorFlow provides hash
        ops only for strings, and there are no number-to-string converter ops.)
      proportion: The proportion of the rows to select for the 'left'
        partition; the remaining (1 - proportion) rows form the 'right'
        partition.
      batch_size: the batch size to use when rebatching the left and right
        `DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
        thus their batches will have variable sizes, according to which rows
        are selected from each batch of the original `DataFrame`.

    Returns:
      Two `DataFrame`s containing the partitioned rows.
    """
    if isinstance(index_series, str):
      index_series = self[index_series]
    left_mask, = split_mask.SplitMask(proportion)(index_series)
    right_mask = ~left_mask
    left_rows = self.select_rows(left_mask)
    right_rows = self.select_rows(right_mask)
    if batch_size:
      left_rows = left_rows.batch(batch_size=batch_size, shuffle=False)
      right_rows = right_rows.batch(batch_size=batch_size, shuffle=False)
    return left_rows, right_rows

  def split_fast(self, index_series, proportion, batch_size,
                 base_batch_size=1000):
    """Deterministically split a `DataFrame` into two `DataFrame`s.

    Note this split is only as deterministic as the underlying hash function;
    see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
    for a given binary, but may change occasionally. The only way to achieve
    an absolute guarantee that the split `DataFrame`s do not change across runs
    is to materialize them.

    Note too that the allocation of a row to one partition or the
    other is evaluated independently for each row, so the exact number of rows
    in each partition is binomially distributed.

    Args:
      index_series: a `Series` of unique strings, whose hash will determine the
        partitioning; or the name in this `DataFrame` of such a `Series`.
        (This `Series` must contain strings because TensorFlow provides hash
        ops only for strings, and there are no number-to-string converter ops.)
      proportion: The proportion of the rows to select for the 'left'
        partition; the remaining (1 - proportion) rows form the 'right'
        partition.
      batch_size: the batch size to use when rebatching the left and right
        `DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
        thus their batches will have variable sizes, according to which rows
        are selected from each batch of the original `DataFrame`.
      base_batch_size: the batch size to use for materialized data, prior to the
        split.

    Returns:
      Two `DataFrame`s containing the partitioned rows.
    """
    if isinstance(index_series, str):
      index_series = self[index_series]
    left_mask, = split_mask.SplitMask(proportion)(index_series)
    right_mask = ~left_mask
    # Temporarily attach the masks as columns so they are materialized
    # together with the data (keeping row alignment), then removed below.
    self["left_mask__"] = left_mask
    self["right_mask__"] = right_mask

    # TODO(soergel): instead of base_batch_size can we just do one big batch?
    # avoid computing the hashes twice
    m = self.materialize_to_memory(batch_size=base_batch_size)
    left_rows_df = m.select_rows(m["left_mask__"])
    right_rows_df = m.select_rows(m["right_mask__"])

    del left_rows_df[["left_mask__", "right_mask__"]]
    del right_rows_df[["left_mask__", "right_mask__"]]

    # avoid recomputing the split repeatedly
    left_rows_df = left_rows_df.materialize_to_memory(batch_size=batch_size)
    right_rows_df = right_rows_df.materialize_to_memory(batch_size=batch_size)
    return left_rows_df, right_rows_df

  def run_one_batch(self):
    """Creates a new `Graph` and `Session` and runs a single batch.

    Returns:
      A dictionary mapping column names to numpy arrays that contain a single
      batch of the `DataFrame`.
    """
    return list(self.run(num_batches=1))[0]

  def run_one_epoch(self):
    """Creates a new `Graph` and `Session` and runs a single epoch.

    Naturally this makes sense only for DataFrames that fit in memory.

    Returns:
      A dictionary mapping column names to numpy arrays that contain a single
      epoch of the `DataFrame`.
    """
    # batches is a list of dicts of numpy arrays
    batches = [b for b in self.run(num_epochs=1)]

    # first invert that to make a dict of lists of numpy arrays
    pivoted_batches = {}
    for k in batches[0].keys():
      pivoted_batches[k] = []
    for b in batches:
      for k, v in b.items():
        pivoted_batches[k].append(v)

    # then concat the arrays in each column
    result = {k: np.concatenate(column_batches)
              for k, column_batches in pivoted_batches.items()}
    return result

  def materialize_to_memory(self, batch_size):
    """Runs one epoch and rebuilds an in-memory `DataFrame` from the result."""
    unordered_dict_of_arrays = self.run_one_epoch()

    # there may already be an 'index' column, in which case from_ordereddict)
    # below will complain because it wants to generate a new one.
    # for now, just remove it.
    # TODO(soergel): preserve index history, potentially many levels deep
    del unordered_dict_of_arrays["index"]

    # the order of the columns in this dict is arbitrary; we just need it to
    # remain consistent.
    ordered_dict_of_arrays = collections.OrderedDict(unordered_dict_of_arrays)
    return TensorFlowDataFrame.from_ordereddict(ordered_dict_of_arrays,
                                                batch_size=batch_size)

  def batch(self,
            batch_size,
            shuffle=False,
            num_threads=1,
            queue_capacity=None,
            min_after_dequeue=None,
            seed=None):
    """Resize the batches in the `DataFrame` to the given `batch_size`.

    Args:
      batch_size: desired batch size.
      shuffle: whether records should be shuffled. Defaults to False.
      num_threads: the number of enqueueing threads.
      queue_capacity: capacity of the queue that will hold new batches.
      min_after_dequeue: minimum number of elements that can be left by a
        dequeue operation. Only used if `shuffle` is true.
      seed: passed to random shuffle operations. Only used if `shuffle` is true.

    Returns:
      A `DataFrame` with `batch_size` rows.
    """
    column_names = list(self._columns.keys())
    if shuffle:
      batcher = batch.ShuffleBatch(batch_size,
                                   output_names=column_names,
                                   num_threads=num_threads,
                                   queue_capacity=queue_capacity,
                                   min_after_dequeue=min_after_dequeue,
                                   seed=seed)
    else:
      batcher = batch.Batch(batch_size,
                            output_names=column_names,
                            num_threads=num_threads,
                            queue_capacity=queue_capacity)

    batched_series = batcher(list(self._columns.values()))
    dataframe = type(self)()
    dataframe.assign(**(dict(zip(column_names, batched_series))))
    return dataframe

  @classmethod
  def _from_csv_base(cls, filepatterns, get_default_values, has_header,
                     column_names, num_threads, enqueue_size,
                     batch_size, queue_capacity, min_after_dequeue, shuffle,
                     seed):
    """Create a `DataFrame` from CSV files.

    If `has_header` is false, then `column_names` must be specified. If
    `has_header` is true and `column_names` are specified, then `column_names`
    overrides the names in the header.

    Args:
      filepatterns: a list of file patterns that resolve to CSV files.
      get_default_values: a function that produces a list of default values for
        each column, given the column names.
      has_header: whether or not the CSV files have headers.
      column_names: a list of names for the columns in the CSV files.
      num_threads: the number of readers that will work in parallel.
      enqueue_size: block size for each read operation.
      batch_size: desired batch size.
      queue_capacity: capacity of the queue that will store parsed lines.
      min_after_dequeue: minimum number of elements that can be left by a
        dequeue operation. Only used if `shuffle` is true.
      shuffle: whether records should be shuffled. Defaults to true.
      seed: passed to random shuffle operations. Only used if `shuffle` is true.

    Returns:
      A `DataFrame` that has columns corresponding to `features` and is filled
      with examples from `filepatterns`.

    Raises:
      ValueError: no files match `filepatterns`.
      ValueError: `features` contains the reserved name 'index'.
    """
    filenames = _expand_file_names(filepatterns)
    if not filenames:
      raise ValueError("No matching file names.")

    if column_names is None:
      if not has_header:
        raise ValueError("If column_names is None, has_header must be true.")
      # Read the column names from the header of the first file; the
      # header row itself is skipped by the reader below.
      with gfile.GFile(filenames[0]) as f:
        column_names = csv.DictReader(f).fieldnames

    if "index" in column_names:
      raise ValueError(
          "'index' is reserved and can not be used for a column name.")

    default_values = get_default_values(column_names)

    reader_kwargs = {"skip_header_lines": (1 if has_header else 0)}
    index, value = reader_source.TextFileSource(
        filenames,
        reader_kwargs=reader_kwargs,
        enqueue_size=enqueue_size,
        batch_size=batch_size,
        queue_capacity=queue_capacity,
        shuffle=shuffle,
        min_after_dequeue=min_after_dequeue,
        num_threads=num_threads,
        seed=seed)()
    parser = csv_parser.CSVParser(column_names, default_values)
    parsed = parser(value)

    column_dict = parsed._asdict()
    column_dict["index"] = index

    dataframe = cls()
    dataframe.assign(**column_dict)
    return dataframe

  @classmethod
  def from_csv(cls,
               filepatterns,
               default_values,
               has_header=True,
               column_names=None,
               num_threads=1,
               enqueue_size=None,
               batch_size=32,
               queue_capacity=None,
               min_after_dequeue=None,
               shuffle=True,
               seed=None):
    """Create a `DataFrame` from CSV files.

    If `has_header` is false, then `column_names` must be specified. If
    `has_header` is true and `column_names` are specified, then `column_names`
    overrides the names in the header.

    Args:
      filepatterns: a list of file patterns that resolve to CSV files.
      default_values: a list of default values for each column.
      has_header: whether or not the CSV files have headers.
      column_names: a list of names for the columns in the CSV files.
      num_threads: the number of readers that will work in parallel.
      enqueue_size: block size for each read operation.
      batch_size: desired batch size.
      queue_capacity: capacity of the queue that will store parsed lines.
      min_after_dequeue: minimum number of elements that can be left by a
        dequeue operation. Only used if `shuffle` is true.
      shuffle: whether records should be shuffled. Defaults to true.
      seed: passed to random shuffle operations. Only used if `shuffle` is true.

    Returns:
      A `DataFrame` that has columns corresponding to `features` and is filled
      with examples from `filepatterns`.

    Raises:
      ValueError: no files match `filepatterns`.
      ValueError: `features` contains the reserved name 'index'.
    """

    def get_default_values(column_names):
      # pylint: disable=unused-argument
      return default_values

    return cls._from_csv_base(filepatterns, get_default_values, has_header,
                              column_names, num_threads,
                              enqueue_size, batch_size, queue_capacity,
                              min_after_dequeue, shuffle, seed)

  @classmethod
  def from_csv_with_feature_spec(cls,
                                 filepatterns,
                                 feature_spec,
                                 has_header=True,
                                 column_names=None,
                                 num_threads=1,
                                 enqueue_size=None,
                                 batch_size=32,
                                 queue_capacity=None,
                                 min_after_dequeue=None,
                                 shuffle=True,
                                 seed=None):
    """Create a `DataFrame` from CSV files, given a feature_spec.

    If `has_header` is false, then `column_names` must be specified. If
    `has_header` is true and `column_names` are specified, then `column_names`
    overrides the names in the header.

    Args:
      filepatterns: a list of file patterns that resolve to CSV files.
      feature_spec: a dict mapping column names to `FixedLenFeature` or
        `VarLenFeature`.
      has_header: whether or not the CSV files have headers.
      column_names: a list of names for the columns in the CSV files.
      num_threads: the number of readers that will work in parallel.
      enqueue_size: block size for each read operation.
      batch_size: desired batch size.
      queue_capacity: capacity of the queue that will store parsed lines.
      min_after_dequeue: minimum number of elements that can be left by a
        dequeue operation. Only used if `shuffle` is true.
      shuffle: whether records should be shuffled. Defaults to true.
      seed: passed to random shuffle operations. Only used if `shuffle` is true.

    Returns:
      A `DataFrame` that has columns corresponding to `features` and is filled
      with examples from `filepatterns`.

    Raises:
      ValueError: no files match `filepatterns`.
      ValueError: `features` contains the reserved name 'index'.
    """

    def get_default_values(column_names):
      return [_get_default_value(feature_spec[name]) for name in column_names]

    dataframe = cls._from_csv_base(filepatterns, get_default_values, has_header,
                                   column_names, num_threads,
                                   enqueue_size, batch_size, queue_capacity,
                                   min_after_dequeue, shuffle, seed)

    # replace the dense columns with sparse ones in place in the dataframe
    for name in dataframe.columns():
      if name != "index" and isinstance(feature_spec[name],
                                        parsing_ops.VarLenFeature):
        strip_value = _get_default_value(feature_spec[name])
        (dataframe[name],) = sparsify.Sparsify(strip_value)(dataframe[name])

    return dataframe

  @classmethod
  def from_examples(cls,
                    filepatterns,
                    features,
                    reader_cls=io_ops.TFRecordReader,
                    num_threads=1,
                    enqueue_size=None,
                    batch_size=32,
                    queue_capacity=None,
                    min_after_dequeue=None,
                    shuffle=True,
                    seed=None):
    """Create a `DataFrame` from `tensorflow.Example`s.

    Args:
      filepatterns: a list of file patterns containing `tensorflow.Example`s.
      features: a dict mapping feature names to `VarLenFeature` or
        `FixedLenFeature`.
      reader_cls: a subclass of `tensorflow.ReaderBase` that will be used to
        read the `Example`s.
      num_threads: the number of readers that will work in parallel.
      enqueue_size: block size for each read operation.
      batch_size: desired batch size.
      queue_capacity: capacity of the queue that will store parsed `Example`s
      min_after_dequeue: minimum number of elements that can be left by a
        dequeue operation. Only used if `shuffle` is true.
      shuffle: whether records should be shuffled. Defaults to true.
      seed: passed to random shuffle operations. Only used if `shuffle` is true.

    Returns:
      A `DataFrame` that has columns corresponding to `features` and is filled
      with `Example`s from `filepatterns`.

    Raises:
      ValueError: no files match `filepatterns`.
      ValueError: `features` contains the reserved name 'index'.
    """
    filenames = _expand_file_names(filepatterns)
    if not filenames:
      raise ValueError("No matching file names.")

    if "index" in features:
      raise ValueError(
          "'index' is reserved and can not be used for a feature name.")

    index, record = reader_source.ReaderSource(
        reader_cls,
        filenames,
        enqueue_size=enqueue_size,
        batch_size=batch_size,
        queue_capacity=queue_capacity,
        shuffle=shuffle,
        min_after_dequeue=min_after_dequeue,
        num_threads=num_threads,
        seed=seed)()
    parser = example_parser.ExampleParser(features)
    parsed = parser(record)

    column_dict = parsed._asdict()
    column_dict["index"] = index

    dataframe = cls()
    dataframe.assign(**column_dict)
    return dataframe

  @classmethod
  def from_pandas(cls,
                  pandas_dataframe,
                  num_threads=None,
                  enqueue_size=None,
                  batch_size=None,
                  queue_capacity=None,
                  min_after_dequeue=None,
                  shuffle=True,
                  seed=None,
                  data_name="pandas_data"):
    """Create a `tf.learn.DataFrame` from a `pandas.DataFrame`.

    Args:
      pandas_dataframe: `pandas.DataFrame` that serves as a data source.
      num_threads: the number of threads to use for enqueueing.
      enqueue_size: the number of rows to enqueue per step.
      batch_size: desired batch size.
      queue_capacity: capacity of the queue that will store parsed `Example`s
      min_after_dequeue: minimum number of elements that can be left by a
        dequeue operation. Only used if `shuffle` is true.
      shuffle: whether records should be shuffled. Defaults to true.
      seed: passed to random shuffle operations. Only used if `shuffle` is true.
      data_name: a scope name identifying the data.

    Returns:
      A `tf.learn.DataFrame` that contains batches drawn from the given
      `pandas_dataframe`.
    """
    pandas_source = in_memory_source.PandasSource(
        pandas_dataframe,
        num_threads=num_threads,
        enqueue_size=enqueue_size,
        batch_size=batch_size,
        queue_capacity=queue_capacity,
        shuffle=shuffle,
        min_after_dequeue=min_after_dequeue,
        seed=seed,
        data_name=data_name)
    dataframe = cls()
    dataframe.assign(**(pandas_source()._asdict()))
    return dataframe

  @classmethod
  def from_numpy(cls,
                 numpy_array,
                 num_threads=None,
                 enqueue_size=None,
                 batch_size=None,
                 queue_capacity=None,
                 min_after_dequeue=None,
                 shuffle=True,
                 seed=None,
                 data_name="numpy_data"):
    """Creates a `tf.learn.DataFrame` from a `numpy.ndarray`.

    The returned `DataFrame` contains two columns: 'index' and 'value'. The
    'value' column contains a row from the array. The 'index' column contains
    the corresponding row number.

    Args:
      numpy_array: `numpy.ndarray` that serves as a data source.
      num_threads: the number of threads to use for enqueueing.
      enqueue_size: the number of rows to enqueue per step.
      batch_size: desired batch size.
      queue_capacity: capacity of the queue that will store parsed `Example`s
      min_after_dequeue: minimum number of elements that can be left by a
        dequeue operation. Only used if `shuffle` is true.
      shuffle: whether records should be shuffled. Defaults to true.
      seed: passed to random shuffle operations. Only used if `shuffle` is true.
      data_name: a scope name identifying the data.

    Returns:
      A `tf.learn.DataFrame` that contains batches drawn from the given
      array.
    """
    numpy_source = in_memory_source.NumpySource(
        numpy_array,
        num_threads=num_threads,
        enqueue_size=enqueue_size,
        batch_size=batch_size,
        queue_capacity=queue_capacity,
        shuffle=shuffle,
        min_after_dequeue=min_after_dequeue,
        seed=seed,
        data_name=data_name)
    dataframe = cls()
    dataframe.assign(**(numpy_source()._asdict()))
    return dataframe

  @classmethod
  def from_ordereddict(cls,
                       ordered_dict_of_arrays,
                       num_threads=None,
                       enqueue_size=None,
                       batch_size=None,
                       queue_capacity=None,
                       min_after_dequeue=None,
                       shuffle=True,
                       seed=None,
                       data_name="numpy_data"):
    """Creates a `tf.learn.DataFrame` from an `OrderedDict` of `numpy.ndarray`.

    The returned `DataFrame` contains a column for each key of the dict plus an
    extra 'index' column. The 'index' column contains the row number. Each of
    the other columns contains a row from the corresponding array.

    Args:
      ordered_dict_of_arrays: `OrderedDict` of `numpy.ndarray` that serves as a
        data source.
      num_threads: the number of threads to use for enqueueing.
      enqueue_size: the number of rows to enqueue per step.
      batch_size: desired batch size.
      queue_capacity: capacity of the queue that will store parsed `Example`s
      min_after_dequeue: minimum number of elements that can be left by a
        dequeue operation. Only used if `shuffle` is true.
      shuffle: whether records should be shuffled. Defaults to true.
      seed: passed to random shuffle operations. Only used if `shuffle` is true.
      data_name: a scope name identifying the data.

    Returns:
      A `tf.learn.DataFrame` that contains batches drawn from the given arrays.

    Raises:
      ValueError: `ordered_dict_of_arrays` contains the reserved name 'index'.
    """
    numpy_source = in_memory_source.OrderedDictNumpySource(
        ordered_dict_of_arrays,
        num_threads=num_threads,
        enqueue_size=enqueue_size,
        batch_size=batch_size,
        queue_capacity=queue_capacity,
        shuffle=shuffle,
        min_after_dequeue=min_after_dequeue,
        seed=seed,
        data_name=data_name)
    dataframe = cls()
    dataframe.assign(**(numpy_source()._asdict()))
    return dataframe
| apache-2.0 |
rahuldhote/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 244 | 9986 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    # The sparse_coef property should expose coef_ as a scipy sparse matrix
    # whose single row round-trips back to the dense coefficients.
    model = ElasticNet()
    model.coef_ = [1, 2, 3]

    assert_true(sp.isspmatrix(model.sparse_coef_))
    assert_equal(model.sparse_coef_.toarray().tolist()[0], model.coef_)
def test_normalize_option():
    # Check that the normalize option in enet works the same for dense and
    # sparse input.
    #
    # Fix: X was previously constructed as a csc_matrix up front, so the
    # "dense" estimator was also fit on sparse data and the dense code path
    # was never exercised. Start from a dense ndarray and convert to sparse
    # only for the sparse estimator.
    X = np.array([[-1.], [0.], [1.]])
    y = [-1, 0, 1]
    clf_dense = ElasticNet(fit_intercept=True, normalize=True)
    clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
    clf_dense.fit(X, y)
    X = sp.csc_matrix(X)
    clf_sparse.fit(X, y)
    assert_almost_equal(clf_dense.dual_gap_, 0)
    assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
    # An all-zero sparse design matrix must fit cleanly to the null model
    # (zero coefficients, zero predictions, converged dual gap).
    X_zero = sp.csc_matrix((3, 1))
    targets = [0, 0, 0]
    T_test = np.array([[1], [2], [3]])

    model = Lasso().fit(X_zero, targets)
    predictions = model.predict(T_test)

    assert_array_almost_equal(model.coef_, [0])
    assert_array_almost_equal(predictions, [0, 0, 0])
    assert_almost_equal(model.dual_gap_, 0)
def test_enet_toy_list_input():
    # Test ElasticNet for various values of alpha and l1_ratio with list X

    X = np.array([[-1], [0], [1]])
    X = sp.csc_matrix(X)
    Y = [-1, 0, 1]  # just a straight line
    T = np.array([[2], [3], [4]])  # test sample

    # this should be the same as unregularized least squares
    clf = ElasticNet(alpha=0, l1_ratio=1.0)
    # catch warning about alpha=0.
    # this is discouraged but should work.
    ignore_warnings(clf.fit)(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)

    # Mixed L1/L2 penalty: coefficients are shrunk towards zero, so the
    # slope drops below 1 and predictions fall below the identity line.
    clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)

    # Stronger L1 share shrinks the slope further.
    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.45454], 3)
    assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
    assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
    # Test ElasticNet for various values of alpha and l1_ratio with sparse X
    f = ignore_warnings
    # training samples: a hand-built LIL matrix with an explicit zero row
    X = sp.lil_matrix((3, 1))
    X[0, 0] = -1
    # X[1, 0] = 0
    X[2, 0] = 1
    Y = [-1, 0, 1]  # just a straight line (the identity function)

    # test samples
    T = sp.lil_matrix((3, 1))
    T[0, 0] = 2
    T[1, 0] = 3
    T[2, 0] = 4

    # this should be the same as lasso
    clf = ElasticNet(alpha=0, l1_ratio=1.0)
    f(clf.fit)(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)

    # Regularized fits: expected values mirror test_enet_toy_list_input,
    # confirming sparse input yields the same solution path.
    clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)

    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.45454], 3)
    assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
    assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
                     positive=False, n_targets=1):
    """Build an ill-posed sparse linear regression problem.

    Many noisy features, comparatively few samples, and ~50% zeros in the
    design matrix. Returns (X, y) with X as a CSC sparse matrix.
    """
    rng = np.random.RandomState(seed)

    # Ground-truth weights: only the first n_informative rows are non-zero.
    # NOTE: the RNG draw order (w, X, mask) must not change, to keep results
    # reproducible for a given seed.
    w = rng.randn(n_features, n_targets)
    w[n_informative:] = 0.0
    if positive:
        w = np.abs(w)

    X = rng.randn(n_samples, n_features)
    zero_mask = rng.uniform(size=(n_samples, n_features))
    X[zero_mask > 0.5] = 0.0  # 50% of zeros in input signal

    # Noise-free labels from the ground-truth model.
    y = np.dot(X, w)

    X = sp.csc_matrix(X)
    if n_targets == 1:
        y = np.ravel(y)
    return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
    # Shared helper: fit the same ElasticNet configuration on sparse and
    # dense views of one dataset and check both converge to the same model.
    n_samples, n_features, max_iter = 100, 100, 1000
    n_informative = 10

    X, y = make_sparse_data(n_samples, n_features, n_informative,
                            positive=positive)

    # First half for testing, second half for training.
    X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
    y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]

    s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
                       max_iter=max_iter, tol=1e-7, positive=positive,
                       warm_start=True)
    s_clf.fit(X_train, y_train)

    assert_almost_equal(s_clf.dual_gap_, 0, 4)
    assert_greater(s_clf.score(X_test, y_test), 0.85)

    # check the convergence is the same as the dense version
    d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
                       max_iter=max_iter, tol=1e-7, positive=positive,
                       warm_start=True)
    d_clf.fit(X_train.toarray(), y_train)

    assert_almost_equal(d_clf.dual_gap_, 0, 4)
    assert_greater(d_clf.score(X_test, y_test), 0.85)

    assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
    assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)

    # check that the coefs are sparse
    assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
    # Exercise the sparse/dense agreement helper over all four
    # (alpha, fit_intercept, positive) configurations.
    configurations = [
        (0.1, False, False),
        (0.1, True, False),
        (1e-3, False, True),
        (1e-3, True, True),
    ]
    for alpha, fit_intercept, positive in configurations:
        _test_sparse_enet_not_as_toy_dataset(alpha=alpha,
                                             fit_intercept=fit_intercept,
                                             positive=positive)
def test_sparse_lasso_not_as_toy_dataset():
    # Lasso analogue of the ElasticNet sparse/dense agreement test.
    n_samples = 100
    max_iter = 1000
    n_informative = 10
    X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)

    # First half for testing, second half for training.
    X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
    y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]

    s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
    s_clf.fit(X_train, y_train)
    assert_almost_equal(s_clf.dual_gap_, 0, 4)
    assert_greater(s_clf.score(X_test, y_test), 0.85)

    # check the convergence is the same as the dense version
    d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
    d_clf.fit(X_train.toarray(), y_train)
    assert_almost_equal(d_clf.dual_gap_, 0, 4)
    assert_greater(d_clf.score(X_test, y_test), 0.85)

    # check that the coefs are sparse: Lasso should recover exactly the
    # informative support on this problem.
    assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
    # Fitting a multi-target problem at once must match fitting each
    # target independently.
    n_targets = 3
    X, y = make_sparse_data(n_targets=n_targets)

    estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
    # XXX: There is a bug when precompute is not None!
    estimator.fit(X, y)
    coef, intercept, dual_gap = (estimator.coef_,
                                 estimator.intercept_,
                                 estimator.dual_gap_)

    for k in range(n_targets):
        estimator.fit(X, y[:, k])
        assert_array_almost_equal(coef[k, :], estimator.coef_)
        assert_array_almost_equal(intercept[k], estimator.intercept_)
        assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
    # ElasticNetCV must honor its constructor parameters and yield the same
    # MSE path for sparse and dense input.
    X, y = make_sparse_data()
    max_iter = 50
    n_alphas = 10
    clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
                       l1_ratio=0.5, fit_intercept=False)
    ignore_warnings(clf.fit)(X, y)  # new params
    assert_almost_equal(0.5, clf.l1_ratio)
    assert_equal(n_alphas, clf.n_alphas)
    assert_equal(n_alphas, len(clf.alphas_))
    sparse_mse_path = clf.mse_path_
    ignore_warnings(clf.fit)(X.toarray(), y)  # compare with dense data
    assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
    # Sparse and dense inputs must lead the CV estimators to the same
    # selected alpha, intercept, MSE path and alpha grid.
    X, y = make_sparse_data(n_samples=40, n_features=10)
    for normalize in [True, False]:
        # Same fold counts as the original test: ElasticNetCV uses 5 folds,
        # LassoCV uses 4.
        for estimator_cls, n_folds in ((ElasticNetCV, 5), (LassoCV, 4)):
            sparse_cv = estimator_cls(max_iter=100, cv=n_folds,
                                      normalize=normalize)
            ignore_warnings(sparse_cv.fit)(X, y)
            dense_cv = estimator_cls(max_iter=100, cv=n_folds,
                                     normalize=normalize)
            ignore_warnings(dense_cv.fit)(X.toarray(), y)
            assert_almost_equal(sparse_cv.alpha_, dense_cv.alpha_, 7)
            assert_almost_equal(sparse_cv.intercept_, dense_cv.intercept_, 7)
            assert_array_almost_equal(sparse_cv.mse_path_, dense_cv.mse_path_)
            assert_array_almost_equal(sparse_cv.alphas_, dense_cv.alphas_)
| bsd-3-clause |
aleksandr-bakanov/astropy | astropy/visualization/wcsaxes/ticklabels.py | 4 | 8121 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib import rcParams
from matplotlib.text import Text
from .frame import RectangularFrame
def sort_using(X, Y):
    """Return the elements of ``X`` reordered by ascending values of ``Y``.

    Sorting is keyed on ``Y`` alone (and is stable), so ties in ``Y``
    preserve the original relative order of ``X``. The previous
    implementation sorted the ``(y, x)`` pairs directly, which compared
    the ``x`` values themselves on ties in ``Y`` — raising ``TypeError``
    for non-orderable elements and reordering equal-key items.
    """
    return [x for _, x in sorted(zip(Y, X), key=lambda pair: pair[0])]
class TickLabels(Text):
    def __init__(self, frame, *args, **kwargs):
        # Start with empty per-axis label bookkeeping before Text.__init__
        # runs, since other setup below may consult these attributes.
        self.clear()
        self._frame = frame
        super().__init__(*args, **kwargs)
        self.set_clip_on(True)
        self.set_visible_axes('all')
        # Default padding mirrors matplotlib's x-tick configuration.
        self.set_pad(rcParams['xtick.major.pad'])
        self._exclude_overlapping = False

        # Check rcParams

        if 'color' not in kwargs:
            self.set_color(rcParams['xtick.color'])

        if 'size' not in kwargs:
            self.set_size(rcParams['xtick.labelsize'])
def clear(self):
self.world = {}
self.pixel = {}
self.angle = {}
self.text = {}
self.disp = {}
def add(self, axis, world, pixel, angle, text, axis_displacement):
if axis not in self.world:
self.world[axis] = [world]
self.pixel[axis] = [pixel]
self.angle[axis] = [angle]
self.text[axis] = [text]
self.disp[axis] = [axis_displacement]
else:
self.world[axis].append(world)
self.pixel[axis].append(pixel)
self.angle[axis].append(angle)
self.text[axis].append(text)
self.disp[axis].append(axis_displacement)
def sort(self):
"""
Sort by axis displacement, which allows us to figure out which parts
of labels to not repeat.
"""
for axis in self.world:
self.world[axis] = sort_using(self.world[axis], self.disp[axis])
self.pixel[axis] = sort_using(self.pixel[axis], self.disp[axis])
self.angle[axis] = sort_using(self.angle[axis], self.disp[axis])
self.text[axis] = sort_using(self.text[axis], self.disp[axis])
self.disp[axis] = sort_using(self.disp[axis], self.disp[axis])
def simplify_labels(self):
"""
Figure out which parts of labels can be dropped to avoid repetition.
"""
self.sort()
for axis in self.world:
t1 = self.text[axis][0]
for i in range(1, len(self.world[axis])):
t2 = self.text[axis][i]
if len(t1) != len(t2):
t1 = self.text[axis][i]
continue
start = 0
# In the following loop, we need to ignore the last character,
# hence the len(t1) - 1. This is because if we have two strings
# like 13d14m15s we want to make sure that we keep the last
# part (15s) even if the two labels are identical.
for j in range(len(t1) - 1):
if t1[j] != t2[j]:
break
if t1[j] not in '-0123456789.':
start = j + 1
t1 = self.text[axis][i]
if start != 0:
starts_dollar = self.text[axis][i].startswith('$')
self.text[axis][i] = self.text[axis][i][start:]
if starts_dollar:
self.text[axis][i] = '$' + self.text[axis][i]
def set_pad(self, value):
self._pad = value
def get_pad(self):
return self._pad
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
def get_visible_axes(self):
if self._visible_axes == 'all':
return self.world.keys()
else:
return [x for x in self._visible_axes if x in self.world]
def set_exclude_overlapping(self, exclude_overlapping):
self._exclude_overlapping = exclude_overlapping
def draw(self, renderer, bboxes, ticklabels_bbox, tick_out_size):
if not self.get_visible():
return
self.simplify_labels()
text_size = renderer.points_to_pixels(self.get_size())
for axis in self.get_visible_axes():
for i in range(len(self.world[axis])):
# In the event that the label is empty (which is not expected
# but could happen in unforeseen corner cases), we should just
# skip to the next label.
if self.text[axis][i] == '':
continue
self.set_text(self.text[axis][i])
x, y = self.pixel[axis][i]
pad = renderer.points_to_pixels(self.get_pad() + tick_out_size)
if isinstance(self._frame, RectangularFrame):
# This is just to preserve the current results, but can be
# removed next time the reference images are re-generated.
if np.abs(self.angle[axis][i]) < 45.:
ha = 'right'
va = 'bottom'
dx = -pad
dy = -text_size * 0.5
elif np.abs(self.angle[axis][i] - 90.) < 45:
ha = 'center'
va = 'bottom'
dx = 0
dy = -text_size - pad
elif np.abs(self.angle[axis][i] - 180.) < 45:
ha = 'left'
va = 'bottom'
dx = pad
dy = -text_size * 0.5
else:
ha = 'center'
va = 'bottom'
dx = 0
dy = pad
self.set_position((x + dx, y + dy))
self.set_ha(ha)
self.set_va(va)
else:
# This is the more general code for arbitrarily oriented
# axes
# Set initial position and find bounding box
self.set_position((x, y))
bb = super().get_window_extent(renderer)
# Find width and height, as well as angle at which we
# transition which side of the label we use to anchor the
# label.
width = bb.width
height = bb.height
# Project axis angle onto bounding box
ax = np.cos(np.radians(self.angle[axis][i]))
ay = np.sin(np.radians(self.angle[axis][i]))
# Set anchor point for label
if np.abs(self.angle[axis][i]) < 45.:
dx = width
dy = ay * height
elif np.abs(self.angle[axis][i] - 90.) < 45:
dx = ax * width
dy = height
elif np.abs(self.angle[axis][i] - 180.) < 45:
dx = -width
dy = ay * height
else:
dx = ax * width
dy = -height
dx *= 0.5
dy *= 0.5
# Find normalized vector along axis normal, so as to be
# able to nudge the label away by a constant padding factor
dist = np.hypot(dx, dy)
ddx = dx / dist
ddy = dy / dist
dx += ddx * pad
dy += ddy * pad
self.set_position((x - dx, y - dy))
self.set_ha('center')
self.set_va('center')
bb = super().get_window_extent(renderer)
# TODO: the problem here is that we might get rid of a label
# that has a key starting bit such as -0:30 where the -0
# might be dropped from all other labels.
if not self._exclude_overlapping or bb.count_overlaps(bboxes) == 0:
super().draw(renderer)
bboxes.append(bb)
ticklabels_bbox[axis].append(bb)
| bsd-3-clause |
offirt/SMS-Broadcast | sms_broadcast.py | 1 | 2314 | import pandas as pd
import sys, getopt
from twilio.rest import Client
def main(argv):
    """Read contacts from a CSV file and broadcast a templated SMS to each.

    ``<name>`` occurrences in the template are replaced with each contact's
    name; phone numbers are prefixed with ``+``.
    """
    (csv_file, name_col, phone_col, template,
     send_real, sid, token, sender) = parseArgs(argv)
    contacts = pd.read_csv(csv_file)[[name_col, phone_col]]
    client = Client(sid, token)
    for _, contact in contacts.iterrows():
        body = template.replace('<name>', contact[name_col])
        recipient = '+{}'.format(contact[phone_col])
        sendSms(recipient, body, send_real, client, sender)
def sendSms(phone, text, sendReal, client, twilioFrom):
    """Log the outgoing SMS; deliver it via Twilio only when *sendReal* is set.

    With ``sendReal=False`` this is a dry run that just prints the message.
    """
    print('Sending SMS to {}. text: {}'.format(phone, text))
    if not sendReal:
        return
    message = client.messages.create(
        to=phone,
        from_=twilioFrom,
        body=text)
    print(message.sid)
def parseArgs(argv):
    """Parse command-line options for the SMS broadcast script.

    Returns a tuple ``(file, nameColumn, phoneColumn, template, sendReal,
    twilioSid, twilioToken, twilioFrom)``. Prints usage and exits on a
    malformed option string (exit code 2) or when ``-h`` is given.
    """
    try:
        # BUG FIX: the twilio_* long options take a value, so they must be
        # declared with a trailing '=' (matching their short forms i:, o:, r:).
        # Without it, `--twilio_sid SID` parsed to an empty value.
        opts, args = getopt.getopt(argv, "hsf:t:n:p:i:o:r:",
                                   ["send_real", "file=", "template=", "name_column=", "phone_column=", "twilio_sid=", "twilio_token=", "twilio_from="])
    except getopt.GetoptError:
        printHelp()
        sys.exit(2)
    # Defaults: name/phone columns mirror the expected CSV headers.
    file = ''
    template = ''
    nameColumn = 'Name'
    phoneColumn = 'Phone number'
    sendReal = False
    twilioSid = ''
    twilioToken = ''
    twilioFrom = ''
    for opt, arg in opts:
        if opt == '-h':
            printHelp()
            sys.exit()
        elif opt in ("-s", "--send_real"):
            sendReal = True
        elif opt in ("-f", "--file"):
            file = arg
        elif opt in ("-t", "--template"):
            template = arg
        elif opt in ("-n", "--name_column"):
            nameColumn = arg
        elif opt in ("-p", "--phone_column"):
            phoneColumn = arg
        elif opt in ("-i", "--twilio_sid"):
            twilioSid = arg
        elif opt in ("-o", "--twilio_token"):
            twilioToken = arg
        elif opt in ("-r", "--twilio_from"):
            twilioFrom = arg
    return file, nameColumn, phoneColumn, template, sendReal, twilioSid, twilioToken, twilioFrom
def printHelp():
    """Print command-line usage for the script."""
    usage = ('sms_broadcast.py -s -f <csv_file> -t <text_template> -n <name_column_name> ' +
             '-p <phone_number_column_name> -i <twilio_sid> -o <twilio_token> -r <twilio_from>')
    print(usage)
# Script entry point: forward CLI arguments (without the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| mit |
amolkahat/pandas | asv_bench/benchmarks/indexing.py | 5 | 10167 | import warnings
import numpy as np
import pandas.util.testing as tm
from pandas import (Series, DataFrame, Panel, MultiIndex,
Int64Index, UInt64Index, Float64Index,
IntervalIndex, CategoricalIndex,
IndexSlice, concat, date_range)
class NumericSeriesIndexing(object):
    """ASV benchmarks for indexing a Series backed by a numeric index."""
    # Parametrized over index dtype and whether the index contains a duplicate.
    params = [
        (Int64Index, UInt64Index, Float64Index),
        ('unique_monotonic_inc', 'nonunique_monotonic_inc'),
    ]
    param_names = ['index_dtype', 'index_structure']
    def setup(self, index, index_structure):
        N = 10**6
        indices = {
            'unique_monotonic_inc': index(range(N)),
            # Duplicate the value 54 to make the index non-unique.
            'nonunique_monotonic_inc': index(
                list(range(55)) + [54] + list(range(55, N - 1))),
        }
        self.data = Series(np.random.rand(N), index=indices[index_structure])
        self.array = np.arange(10000)
        self.array_list = self.array.tolist()
    def time_getitem_scalar(self, index, index_structure):
        self.data[800000]
    def time_getitem_slice(self, index, index_structure):
        self.data[:800000]
    def time_getitem_list_like(self, index, index_structure):
        self.data[[800000]]
    def time_getitem_array(self, index, index_structure):
        self.data[self.array]
    def time_getitem_lists(self, index, index_structure):
        self.data[self.array_list]
    def time_iloc_array(self, index, index_structure):
        self.data.iloc[self.array]
    def time_iloc_list_like(self, index, index_structure):
        self.data.iloc[[800000]]
    def time_iloc_scalar(self, index, index_structure):
        self.data.iloc[800000]
    def time_iloc_slice(self, index, index_structure):
        self.data.iloc[:800000]
    def time_ix_array(self, index, index_structure):
        self.data.ix[self.array]
    def time_ix_list_like(self, index, index_structure):
        self.data.ix[[800000]]
    def time_ix_scalar(self, index, index_structure):
        self.data.ix[800000]
    def time_ix_slice(self, index, index_structure):
        self.data.ix[:800000]
    def time_loc_array(self, index, index_structure):
        self.data.loc[self.array]
    def time_loc_list_like(self, index, index_structure):
        self.data.loc[[800000]]
    def time_loc_scalar(self, index, index_structure):
        self.data.loc[800000]
    def time_loc_slice(self, index, index_structure):
        self.data.loc[:800000]
class NonNumericSeriesIndexing(object):
    """ASV benchmarks for indexing a Series with string/datetime indexes."""
    params = [
        ('string', 'datetime'),
        ('unique_monotonic_inc', 'nonunique_monotonic_inc'),
    ]
    param_names = ['index_dtype', 'index_structure']
    def setup(self, index, index_structure):
        N = 10**6
        indexes = {'string': tm.makeStringIndex(N),
                   'datetime': date_range('1900', periods=N, freq='s')}
        index = indexes[index]
        if index_structure == 'nonunique_monotonic_inc':
            # Duplicate element 2 while keeping the index length N.
            index = index.insert(item=index[2], loc=2)[:-1]
        self.s = Series(np.random.rand(N), index=index)
        self.lbl = index[80000]
    def time_getitem_label_slice(self, index, index_structure):
        self.s[:self.lbl]
    def time_getitem_pos_slice(self, index, index_structure):
        self.s[:80000]
    def time_get_value(self, index, index_structure):
        # get_value is deprecated; suppress the warning during timing.
        with warnings.catch_warnings(record=True):
            self.s.get_value(self.lbl)
    def time_getitem_scalar(self, index, index_structure):
        self.s[self.lbl]
    def time_getitem_list_like(self, index, index_structure):
        self.s[[self.lbl]]
class DataFrameStringIndexing(object):
    """ASV benchmarks for scalar and boolean indexing of a string-indexed DataFrame."""
    def setup(self):
        index = tm.makeStringIndex(1000)
        columns = tm.makeStringIndex(30)
        self.df = DataFrame(np.random.randn(1000, 30), index=index,
                            columns=columns)
        self.idx_scalar = index[100]
        self.col_scalar = columns[10]
        self.bool_indexer = self.df[self.col_scalar] > 0
        # Same mask with object dtype, to measure the slow path.
        self.bool_obj_indexer = self.bool_indexer.astype(object)
    def time_get_value(self):
        # get_value is deprecated; suppress the warning during timing.
        with warnings.catch_warnings(record=True):
            self.df.get_value(self.idx_scalar, self.col_scalar)
    def time_ix(self):
        self.df.ix[self.idx_scalar, self.col_scalar]
    def time_loc(self):
        self.df.loc[self.idx_scalar, self.col_scalar]
    def time_getitem_scalar(self):
        self.df[self.col_scalar][self.idx_scalar]
    def time_boolean_rows(self):
        self.df[self.bool_indexer]
    def time_boolean_rows_object(self):
        self.df[self.bool_obj_indexer]
class DataFrameNumericIndexing(object):
    """ASV benchmarks for positional/label indexing of a numeric DataFrame."""
    def setup(self):
        self.idx_dupe = np.array(range(30)) * 99
        self.df = DataFrame(np.random.randn(10000, 5))
        # Concatenation produces duplicate index labels for the _dups cases.
        self.df_dup = concat([self.df, 2 * self.df, 3 * self.df])
        self.bool_indexer = [True] * 5000 + [False] * 5000
    def time_iloc_dups(self):
        self.df_dup.iloc[self.idx_dupe]
    def time_loc_dups(self):
        self.df_dup.loc[self.idx_dupe]
    def time_iloc(self):
        self.df.iloc[:100, 0]
    def time_loc(self):
        self.df.loc[:100, 0]
    def time_bool_indexer(self):
        self.df[self.bool_indexer]
class Take(object):
    """ASV benchmarks for Series.take with int and datetime indexes."""
    params = ['int', 'datetime']
    param_names = ['index']
    def setup(self, index):
        N = 100000
        indexes = {'int': Int64Index(np.arange(N)),
                   'datetime': date_range('2011-01-01', freq='S', periods=N)}
        index = indexes[index]
        self.s = Series(np.random.rand(N), index=index)
        self.indexer = [True, False, True, True, False] * 20000
    def time_take(self, index):
        self.s.take(self.indexer)
class MultiIndexing(object):
    """ASV benchmarks for indexing MultiIndex-backed Series/DataFrames."""
    def setup(self):
        mi = MultiIndex.from_product([range(1000), range(1000)])
        self.s = Series(np.random.randn(1000000), index=mi)
        self.df = DataFrame(self.s)
        n = 100000
        # Four-level integer MultiIndex used for IndexSlice-based slicing.
        self.mdt = DataFrame({'A': np.random.choice(range(10000, 45000, 1000),
                                                    n),
                              'B': np.random.choice(range(10, 400), n),
                              'C': np.random.choice(range(1, 150), n),
                              'D': np.random.choice(range(10000, 45000), n),
                              'x': np.random.choice(range(400), n),
                              'y': np.random.choice(range(25), n)})
        self.idx = IndexSlice[20000:30000, 20:30, 35:45, 30000:40000]
        self.mdt = self.mdt.set_index(['A', 'B', 'C', 'D']).sort_index()
    def time_series_ix(self):
        self.s.ix[999]
    def time_frame_ix(self):
        self.df.ix[999]
    def time_index_slice(self):
        self.mdt.loc[self.idx, :]
class IntervalIndexing(object):
    """ASV benchmarks for indexing an IntervalIndex-backed Series."""
    def setup_cache(self):
        # setup_cache: the fixture is built once and shared across timings.
        idx = IntervalIndex.from_breaks(np.arange(1000001))
        monotonic = Series(np.arange(1000000), index=idx)
        return monotonic
    def time_getitem_scalar(self, monotonic):
        monotonic[80000]
    def time_loc_scalar(self, monotonic):
        monotonic.loc[80000]
    def time_getitem_list(self, monotonic):
        monotonic[80000:]
    def time_loc_list(self, monotonic):
        monotonic.loc[80000:]
class CategoricalIndexIndexing(object):
    """ASV benchmarks for indexing a CategoricalIndex directly."""
    params = ['monotonic_incr', 'monotonic_decr', 'non_monotonic']
    param_names = ['index']
    def setup(self, index):
        N = 10**5
        values = list('a' * N + 'b' * N + 'c' * N)
        indices = {
            'monotonic_incr': CategoricalIndex(values),
            'monotonic_decr': CategoricalIndex(reversed(values)),
            'non_monotonic': CategoricalIndex(list('abc' * N))}
        self.data = indices[index]
        self.int_scalar = 10000
        self.int_list = list(range(10000))
        self.cat_scalar = 'b'
        self.cat_list = ['a', 'c']
    def time_getitem_scalar(self, index):
        self.data[self.int_scalar]
    def time_getitem_slice(self, index):
        self.data[:self.int_scalar]
    def time_getitem_list_like(self, index):
        self.data[[self.int_scalar]]
    def time_getitem_list(self, index):
        self.data[self.int_list]
    def time_getitem_bool_array(self, index):
        self.data[self.data == self.cat_scalar]
    def time_get_loc_scalar(self, index):
        self.data.get_loc(self.cat_scalar)
    def time_get_indexer_list(self, index):
        self.data.get_indexer(self.cat_list)
class PanelIndexing(object):
    """ASV benchmarks for subsetting a (deprecated) Panel with .ix."""
    def setup(self):
        # Panel is deprecated; suppress the warning while constructing it.
        with warnings.catch_warnings(record=True):
            self.p = Panel(np.random.randn(100, 100, 100))
            self.inds = range(0, 100, 10)
    def time_subset(self):
        with warnings.catch_warnings(record=True):
            self.p.ix[(self.inds, self.inds, self.inds)]
class MethodLookup(object):
    """ASV benchmarks for the cost of indexer attribute lookup on a Series."""
    def setup_cache(self):
        s = Series()
        return s
    def time_lookup_iloc(self, s):
        s.iloc
    def time_lookup_ix(self, s):
        s.ix
    def time_lookup_loc(self, s):
        s.loc
class GetItemSingleColumn(object):
    """ASV benchmarks for single-column __getitem__ on a DataFrame."""
    def setup(self):
        self.df_string_col = DataFrame(np.random.randn(3000, 1), columns=['A'])
        self.df_int_col = DataFrame(np.random.randn(3000, 1))
    def time_frame_getitem_single_column_label(self):
        self.df_string_col['A']
    def time_frame_getitem_single_column_int(self):
        self.df_int_col[0]
class AssignTimeseriesIndex(object):
    """ASV benchmark for assigning a DatetimeIndex as a new column."""
    def setup(self):
        N = 100000
        idx = date_range('1/1/2000', periods=N, freq='H')
        self.df = DataFrame(np.random.randn(N, 1), columns=['A'], index=idx)
    def time_frame_assign_timeseries_index(self):
        self.df['date'] = self.df.index
class InsertColumns(object):
    """ASV benchmarks comparing DataFrame.insert against setitem assignment."""
    def setup(self):
        self.N = 10**3
        self.df = DataFrame(index=range(self.N))
    def time_insert(self):
        # Fixed seed so both benchmarks generate identical data.
        np.random.seed(1234)
        for i in range(100):
            self.df.insert(0, i, np.random.randn(self.N),
                           allow_duplicates=True)
    def time_assign_with_setitem(self):
        np.random.seed(1234)
        for i in range(100):
            self.df[i] = np.random.randn(self.N)
from .pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
public-ink/public-ink | server/appengine/lib/matplotlib/tight_layout.py | 4 | 13206 | """
This module provides routines to adjust subplot params so that subplots are
nicely fit in the figure. In doing so, only axis labels, tick labels, axes
titles and offsetboxes that are anchored to axes are currently considered.
Internally, it assumes that the margins (left_margin, etc.) which are
differences between ax.get_tightbbox and ax.bbox are independent of axes
position. This may fail if Axes.adjustable is datalim. It will also fail
for some cases (for example, left or right margin is affected by xlabel).
"""
import warnings
import matplotlib
from matplotlib.transforms import TransformedBbox, Bbox
from matplotlib.font_manager import FontProperties
rcParams = matplotlib.rcParams
def _get_left(tight_bbox, axes_bbox):
return axes_bbox.xmin - tight_bbox.xmin
def _get_right(tight_bbox, axes_bbox):
return tight_bbox.xmax - axes_bbox.xmax
def _get_bottom(tight_bbox, axes_bbox):
return axes_bbox.ymin - tight_bbox.ymin
def _get_top(tight_bbox, axes_bbox):
return tight_bbox.ymax - axes_bbox.ymax
def auto_adjust_subplotpars(fig, renderer,
                            nrows_ncols,
                            num1num2_list,
                            subplot_list,
                            ax_bbox_list=None,
                            pad=1.08, h_pad=None, w_pad=None,
                            rect=None):
    """
    Return a dictionary of subplot parameters so that spacing between
    subplots are adjusted. Note that this function ignores the geometry
    information of the subplots themselves, and only uses what is given by
    the *nrows_ncols* and *num1num2_list* parameters. Also, the results
    could be incorrect if some subplots have ``adjustable=datalim``.

    Parameters:

    nrows_ncols
      number of rows and number of columns of the grid.

    num1num2_list
      list of (num1, num2) pairs specifying the area occupied by each
      subplot; num2 may be None for a subplot occupying a single cell.

    subplot_list
      list of subplots that will be used to calculate optimal subplot_params.

    pad : float
      padding between the figure edge and the edges of subplots, as a
      fraction of the font-size.

    h_pad, w_pad : float
      padding (height/width) between edges of adjacent subplots.
      Defaults to `pad`.

    rect
      [left, bottom, right, top] in normalized (0, 1) figure coordinates.
    """
    rows, cols = nrows_ncols
    # Convert the pads from font-size fractions to inches (72 pt per inch).
    pad_inches = pad * FontProperties(
        size=rcParams["font.size"]).get_size_in_points() / 72.
    if h_pad is not None:
        vpad_inches = h_pad * FontProperties(
            size=rcParams["font.size"]).get_size_in_points() / 72.
    else:
        vpad_inches = pad_inches
    if w_pad is not None:
        hpad_inches = w_pad * FontProperties(
            size=rcParams["font.size"]).get_size_in_points() / 72.
    else:
        hpad_inches = pad_inches
    if len(subplot_list) == 0:
        raise RuntimeError("")
    if len(num1num2_list) != len(subplot_list):
        raise RuntimeError("")
    if rect is None:
        margin_left = None
        margin_bottom = None
        margin_right = None
        margin_top = None
    else:
        margin_left, margin_bottom, _right, _top = rect
        # NOTE: falsy right/top values (including 0) are treated as unset.
        if _right:
            margin_right = 1. - _right
        else:
            margin_right = None
        if _top:
            margin_top = 1. - _top
        else:
            margin_top = None
    # One slot per gap between grid cells (edges included); each slot
    # accumulates the margins of the subplots adjacent to that gap.
    vspaces = [[] for i in range((rows + 1) * cols)]
    hspaces = [[] for i in range(rows * (cols + 1))]
    union = Bbox.union
    if ax_bbox_list is None:
        # Derive each subplot group's bbox from its original positions.
        ax_bbox_list = []
        for subplots in subplot_list:
            ax_bbox = union([ax.get_position(original=True)
                             for ax in subplots])
            ax_bbox_list.append(ax_bbox)
    for subplots, ax_bbox, (num1, num2) in zip(subplot_list,
                                               ax_bbox_list,
                                               num1num2_list):
        if all([not ax.get_visible() for ax in subplots]):
            continue
        tight_bbox_raw = union([ax.get_tightbbox(renderer) for ax in subplots
                                if ax.get_visible()])
        tight_bbox = TransformedBbox(tight_bbox_raw,
                                     fig.transFigure.inverted())
        row1, col1 = divmod(num1, cols)
        if num2 is None:
            # Single-cell subplot: register its four margins once.
            # left
            hspaces[row1 * (cols + 1) + col1].append(
                _get_left(tight_bbox, ax_bbox))
            # right
            hspaces[row1 * (cols + 1) + (col1 + 1)].append(
                _get_right(tight_bbox, ax_bbox))
            # top
            vspaces[row1 * cols + col1].append(
                _get_top(tight_bbox, ax_bbox))
            # bottom
            vspaces[(row1 + 1) * cols + col1].append(
                _get_bottom(tight_bbox, ax_bbox))
        else:
            # Spanning subplot: register its margins on every row/column
            # it touches.
            row2, col2 = divmod(num2, cols)
            for row_i in range(row1, row2 + 1):
                # left
                hspaces[row_i * (cols + 1) + col1].append(
                    _get_left(tight_bbox, ax_bbox))
                # right
                hspaces[row_i * (cols + 1) + (col2 + 1)].append(
                    _get_right(tight_bbox, ax_bbox))
            for col_i in range(col1, col2 + 1):
                # top
                vspaces[row1 * cols + col_i].append(
                    _get_top(tight_bbox, ax_bbox))
                # bottom
                vspaces[(row2 + 1) * cols + col_i].append(
                    _get_bottom(tight_bbox, ax_bbox))
    fig_width_inch, fig_height_inch = fig.get_size_inches()
    # margins can be negative for axes with aspect applied. And we
    # append + [0] to make minimum margins 0
    if not margin_left:
        margin_left = max([sum(s) for s in hspaces[::cols + 1]] + [0])
        margin_left += pad_inches / fig_width_inch
    if not margin_right:
        margin_right = max([sum(s) for s in hspaces[cols::cols + 1]] + [0])
        margin_right += pad_inches / fig_width_inch
    if not margin_top:
        margin_top = max([sum(s) for s in vspaces[:cols]] + [0])
        margin_top += pad_inches / fig_height_inch
    if not margin_bottom:
        margin_bottom = max([sum(s) for s in vspaces[-cols:]] + [0])
        margin_bottom += pad_inches / fig_height_inch
    kwargs = dict(left=margin_left,
                  right=1 - margin_right,
                  bottom=margin_bottom,
                  top=1 - margin_top)
    if cols > 1:
        # Largest inter-column gap, padded, then expressed relative to the
        # width of a single axes (matplotlib's wspace convention).
        hspace = max([sum(s)
                      for i in range(rows)
                      for s
                      in hspaces[i * (cols + 1) + 1:(i + 1) * (cols + 1) - 1]])
        hspace += hpad_inches / fig_width_inch
        h_axes = ((1 - margin_right - margin_left) -
                  hspace * (cols - 1)) / cols
        kwargs["wspace"] = hspace / h_axes
    if rows > 1:
        # Same for the largest inter-row gap (hspace convention).
        vspace = max([sum(s) for s in vspaces[cols:-cols]])
        vspace += vpad_inches / fig_height_inch
        v_axes = ((1 - margin_top - margin_bottom) -
                  vspace * (rows - 1)) / rows
        kwargs["hspace"] = vspace / v_axes
    return kwargs
def get_renderer(fig):
    """Return a renderer for *fig*, preferring the figure's cached one.

    Falls back to the canvas renderer, and as a last resort to a fresh
    Agg canvas.
    """
    cached = fig._cachedRenderer
    if cached:
        return cached
    canvas = fig.canvas
    if canvas and hasattr(canvas, "get_renderer"):
        return canvas.get_renderer()
    # not sure if this can happen
    warnings.warn("tight_layout : falling back to Agg renderer")
    from matplotlib.backends.backend_agg import FigureCanvasAgg
    canvas = FigureCanvasAgg(fig)
    return canvas.get_renderer()
def get_subplotspec_list(axes_list, grid_spec=None):
    """
    Return a list with one subplotspec per axes in *axes_list*.

    Axes that do not support subplotspec map to None. If *grid_spec* is
    given, None is also used for specs not belonging to that grid_spec.
    """
    def _spec_for(ax):
        # The axes locator, when present, takes precedence over the axes.
        source = ax.get_axes_locator()
        if source is None:
            source = ax
        if not hasattr(source, "get_subplotspec"):
            return None
        spec = source.get_subplotspec().get_topmost_subplotspec()
        gs = spec.get_gridspec()
        if grid_spec is not None:
            return spec if gs == grid_spec else None
        if gs.locally_modified_subplot_params():
            return None
        return spec
    return [_spec_for(ax) for ax in axes_list]
def get_tight_layout_figure(fig, axes_list, subplotspec_list, renderer,
                            pad=1.08, h_pad=None, w_pad=None, rect=None):
    """
    Return subplot parameters for tight-layouted-figure with specified
    padding.

    Parameters:

    *fig* : figure instance

    *axes_list* : a list of axes

    *subplotspec_list* : a list of subplotspec associated with each
        axes in axes_list

    *renderer* : renderer instance

    *pad* : float
        padding between the figure edge and the edges of subplots,
        as a fraction of the font-size.

    *h_pad*, *w_pad* : float
        padding (height/width) between edges of adjacent subplots.
        Defaults to `pad`.

    *rect* : if rect is given, it is interpreted as a rectangle
        (left, bottom, right, top) in the normalized figure
        coordinate that the whole subplots area (including
        labels) will fit into. Default is (0, 0, 1, 1).
    """
    subplot_list = []
    nrows_list = []
    ncols_list = []
    ax_bbox_list = []
    subplot_dict = {}  # multiple axes can share
                       # same subplot_interface (e.g., axes_grid1). Thus
                       # we need to join them together.
    subplotspec_list2 = []
    # Group axes by their subplotspec; axes without one are ignored.
    for ax, subplotspec in zip(axes_list,
                               subplotspec_list):
        if subplotspec is None:
            continue
        subplots = subplot_dict.setdefault(subplotspec, [])
        if not subplots:
            # First axes seen for this subplotspec: record its geometry.
            myrows, mycols, _, _ = subplotspec.get_geometry()
            nrows_list.append(myrows)
            ncols_list.append(mycols)
            subplotspec_list2.append(subplotspec)
            subplot_list.append(subplots)
            ax_bbox_list.append(subplotspec.get_position(fig))
        subplots.append(ax)
    max_nrows = max(nrows_list)
    max_ncols = max(ncols_list)
    # Rescale every subplotspec's cell numbers onto the common
    # (max_nrows x max_ncols) grid; grids must divide it evenly.
    num1num2_list = []
    for subplotspec in subplotspec_list2:
        rows, cols, num1, num2 = subplotspec.get_geometry()
        div_row, mod_row = divmod(max_nrows, rows)
        div_col, mod_col = divmod(max_ncols, cols)
        if (mod_row != 0) or (mod_col != 0):
            raise RuntimeError("")
        rowNum1, colNum1 = divmod(num1, cols)
        if num2 is None:
            rowNum2, colNum2 = rowNum1, colNum1
        else:
            rowNum2, colNum2 = divmod(num2, cols)
        num1num2_list.append((rowNum1 * div_row * max_ncols +
                              colNum1 * div_col,
                              ((rowNum2 + 1) * div_row - 1) * max_ncols +
                              (colNum2 + 1) * div_col - 1))
    kwargs = auto_adjust_subplotpars(fig, renderer,
                                     nrows_ncols=(max_nrows, max_ncols),
                                     num1num2_list=num1num2_list,
                                     subplot_list=subplot_list,
                                     ax_bbox_list=ax_bbox_list,
                                     pad=pad, h_pad=h_pad, w_pad=w_pad)
    if rect is not None:
        # if rect is given, the whole subplots area (including
        # labels) will fit into the rect instead of the
        # figure. Note that the rect argument of
        # *auto_adjust_subplotpars* specify the area that will be
        # covered by the total area of axes.bbox. Thus we call
        # auto_adjust_subplotpars twice, where the second run
        # with adjusted rect parameters.
        left, bottom, right, top = rect
        if left is not None:
            left += kwargs["left"]
        if bottom is not None:
            bottom += kwargs["bottom"]
        if right is not None:
            right -= (1 - kwargs["right"])
        if top is not None:
            top -= (1 - kwargs["top"])
        #if h_pad is None: h_pad = pad
        #if w_pad is None: w_pad = pad
        kwargs = auto_adjust_subplotpars(fig, renderer,
                                         nrows_ncols=(max_nrows, max_ncols),
                                         num1num2_list=num1num2_list,
                                         subplot_list=subplot_list,
                                         ax_bbox_list=ax_bbox_list,
                                         pad=pad, h_pad=h_pad, w_pad=w_pad,
                                         rect=(left, bottom, right, top))
    return kwargs
| gpl-3.0 |
JunkieStyle/corgi | corgi/experiment.py | 1 | 14841 | import time
import xgboost as xgb
import lightgbm as lgb
import catboost as cat
import pandas as pd
import numpy as np
from hyperopt import fmin, tpe, Trials, STATUS_OK, STATUS_FAIL, hp
import os
from os.path import join
from os import listdir
from sklearn.model_selection import StratifiedKFold, TimeSeriesSplit, KFold
from sklearn.utils import check_array, check_X_y
from copy import deepcopy
class ScikitDataset:
    """Validated (features, optional target) pair for scikit-learn models.

    Accepts a DataFrame or ndarray for X and a Series/DataFrame/ndarray/None
    for y, running sklearn's check_array/check_X_y validation on the values.
    """
    def __init__(self, x, y=None):
        # Feature matrix: keep real column names for DataFrames, synthesize
        # 'f1'..'fN' for bare arrays.
        if isinstance(x, pd.DataFrame):
            self.features = x.columns
            self.x = check_array(x.values)
        elif isinstance(x, np.ndarray):
            self.x = check_array(x)
            self.features = ['f{}'.format(col + 1) for col in range(x.shape[1])]
        else:
            raise TypeError('X should be an instance of pandas.DataFrame or numpy.ndarray')
        # Target: reduce the supported containers to a 1-d array (or None
        # for an unlabeled test sample).
        if isinstance(y, pd.Series):
            target = y.values
        elif isinstance(y, pd.DataFrame):
            target = y.iloc[:, 0].values
        elif isinstance(y, np.ndarray):
            target = y
        elif y is None:
            target = None
        else:
            raise TypeError('Y should be an instance of pandas.Series, numpy.ndarray or None for test sample')
        self.y = None if target is None else check_array(target, ensure_2d=False)
        if y is not None:
            # Joint validation (consistent lengths etc.) when labeled.
            self.x, self.y = check_X_y(self.x, self.y)
    def get_label(self):
        """Return the validated target array (None for test samples)."""
        return self.y
class BaseExperiment:
def __init__(self, data_path, learning_task, eval_metric, validation_type, greater_is_better=False, n_folds=None,
datetime_feature=None, output_path='', validation_seed=0):
self.learning_task = learning_task
self.data_path = data_path
self.eval_metric = eval_metric
self.validation_type = validation_type
self.n_folds = n_folds
self.datetime_feature = datetime_feature
self.validation_seed = validation_seed
self.output_path = output_path
self.greater_is_better = greater_is_better
self.best_params = None
if self.learning_task not in ['classification', 'regression']:
raise ValueError('Task type must be "classification" or "regression"')
if not callable(eval_metric):
raise ValueError('Wrong metric provided: eval_metric must be callable')
if validation_type == 'cv':
if self.n_folds is None:
raise ValueError('For "cv" validation type argument "n_folds" must be specified')
elif validation_type == 'datetime':
if self.datetime_feature is None:
raise ValueError('For "datetime" validation type argument "holdout_size" must be specified')
if self.n_folds is None:
raise ValueError('For "datetime" validation type argument "n_folds" must be specified')
else:
raise ValueError('Validation type must be "cv" or "datetime"')
def read_data(self):
files_in_data_dir = listdir(self.data_path)
if 'x_train.csv' not in files_in_data_dir:
raise ValueError('File "x_train.csv" is no found in directory "{0}"'.format(self.data_path))
else:
self.x = pd.read_csv(join(self.data_path, 'x_train.csv'))
if 'y_train.csv' not in files_in_data_dir:
raise ValueError('File "y_train.csv" is no found in directory "{0}"'.format(self.data_path))
else:
self.y = pd.read_csv(join(self.data_path, 'y_train.csv'))
if 'target' not in self.y.columns:
raise ValueError('File "y_train.csv" does not have "target" columns')
self.y = self.y.target
def make_splits(self):
if self.validation_type == 'cv':
if self.learning_task == 'classification':
self.cv = StratifiedKFold(n_splits=self.n_folds, shuffle=True, random_state=self.validation_seed)
else:
self.cv = KFold(n_splits=self.n_folds, shuffle=True, random_state=self.validation_seed)
elif self.validation_type == 'datetime':
self.x = self.x.sort_values(self.datetime_feature, ascending=True)
self.cv = TimeSeriesSplit(n_splits=self.n_folds)
else:
raise ValueError('Validation type must be "cv" or "datetime"')
cv_pairs = []
for train_idx, test_idx in self.cv.split(self.x, self.y):
x_train, x_test = self.x.iloc[train_idx], self.x.iloc[test_idx]
y_train, y_test = self.y.iloc[train_idx], self.y.iloc[test_idx]
dtrain = self._convert_to_dataset(x_train, y_train)
dtest = self._convert_to_dataset(x_test, y_test)
cv_pairs.append((dtrain, dtest))
return cv_pairs
def eval_metric(self, *args, **kwargs):
raise NotImplementedError('Method "eval_metric" is not implemented.')
def _fit_predict(self, dtrain, dtest, params):
raise NotImplementedError('Method "fit" is not implemented.')
def fit_predict(self, x_train, y_train, x_test):
raise NotImplementedError('Method "fit" is not implemented.')
def _convert_to_dataset(self, x, y):
raise NotImplementedError('Method "to_dataset" is not implemented.')
def _get_params(self):
raise NotImplementedError('Method "_get_params" is not implemented.')
def _get_seed_attribute_name(self):
raise NotImplementedError('Method "_get_seed_attribute_name is not implemented"')
def run(self, use_best_params=False, verbose=True):
if use_best_params and self.best_params is None:
raise AttributeError('There is no attribute "best_params". Use method "run_optimize" first')
self.read_data()
cv_pairs = self.make_splits()
params = self.best_params if use_best_params else None
result = self._run_cv(cv_pairs, None)
if verbose:
print('[SING]\t\teval_time={0:.2} sec\t{1}={2:.6f}'.format(
result['eval_time'], self.eval_metric.__name__, result['loss']))
return result
def run_bagging(self, seeds, use_best_params=False, verbose=True):
if use_best_params and self.best_params is None:
raise AttributeError('There is no attribute "best_params". Use method "run_optimize" first')
self.read_data()
cv_pairs = self.make_splits()
seed_name = self._get_seed_attribute_name()
self.bagging_results = []
self.bagging_preds = []
for i, seed in enumerate(seeds):
seed_params = self.best_params if use_best_params else {}
seed_params.update({seed_name: seed})
bag = self._run_cv(cv_pairs, seed_params)
self.bagging_results.append(bag)
self.bagging_preds.append(bag['preds'])
if verbose:
print('[{0}/{1}]\tseed={2}\teval_time={3:.2f} sec\t{4}={5:.6f}'.format(
i+1, len(seeds), seed, bag['eval_time'], self.eval_metric.__name__, bag['loss']))
self.bagging_preds = np.array(self.bagging_preds).mean(axis=0).T
self.bagging_loss = np.mean([self.eval_metric(dtest.get_label(), bag_pred)
for (_, dtest), bag_pred in zip(cv_pairs, self.bagging_preds.T)])
self.bagging_std = np.std([self.eval_metric(dtest.get_label(), bag_pred)
for (_, dtest), bag_pred in zip(cv_pairs, self.bagging_preds.T)])
self.bagging_eval_time = sum([bag['eval_time'] for bag in self.bagging_results])
if verbose:
print('-' * 80)
print('[TOTAL]\t\teval_time={0:.2f} sec\t{1}={2:.6f}\tstd={3:.3f}'.format(
self.bagging_eval_time, self.eval_metric.__name__, self.bagging_loss, self.bagging_std))
return self.bagging_results
def run_optimize(self, space, max_evals=5, seed=0, verbose=True):
self.read_data()
cv_pairs = self.make_splits()
self.space = space
self.trials = Trials()
self._hyperopt_eval_num, self.best_loss = 0, np.inf
self._hyperopt_max_evals = max_evals
start_time = time.time()
_ = fmin(fn=lambda params: self._run_cv(cv_pairs, params, opti=True, opti_verbose=verbose),
space=space, algo=tpe.suggest, max_evals=max_evals, trials=self.trials,
rstate=np.random.RandomState(seed=seed))
opti_time = time.time() - start_time
if verbose:
print('-' * 80)
print('[TOTAL]\t\teval_time={0:.2f} sec\tbest={1:.6f}\n'.format(opti_time, self.best_loss))
print(self.best_params)
# self.best_params = self.trials.best_trial['result']['params']
# self.best_n_estimators = self.trials.best_trial['result']['best_n_estimators']
return [trial['result'] for trial in self.trials.trials]
def _run_cv(self, cv_pairs, additional_params, opti=False, opti_verbose=False):
    """Fit/predict on every (train, test) CV pair and aggregate the metric.

    ``additional_params`` (e.g. a hyperopt sample) override the defaults from
    ``_get_params``.  When ``opti`` is True this also tracks the running best
    loss/params for the optimization loop.

    Returns a hyperopt-style dict with keys 'loss', 'status', 'params',
    'eval_time' and 'preds'.
    """
    params = self._get_params()
    if additional_params:
        params.update(additional_params)
    self.evals, start_time = [], time.time()
    preds = []
    for dtrain, dtest in cv_pairs:
        model, y_pred = self._fit_predict(dtrain, dtest, params)
        self.evals.append(self.eval_metric(dtest.get_label(), y_pred))
        preds.append(y_pred)
    eval_time = time.time() - start_time
    # hyperopt minimizes, so flip the sign when larger metric values are better.
    greater_is_better = -1 if self.greater_is_better else 1
    mean_loss = np.mean(self.evals) * greater_is_better
    result_cv = {
        'loss': mean_loss,
        'status': STATUS_FAIL if np.isnan(mean_loss) else STATUS_OK,
        'params': params,
        'eval_time': eval_time,
        'preds': preds
    }
    if opti:
        self._hyperopt_eval_num += 1
        if mean_loss < self.best_loss:
            self.best_loss = mean_loss
            self.best_params = params
        if opti_verbose:
            print('[{0}/{1}]\t\teval_time={2:.2f} sec\t{3}={4:.6f}\tbest={5:.6f}'.format(
                self._hyperopt_eval_num, self._hyperopt_max_evals, eval_time,
                self.eval_metric.__name__, mean_loss, self.best_loss))
    return result_cv
class ScikitExperiment(BaseExperiment):
    """Experiment wrapper around an arbitrary scikit-learn estimator."""

    def __init__(self, skl_model, data_path, learning_task, eval_metric, validation_type, greater_is_better=False,
                 n_folds=None, datetime_feature=None, output_path='', validation_seed=0):
        super(ScikitExperiment, self).__init__(data_path, learning_task, eval_metric, validation_type, greater_is_better,
                                               n_folds, datetime_feature, output_path, validation_seed)
        self.model = skl_model

    def _convert_to_dataset(self, x, y=None):
        # Plain (x, y) container; nothing framework-specific is needed here.
        return ScikitDataset(x, y)

    def _fit_predict(self, dtrain, dtest, params=None):
        # Fit a fresh copy so repeated CV fits never share fitted state.
        estimator = deepcopy(self.model).set_params(**params)
        estimator.fit(dtrain.x, dtrain.y)
        if self.learning_task != 'classification':
            return estimator, estimator.predict(dtest.x)
        probabilities = estimator.predict_proba(dtest.x)
        if len(set(dtrain.get_label())) == 2:
            # Binary task: keep only the positive-class column.
            probabilities = probabilities[:, 1]
        return estimator, probabilities

    def fit_predict(self, x_train, y_train, x_test, use_best_params=False, return_fitted_model=False):
        """Fit on (x_train, y_train) and predict on x_test; optionally reuse tuned params."""
        if use_best_params and self.best_params is None:
            raise AttributeError('There is no attribute "best_params". Use method "run_optimize" first')
        params = self.best_params if use_best_params else self._get_params()
        train_set = self._convert_to_dataset(x_train, y_train)
        test_set = self._convert_to_dataset(x_test)
        fitted, predictions = self._fit_predict(train_set, test_set, params=params)
        return (predictions, fitted) if return_fitted_model else predictions

    def _get_params(self):
        return self.model.get_params()

    def _get_seed_attribute_name(self):
        # Bagging reseeds the estimator through this attribute.
        if not hasattr(self.model, 'random_state'):
            raise AttributeError('Provided SKL model doesn\'t have "random_state" attribute, can\'t perform bagging')
        return 'random_state'
class BoostingExperiment(BaseExperiment):
    """Experiment wrapper around a gradient-boosting backend.

    ``bst_name`` selects the backend ('xgboost', 'lightgbm' or 'catboost'),
    ``bst_params`` are the backend's training parameters and ``bst_rounds``
    is the boosting-round budget.  Early stopping is set to 10% of the
    budget (at least one round).
    """

    def __init__(self, bst_name, bst_params, bst_rounds, data_path, learning_task, eval_metric, validation_type,
                 greater_is_better=False, n_folds=None, datetime_feature=None, output_path='', validation_seed=0):
        super(BoostingExperiment, self).__init__(data_path, learning_task, eval_metric, validation_type, greater_is_better,
                                                 n_folds, datetime_feature, output_path, validation_seed)
        # Validate arguments before deriving any attributes from them.
        if bst_rounds <= 0:
            raise ValueError('bst_rounds must be >= 1')
        if bst_name not in ['xgboost', 'lightgbm', 'catboost']:
            # BUG FIX: the message previously contained a bare '{0}' that was
            # never formatted with the offending name.
            raise ValueError('Unknown booster name {0}. Should be one of the '
                             'following - xgboost, lightgbm, catboost'.format(bst_name))
        self.params = bst_params
        self.name = bst_name
        # Early stopping after 10% of the round budget, but never 0 rounds.
        self.early = max(1, int(bst_rounds / 10))
        self.rounds = bst_rounds
        self.model = None

    def _get_params(self):
        return self.params

    def _get_seed_attribute_name(self):
        # All supported boosters accept 'seed' in their params dict.
        return 'seed'

    def _convert_to_dataset(self, x, y=None):
        """Wrap (x, y) in the backend's native dataset type."""
        if self.name == 'xgboost':
            return xgb.DMatrix(x, label=y)
        elif self.name == 'lightgbm':
            # free_raw_data=False keeps x available for predict().
            return lgb.Dataset(x, label=y, free_raw_data=False)
        elif self.name == 'catboost':
            return cat.Pool(x, label=y)

    @property
    def _eval_metric(self):
        """Adapt self.eval_metric to the (name, value, is_higher_better) feval protocol."""
        def __eval_metric(preds, train_dataset):
            labels = train_dataset.get_label()
            return self.eval_metric.__name__, self.eval_metric(labels, preds), self.greater_is_better
        return __eval_metric

    def _fit_predict(self, dtrain, dtest, params):
        """Train one model and predict on dtest.

        Returns (model, predictions); (None, None) for the not-yet-implemented
        catboost branch.
        """
        model, y_pred = None, None
        if self.name == 'xgboost':
            model = xgb.train(params, dtrain, self.rounds, evals=(dtrain, dtest),
                              feval=self._eval_metric, early_stopping_rounds=self.early)
            y_pred = model.predict(dtest)
        elif self.name == 'lightgbm':
            evals_result = {}
            model = lgb.train(params, dtrain, self.rounds, valid_sets=dtest, valid_names='test',
                              feval=self._eval_metric, verbose_eval=0, evals_result=evals_result)
            y_pred = model.predict(dtest.data)
        elif self.name == 'catboost':
            # TODO: catboost training is not implemented yet.
            pass
        return model, y_pred
| mit |
OshynSong/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
    """Benchmark KMeans vs MiniBatchKMeans over a grid of data sizes.

    Parameters
    ----------
    samples_range, features_range : iterables of int
        Grid of dataset sizes to benchmark (one run per combination).

    Returns
    -------
    defaultdict(list)
        Per-run timings and inertias keyed by 'kmeans_speed',
        'kmeans_quality', 'MiniBatchKMeans Speed' and
        'MiniBatchKMeans Quality'.
    """
    it = 0
    results = defaultdict(lambda: [])
    chunk = 100  # mini-batch size for MiniBatchKMeans
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('==============================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('==============================')
            print()
            # FIX: np.random.random_integers is deprecated (removed in recent
            # NumPy); randint with exclusive upper bound 51 draws the same
            # inclusive [-50, 50] range.
            data = nr.randint(-50, 51, (n_samples, n_features))

            print('K-Means')
            tstart = time()
            kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %0.5f" % kmeans.inertia_)
            print()
            results['kmeans_speed'].append(delta)
            results['kmeans_quality'].append(kmeans.inertia_)

            print('Fast K-Means')
            # let's prepare the data in small chunks
            mbkmeans = MiniBatchKMeans(init='k-means++',
                                       n_clusters=10,
                                       batch_size=chunk)
            tstart = time()
            mbkmeans.fit(data)
            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %f" % mbkmeans.inertia_)
            print()
            print()
            results['MiniBatchKMeans Speed'].append(delta)
            results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
    return results
def compute_bench_2(chunks):
    """Benchmark MiniBatchKMeans fit time and inertia across batch sizes."""
    results = defaultdict(lambda: [])
    n_features = 50000  # points drawn around each of the 8 centers
    means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
                      [0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
    # One Gaussian blob per center, stacked into a single 2-D sample matrix.
    blobs = [center + 0.8 * np.random.randn(n_features, 2) for center in means]
    X = np.concatenate([np.empty((0, 2))] + blobs)
    max_it = len(chunks)
    for it, chunk in enumerate(chunks, start=1):
        print('==============================')
        print('Iteration %03d of %03d' % (it, max_it))
        print('==============================')
        print()
        print('Fast K-Means')
        tstart = time()
        mbkmeans = MiniBatchKMeans(init='k-means++',
                                   n_clusters=8,
                                   batch_size=chunk)
        mbkmeans.fit(X)
        delta = time() - tstart
        print("Speed: %0.3fs" % delta)
        print("Inertia: %0.3fs" % mbkmeans.inertia_)
        print()
        results['MiniBatchKMeans Speed'].append(delta)
        results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
    return results
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    import matplotlib.pyplot as plt

    # FIX: np.int was removed from recent NumPy; the builtin int is equivalent.
    samples_range = np.linspace(50, 150, 5).astype(int)
    features_range = np.linspace(150, 50000, 5).astype(int)
    chunks = np.linspace(500, 10000, 15).astype(int)

    results = compute_bench(samples_range, features_range)
    results_2 = compute_bench_2(chunks)

    # FIX: dict.iteritems() is Python 2 only -> items().  Also compare the
    # label case-insensitively: the keys are 'kmeans_speed' but
    # 'MiniBatchKMeans Speed', so a case-sensitive test mis-binned the
    # MiniBatchKMeans timings into the inertia plot.
    max_time = max([max(i) for i in [t for (label, t) in results.items()
                                     if "speed" in label.lower()]])
    max_inertia = max([max(i) for i in [
        t for (label, t) in results.items()
        if "speed" not in label.lower()]])

    fig = plt.figure('scikit-learn K-Means benchmark results')
    for c, (label, timings) in zip('brcy',
                                   sorted(results.items())):
        if 'speed' in label.lower():
            # 3-D surface of fit time over the (samples, features) grid.
            ax = fig.add_subplot(2, 2, 1, projection='3d')
            ax.set_zlim3d(0.0, max_time * 1.1)
        else:
            ax = fig.add_subplot(2, 2, 2, projection='3d')
            ax.set_zlim3d(0.0, max_inertia * 1.1)
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
        ax.set_xlabel('n_samples')
        ax.set_ylabel('n_features')

    # 2-D curves of the batch-size benchmark.
    i = 0
    for c, (label, timings) in zip('br',
                                   sorted(results_2.items())):
        i += 1
        ax = fig.add_subplot(2, 2, i + 2)
        y = np.asarray(timings)
        ax.plot(chunks, y, color=c, alpha=0.8)
        ax.set_xlabel('Chunks')
        ax.set_ylabel(label)

    plt.show()
| bsd-3-clause |
sinhrks/pandas-ml | doc/source/conf.py | 3 | 8742 | # -*- coding: utf-8 -*-
#
# pandas-ml documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 22 14:58:37 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.autosummary',
'numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pandas_ml'
copyright = u'2015, sinhrks'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3.0'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pandas_mldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pandas_ml.tex', u'pandas-ml Documentation',
u'sinhrks', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pandas_ml', u'pandas-ml Documentation',
[u'sinhrks'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pandas_ml', u'pandas-ml Documentation',
u'sinhrks', 'pandas_ml', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# -- Build API doc ----------------------------------------------------------
numpydoc_show_class_members = False  # avoid duplicating members already listed by autosummary
# Locate this conf.py, the output dir for generated stubs, and the package root.
fpath = os.path.dirname(__file__)
gen_path = os.path.join(fpath, 'generated')
app_path = os.path.join(os.path.dirname(os.path.dirname(fpath)), 'pandas_ml')
# Shell out to sphinx-apidoc so the API stubs under generated/ are rebuilt
# every time the docs are built.
os.system('sphinx-apidoc -f -E -o {0} {1}'.format(gen_path, app_path))
| bsd-3-clause |
espensirnes/paneltime | paneltime/regstats.py | 2 | 9494 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#This module calculates statistics and saves it to a file
import statproc as stat
import numpy as np
import regprocs as rp
from scipy import stats as scstats
import csv
import os
import sys
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import functions as fu
import loglikelihood as logl
class statistics:
    """Compute, print and save the full statistics report for a fitted model.

    Everything is driven from __init__: it pulls the gradient/hessian/
    log-likelihood out of ``results_obj``, computes coefficient tables,
    diagnostic tests and descriptive statistics, prints them and writes a
    CSV report via :func:`save_stats`.
    """
    def __init__(self,results_obj,robustcov_lags=100,correl_vars=None,descriptives_vars=None,simple_statistics=False):
        """Build all statistics.

        robustcov_lags: lag count for the HAC/sandwich covariance.
        correl_vars / descriptives_vars: variable selections (strings) for the
        correlation matrix and the descriptive table; None means all columns.
        simple_statistics: if True, stop after the coefficient table.
        """
        self.G=results_obj.gradient_matrix
        self.H=results_obj.hessian
        self.ll=results_obj.ll
        self.panel=results_obj.panel
        self.ll.standardize()
        self.Rsq, self.Rsqadj, self.LL_ratio,self.LL_ratio_OLS=stat.goodness_of_fit(self.ll,True)
        # Log-likelihoods of the restricted and OLS benchmark models.
        self.LL_restricted=logl.LL(self.panel.args.args_restricted, self.panel).LL
        self.LL_OLS=logl.LL(self.panel.args.args_OLS, self.panel).LL
        (self.reg_output,
         self.names,
         self.args,
         self.se_robust,
         self.se_st,
         self.tstat,
         self.tsign,
         sign_codes)=self.coeficient_output(self.H,self.G,robustcov_lags,self.ll)
        if simple_statistics:
            return
        self.coeficient_printout(sign_codes)
        # Diagnostics: autocorrelation, normality, multicollinearity, ADF.
        self.no_ac_prob,rhos,RSqAC=stat.breusch_godfrey_test(self.panel,self.ll,10)
        self.norm_prob=stat.JB_normality_test(self.ll.e_norm,self.panel)
        self.multicollinearity_check(self.G)
        self.data_correlations,self.data_statistics=self.correl_and_statistics(correl_vars,descriptives_vars)
        scatterplots(self.panel)
        print ( 'LL: %s' %(self.ll.LL,))
        self.adf_test=stat.adf_test(self.panel,self.ll,10)
        self.save_stats(self.ll)

    def correl_and_statistics(self,correl_vars,descriptives_vars):
        """Return (correlation matrix with headings, descriptive-stats table)."""
        panel=self.panel
        X_names=[]
        X=[]
        correl_X,correl_names=get_variables(panel, correl_vars)
        descr_X,descr_names=get_variables(panel, descriptives_vars)
        # Correlation matrix with variable names as the first row/column.
        c=stat.correl(correl_X)
        c=np.concatenate((correl_names,c),0)
        n=descr_X.shape[1]
        # One row per variable: mean, std, min, max.
        vstat=np.concatenate((np.mean(descr_X,0).reshape((n,1)),
                              np.std(descr_X,0).reshape((n,1)),
                              np.min(descr_X,0).reshape((n,1)),
                              np.max(descr_X,0).reshape((n,1))),1)
        vstat=np.concatenate((descr_names.T,vstat),1)
        vstat=np.concatenate(([['','Mean','SD','min','max']],vstat),0)
        correl_names=np.append([['']],correl_names,1).T
        c=np.concatenate((correl_names,c),1)
        return c,vstat

    def coeficient_output(self,H,G,robustcov_lags,ll):
        """Build the coefficient table (names, estimates, SEs, t-stats, p-values)."""
        panel=self.panel
        args=ll.args.args_v
        # Robust (sandwich/HAC) and conventional covariance matrices.
        robust_cov_matrix,cov=rp.sandwich(H,G,robustcov_lags,ret_hessin=True)
        # Floor the variances at 1e-200 so sqrt never hits a negative/zero diagonal.
        se_robust=np.maximum(np.diag(robust_cov_matrix).flatten(),1e-200)**0.5
        se_st=np.maximum(np.diag(cov).flatten(),1e-200)**0.5
        names=np.array(panel.args.names_v)
        T=len(se_robust)
        output=[]
        # Clamp t-values to +-3000 to keep the table printable when SE ~ 0.
        tstat=np.maximum(np.minimum((args)/((se_robust<=0)*args*1e-15+se_robust),3000),-3000)
        # NOTE(review): this is the one-sided tail probability 1-cdf(|t|),
        # i.e. half the usual two-sided p-value — confirm that is intended.
        tsign=1-scstats.t.cdf(np.abs(tstat),panel.df)
        sign_codes=get_sign_codes(tsign)
        output=np.concatenate((names.reshape((T,1)),
                               args.reshape((T,1)),
                               se_robust.reshape((T,1)),
                               se_st.reshape((T,1)),
                               tstat.reshape((T,1)),
                               tsign.reshape((T,1)),
                               sign_codes.reshape((T,1))),1)
        output=np.concatenate(([['Regressor:','coef:','SE sandwich:','SE standard:','t-value:','t-sign:','sign codes:']],output),0)
        return output,names,args,se_robust,se_st,tstat,tsign,sign_codes

    def coeficient_printout(self,sign_codes):
        """Pretty-print the coefficient table to stdout as aligned columns."""
        names,args,se,se_st,tstat,tsign=self.names,self.args,self.se_robust,self.se_st,self.tstat,self.tsign
        T=len(se)
        printout=np.zeros((T,6),dtype='<U24')
        # Column width for the name column: longest name plus one space.
        maxlen=0
        for i in names:
            maxlen=max((len(i)+1,maxlen))
        printout[:,0]=[s.ljust(maxlen) for s in names]
        rndlen=10
        rndlen0=8
        # Round, then render as fixed-width strings.
        args=np.round(args,rndlen0).astype('<U'+str(rndlen))
        tstat=np.round(tstat,rndlen0).astype('<U'+str(rndlen))
        se=np.round(se,rndlen0).astype('<U'+str(rndlen))
        se_st=np.round(se_st,rndlen0).astype('<U'+str(rndlen))
        tsign=np.round(tsign,rndlen0).astype('<U'+str(rndlen))
        sep=' '
        # Header: an underscored "SE" banner spanning the two SE columns.
        prstr=' '*(maxlen+rndlen+2*len(sep)) + '_'*int(rndlen+1)+'SE'+'_'*int(rndlen)+'\n'
        prstr+='Variable names'.ljust(maxlen)[:maxlen]+sep
        prstr+='Coef'.ljust(rndlen)[:rndlen]+sep
        prstr+='sandwich'.ljust(rndlen)[:rndlen]+sep
        prstr+='standard'.ljust(rndlen)[:rndlen]+sep
        prstr+='t-stat.'.ljust(rndlen)[:rndlen]+sep
        prstr+='sign.'.ljust(rndlen)[:rndlen]+sep
        prstr+='\n'
        for i in range(T):
            b=str(args[i])
            t=str(tstat[i])
            # Pad non-negative values with a space so columns align with '-'.
            if b[0]!='-':
                b=' '+b
                t=' '+t
            prstr+=names[i].ljust(maxlen)[:maxlen]+sep
            prstr+=b.ljust(rndlen)[:rndlen]+sep
            prstr+=se[i].ljust(rndlen)[:rndlen]+sep
            prstr+=se_st[i].ljust(rndlen)[:rndlen]+sep
            prstr+=t.ljust(rndlen)[:rndlen]+sep
            prstr+=tsign[i].ljust(rndlen)[:rndlen]+sep
            prstr+=sign_codes[i]
            prstr+='\n'
        prstr+='\n'+"Significance codes: '=0.1, *=0.05, **=0.01, ***=0.001, |=collinear"
        print(prstr)

    def multicollinearity_check(self,G):
        """Store a variance-decomposition matrix with headings in self.MultiColl."""
        panel=self.panel
        vNames=['Max(var_proportion)','CI:']+panel.args.names_v
        k=len(vNames)-1
        matr=stat.var_decomposition(X=G,concat=True)
        matr=np.round(matr,3)
        # Prepend the largest variance proportion per row as its own column.
        maxp=np.max(matr[:,1:],1).reshape((matr.shape[0],1))
        matr=np.concatenate((maxp,matr),1)
        matr=np.concatenate(([vNames],matr))
        self.MultiColl=matr

    def save_stats(self,ll,strappend=''):
        """Assemble every table into one CSV saved under output/<descr><strappend>.csv."""
        panel=self.panel
        N,T,k=panel.X.shape
        output=dict()
        name_list=[]
        add_output(output,name_list,'Information',[
            ['Description:',panel.descr],
            ['LL:',ll.LL],
            ['Number of IDs:',N],
            ['Maximum number of dates:',T],
            ['A) Total number of observations:',panel.NT_before_loss],
            ['B) Observations lost to GARCH/ARIMA',panel.tot_lost_obs],
            [' Total after loss of observations (A-B):',panel.NT],
            ['C) Number of Random Effects coefficients:',N],
            ['D) Number of Fixed Effects coefficients in the variance process:',N],
            ['E) Number of coefficients:',panel.len_args],
            ['DF (A-B-C-D-E):',panel.df],
            ['RSq:',self.Rsq],
            ['RSq Adj:',self.Rsqadj],
            ['LL-ratio:',self.LL_ratio],
            ['no ac_prob:',self.no_ac_prob],
            ['norm prob:',self.norm_prob],
            ['ADF (dicky fuller):',self.adf_test, "1% and 5 % lower limit of confidence intervals, respectively"],
            ['Dependent:',panel.Y_names]
        ])
        add_output(output,name_list,'Regression',self.reg_output)
        add_output(output,name_list,'Multicollinearity',self.MultiColl)
        add_output(output,name_list,'Descriptive statistics',self.data_statistics)
        add_output(output,name_list,'Correlation Matrix',self.data_correlations)
        add_output(output,name_list,'Number of dates in each ID',panel.T_arr.reshape((N,1)))
        # Flatten all sections into one table; the first row records each
        # section's name, starting row and dimensions ("name~pos~rows~cols").
        output_table=[['']]
        output_positions=['']
        for i in name_list:
            if i!='Statistics':
                output_table.extend([[''],['']])
            pos=len(output_table)+1
            output_table.extend([[i+':']])
            output_table.extend(output[i])
            output_positions.append('%s~%s~%s~%s' %(i,pos,len(output[i]),len(output[i][0])))
        output_table[0]=output_positions
        fu.savevar(output_table,'output/'+panel.descr+strappend+'.csv')
        self.output=output
def add_variable(name,panel,names,variables):
    """Append the named column from panel.dataframe to names/variables in place."""
    if name in panel.dataframe.keys():
        d=dict(panel.dataframe[[name]])
        # NOTE(review): dict(...) can never be an np.ndarray, so this check
        # looks always-false and the variable would never be appended — verify
        # against the actual type that panel.dataframe[[name]] yields.
        if type(d)==np.ndarray:
            names.append(name)
            variables.append(d)
def get_variables(panel,input_str):
    """Resolve a user-supplied variable selection against panel.dataframe.

    Falls back to every column when the selection is empty or resolves to
    nothing.  Returns (data matrix, 1-x-n array of names).
    """
    requested = fu.split_input(input_str)
    names = []
    variables = []
    if requested is not None:
        for var_name in requested:
            add_variable(var_name, panel, names, variables)
    # Fall back to all columns when nothing was requested or nothing matched.
    if requested is None or not names:
        for var_name in panel.dataframe.keys():
            add_variable(var_name, panel, names, variables)
    n = len(names)
    X = np.concatenate(variables, 1)
    names = np.array(names).reshape((1, n))
    return X, names
def add_output(output_dict,name_list,name,table):
    """Register `table` under `name`, prefixing an empty first column.

    The blank column leaves room for the section label in the final CSV.
    List tables are padded in place; ndarray tables get a new array.
    """
    if type(table) is np.ndarray:
        padding = [[''] for _ in range(len(table))]
        table = np.concatenate((padding, table), 1)
    else:
        for row_idx, row in enumerate(table):
            table[row_idx] = [''] + row
    output_dict[name] = table
    name_list.append(name)
def get_list_dim(lst):
    """Returns 0 if not list, 1 if one dim and 2 if two or more dim. If higher than
    2 dim are attemted to print, then each cell will contain an array. Works on lists and ndarray"""
    if type(lst) is np.ndarray:
        # Cap at 2: anything deeper prints as arrays inside cells anyway.
        return min((len(lst.shape), 2))
    if type(lst) is not list:
        return 0
    # A list counts as 2-D only when every element is itself a list.
    return 2 if all(type(item) is list for item in lst) else 1
def get_sign_codes(tsign):
    """Map each p-value in `tsign` to a significance marker string.

    Thresholds follow the conventional R-style ladder; strings are padded so
    they align in the printed coefficient table (dtype '<U3').
    """
    ladder = ((0.001, '***'), (0.01, '** '), (0.05, '* '), (0.1, ' . '))
    sc = []
    for p in tsign:
        for cutoff, code in ladder:
            if p < cutoff:
                sc.append(code)
                break
        else:
            sc.append('')
    return np.array(sc, dtype='<U3')
def scatterplots(panel):
    """Save one Y-vs-X scatter plot PNG per regressor under figures/."""
    X_names=panel.X_names
    Y_names=panel.Y_names
    X=panel.raw_X
    Y=panel.raw_Y
    N,k=X.shape
    for i in range(k):
        fgr=plt.figure()
        # Low alpha + small markers so dense panels stay readable.
        plt.scatter(X[:,i],Y[:,0], alpha=.1, s=10)
        plt.ylabel(Y_names)
        plt.xlabel(X_names[i])
        # Sanitize the regressor name so it is safe in a file name.
        xname=remove_illegal_signs(X_names[i])
        fname=fu.obtain_fname('figures/%s-%s.png' %(Y_names,xname))
        fgr.savefig(fname)
        plt.close()
def remove_illegal_signs(name):
    """Replace characters that are unsafe in file names with underscores."""
    # NOTE(review): the plain letter 'b' is also on this list, so every 'b'
    # in a name becomes '_'; possibly a typo (for '\b'?) — confirm intent.
    illegals = ['#', '<', '$', '+',
                '%', '>', '!', '`',
                '&', '*', '‘', '|',
                '{', '?', '“', '=',
                '}', '/', ':',
                '\\', 'b']
    for ch in illegals:
        # str.replace is a no-op when ch is absent, so no membership test needed.
        name = name.replace(ch, '_')
    return name
| gpl-3.0 |
jakevdp/seaborn | seaborn/rcmod.py | 1 | 15625 | """Functions that alter the matplotlib rc dictionary on the fly."""
import numpy as np
import matplotlib as mpl
from . import palettes
# rcParams that together define a seaborn "style" (colors, grid, fonts, ...).
# axes_style()/set_style() read and write exactly this set of keys.
_style_keys = (
    "axes.facecolor",
    "axes.edgecolor",
    "axes.grid",
    "axes.axisbelow",
    "axes.linewidth",
    "axes.labelcolor",

    "grid.color",
    "grid.linestyle",

    "text.color",

    "xtick.color",
    "ytick.color",
    "xtick.direction",
    "ytick.direction",
    "xtick.major.size",
    "ytick.major.size",
    "xtick.minor.size",
    "ytick.minor.size",

    "legend.frameon",
    "legend.numpoints",
    "legend.scatterpoints",

    "lines.solid_capstyle",

    "image.cmap",
    "font.family",
    "font.sans-serif",
)

# rcParams that define a plotting "context" (sizes/scales of plot elements).
# plotting_context()/set_context() read and write exactly this set of keys.
_context_keys = (
    "figure.figsize",

    "axes.labelsize",
    "axes.titlesize",
    "xtick.labelsize",
    "ytick.labelsize",
    "legend.fontsize",

    "grid.linewidth",
    "lines.linewidth",
    "patch.linewidth",
    "lines.markersize",
    "lines.markeredgewidth",

    "xtick.major.width",
    "ytick.major.width",
    "xtick.minor.width",
    "ytick.minor.width",

    "xtick.major.pad",
    "ytick.major.pad"
)
def set(context="notebook", style="darkgrid", palette="deep",
        font="sans-serif", font_scale=1, color_codes=False, rc=None):
    """Set all seaborn aesthetic parameters in a single call.

    Convenience wrapper around :func:`set_context`, :func:`set_style` and
    :func:`set_palette`; each group of parameters can also be set directly
    or temporarily through those functions.

    Parameters
    ----------
    context : string or dict
        Plotting context parameters, see :func:`plotting_context`
    style : string or dict
        Axes style parameters, see :func:`axes_style`
    palette : string or sequence
        Color palette, see :func:`color_palette`
    font : string
        Font family, see matplotlib font manager.
    font_scale : float, optional
        Separate scaling factor to independently scale the size of the
        font elements.
    color_codes : bool
        If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
        color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
    rc : dict or None
        Dictionary of rc parameter mappings to override the above.
    """
    set_context(context, font_scale)
    set_style(style, rc={"font.family": font})
    set_palette(palette, color_codes=color_codes)
    # Explicit rc overrides win over everything set above.
    if rc is not None:
        mpl.rcParams.update(rc)
def reset_defaults():
    """Restore all RC params to default settings (matplotlib's built-in defaults)."""
    mpl.rcParams.update(mpl.rcParamsDefault)


def reset_orig():
    """Restore all RC params to original settings (respects custom rc, e.g. matplotlibrc)."""
    mpl.rcParams.update(mpl.rcParamsOrig)
class _AxesStyle(dict):
    """Dict subclass whose ``with`` block applies a style temporarily."""

    def __enter__(self):
        """Snapshot the current style params, then apply this style."""
        self._orig_style = {key: mpl.rcParams[key] for key in _style_keys}
        set_style(self)
        return self

    def __exit__(self, *args):
        """Restore the style params captured on entry."""
        set_style(self._orig_style)
class _PlottingContext(dict):
    """Dict subclass whose ``with`` block applies a context temporarily."""

    def __enter__(self):
        """Snapshot the current context params, then apply this context."""
        self._orig_context = {key: mpl.rcParams[key] for key in _context_keys}
        set_context(self)
        return self

    def __exit__(self, *args):
        """Restore the context params captured on entry."""
        set_context(self._orig_context)
def axes_style(style=None, rc=None):
    """Return a parameter dict for the aesthetic style of the plots.

    This affects things like the color of the axes, whether a grid is
    enabled by default, and other aesthetic elements.

    This function returns an object that can be used in a ``with`` statement
    to temporarily change the style parameters.

    Parameters
    ----------
    style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
        A dictionary of parameters or the name of a preconfigured set.
        ``None`` returns the current values of the style rcParams.
    rc : dict, optional
        Parameter mappings to override the values in the preset seaborn
        style dictionaries. This only updates parameters that are
        considered part of the style definition.

    Examples
    --------
    >>> st = axes_style("whitegrid")

    >>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})

    >>> import matplotlib.pyplot as plt
    >>> with axes_style("white"):
    ...     f, ax = plt.subplots()
    ...     ax.plot(x, y)               # doctest: +SKIP

    See Also
    --------
    set_style : set the matplotlib parameters for a seaborn theme
    plotting_context : return a parameter dict to to scale plot elements
    color_palette : define the color palette for a plot

    """
    if style is None:
        # Report the current style rather than a preset.
        style_dict = {k: mpl.rcParams[k] for k in _style_keys}

    elif isinstance(style, dict):
        style_dict = style

    else:
        styles = ["white", "dark", "whitegrid", "darkgrid", "ticks"]
        if style not in styles:
            raise ValueError("style must be one of %s" % ", ".join(styles))

        # Define colors here
        dark_gray = ".15"
        light_gray = ".8"

        # Common parameters
        style_dict = {

            "text.color": dark_gray,
            "axes.labelcolor": dark_gray,

            "legend.frameon": False,
            "legend.numpoints": 1,
            "legend.scatterpoints": 1,

            "xtick.direction": "out",
            "ytick.direction": "out",
            "xtick.color": dark_gray,
            "ytick.color": dark_gray,

            "axes.axisbelow": True,
            "image.cmap": "Greys",
            "font.family": ["sans-serif"],
            "font.sans-serif": ["Arial", "Liberation Sans",
                                "Bitstream Vera Sans", "sans-serif"],
            "grid.linestyle": "-",
            "lines.solid_capstyle": "round",
        }

        # Set grid on or off ("darkgrid"/"whitegrid" enable it by name)
        if "grid" in style:
            style_dict.update({
                "axes.grid": True,
            })
        else:
            style_dict.update({
                "axes.grid": False,
            })

        # Set the color of the background, spines, and grids
        if style.startswith("dark"):
            style_dict.update({
                "axes.facecolor": "#EAEAF2",
                "axes.edgecolor": "white",
                "axes.linewidth": 0,
                "grid.color": "white",
            })

        elif style == "whitegrid":
            style_dict.update({
                "axes.facecolor": "white",
                "axes.edgecolor": light_gray,
                "axes.linewidth": 1,
                "grid.color": light_gray,
            })

        elif style in ["white", "ticks"]:
            style_dict.update({
                "axes.facecolor": "white",
                "axes.edgecolor": dark_gray,
                "axes.linewidth": 1.25,
                "grid.color": light_gray,
            })

        # Show or hide the axes ticks (only "ticks" draws them)
        if style == "ticks":
            style_dict.update({
                "xtick.major.size": 6,
                "ytick.major.size": 6,
                "xtick.minor.size": 3,
                "ytick.minor.size": 3,
            })
        else:
            style_dict.update({
                "xtick.major.size": 0,
                "ytick.major.size": 0,
                "xtick.minor.size": 0,
                "ytick.minor.size": 0,
            })

    # Override these settings with the provided rc dictionary
    if rc is not None:
        # Silently drop keys that are not part of the style definition.
        rc = {k: v for k, v in rc.items() if k in _style_keys}
        style_dict.update(rc)

    # Wrap in an _AxesStyle object so this can be used in a with statement
    style_object = _AxesStyle(style_dict)

    return style_object
def set_style(style=None, rc=None):
    """Set the aesthetic style of the plots.
    This affects things like the color of the axes, whether a grid is
    enabled by default, and other aesthetic elements.
    Parameters
    ----------
    style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
        A dictionary of parameters or the name of a preconfigured set.
    rc : dict, optional
        Parameter mappings to override the values in the preset seaborn
        style dictionaries. This only updates parameters that are
        considered part of the style definition.
    Examples
    --------
    >>> set_style("whitegrid")
    >>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
    See Also
    --------
    axes_style : return a dict of parameters or use in a ``with`` statement
                 to temporarily set the style.
    set_context : set parameters to scale plot elements
    set_palette : set the default color palette for figures
    """
    # Resolve the requested style to a parameter dict and install it
    # globally in matplotlib's rcParams.
    mpl.rcParams.update(axes_style(style, rc))
def plotting_context(context=None, font_scale=1, rc=None):
    """Return a parameter dict to scale elements of the figure.
    This affects things like the size of the labels, lines, and other
    elements of the plot, but not the overall style. The base context
    is "notebook", and the other contexts are "paper", "talk", and "poster",
    which are versions of the notebook parameters scaled by .8, 1.3, and 1.6,
    respectively.
    This function returns an object that can be used in a ``with`` statement
    to temporarily change the context parameters.
    Parameters
    ----------
    context : dict, None, or one of {paper, notebook, talk, poster}
        A dictionary of parameters or the name of a preconfigured set.
    font_scale : float, optional
        Separate scaling factor to independently scale the size of the
        font elements.
    rc : dict, optional
        Parameter mappings to override the values in the preset seaborn
        context dictionaries. This only updates parameters that are
        considered part of the context definition.
    Examples
    --------
    >>> c = plotting_context("poster")
    >>> c = plotting_context("notebook", font_scale=1.5)
    >>> c = plotting_context("talk", rc={"lines.linewidth": 2})
    >>> import matplotlib.pyplot as plt
    >>> with plotting_context("paper"):
    ...     f, ax = plt.subplots()
    ...     ax.plot(x, y)                 # doctest: +SKIP
    See Also
    --------
    set_context : set the matplotlib parameters to scale plot elements
    axes_style : return a dict of parameters defining a figure style
    color_palette : define the color palette for a plot
    """
    if context is None:
        # No context given: capture the current values of the context keys.
        params = {key: mpl.rcParams[key] for key in _context_keys}
    elif isinstance(context, dict):
        # A parameter mapping is used verbatim.
        params = context
    else:
        valid = ["paper", "notebook", "talk", "poster"]
        if context not in valid:
            raise ValueError("context must be in %s" % ", ".join(valid))
        # Baseline values correspond to the "notebook" context.
        base = {
            "figure.figsize": np.array([8, 5.5]),
            "axes.labelsize": 11,
            "axes.titlesize": 12,
            "xtick.labelsize": 10,
            "ytick.labelsize": 10,
            "legend.fontsize": 10,
            "grid.linewidth": 1,
            "lines.linewidth": 1.75,
            "patch.linewidth": .3,
            "lines.markersize": 7,
            "lines.markeredgewidth": 0,
            "xtick.major.width": 1,
            "ytick.major.width": 1,
            "xtick.minor.width": .5,
            "ytick.minor.width": .5,
            "xtick.major.pad": 7,
            "ytick.major.pad": 7,
        }
        # Scale every parameter relative to the notebook baseline.
        factor = dict(paper=.8, notebook=1, talk=1.3, poster=1.6)[context]
        params = {key: value * factor for key, value in base.items()}
        # Fonts get an extra, independent scaling factor.
        for key in ["axes.labelsize", "axes.titlesize", "legend.fontsize",
                    "xtick.labelsize", "ytick.labelsize"]:
            params[key] = params[key] * font_scale
        # Workaround for a matplotlib 1.4.2 bug that makes points invisible
        # when they have no edgewidth (fixed in 1.4.3); see
        # https://github.com/mwaskom/seaborn/issues/344
        if mpl.__version__ == "1.4.2":
            params["lines.markeredgewidth"] = 0.01
    # Apply the rc overrides, restricted to recognised context keys.
    if rc is not None:
        params.update({key: value for key, value in rc.items()
                       if key in _context_keys})
    # Wrap so the result can also be used in a ``with`` statement.
    return _PlottingContext(params)
def set_context(context=None, font_scale=1, rc=None):
    """Set the plotting context parameters.
    This affects things like the size of the labels, lines, and other
    elements of the plot, but not the overall style. The base context
    is "notebook", and the other contexts are "paper", "talk", and "poster",
    which are versions of the notebook parameters scaled by .8, 1.3, and 1.6,
    respectively.
    Parameters
    ----------
    context : dict, None, or one of {paper, notebook, talk, poster}
        A dictionary of parameters or the name of a preconfigured set.
    font_scale : float, optional
        Separate scaling factor to independently scale the size of the
        font elements.
    rc : dict, optional
        Parameter mappings to override the values in the preset seaborn
        context dictionaries. This only updates parameters that are
        considered part of the context definition.
    Examples
    --------
    >>> set_context("paper")
    >>> set_context("talk", font_scale=1.4)
    >>> set_context("talk", rc={"lines.linewidth": 2})
    See Also
    --------
    plotting_context : return a dictionary of rc parameters, or use in
                       a ``with`` statement to temporarily set the context.
    set_style : set the default parameters for figure style
    set_palette : set the default color palette for figures
    """
    # Resolve to a parameter dict and install it globally.
    mpl.rcParams.update(plotting_context(context, font_scale, rc))
def set_palette(palette, n_colors=None, desat=None, color_codes=False):
    """Set the matplotlib color cycle using a seaborn palette.
    Parameters
    ----------
    palette : hls | husl | matplotlib colormap | seaborn color palette
        Palette definition. Should be something that :func:`color_palette`
        can process.
    n_colors : int
        Number of colors in the cycle. The default number of colors will depend
        on the format of ``palette``, see the :func:`color_palette`
        documentation for more information.
    desat : float
        Proportion to desaturate each color by.
    color_codes : bool
        If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
        color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
    Examples
    --------
    >>> set_palette("Reds")
    >>> set_palette("Set1", 8, .75)
    See Also
    --------
    color_palette : build a color palette or set the color cycle temporarily
                    in a ``with`` statement.
    set_context : set parameters to scale plot elements
    set_style : set the default parameters for figure style
    """
    cycle = list(palettes.color_palette(palette, n_colors, desat))
    # NOTE(review): the "axes.color_cycle" rcParam was removed in
    # matplotlib 2.0 in favour of "axes.prop_cycle" -- confirm the range
    # of matplotlib versions this module must support.
    mpl.rcParams["axes.color_cycle"] = cycle
    mpl.rcParams["patch.facecolor"] = cycle[0]
    if color_codes:
        palettes.set_color_codes(palette)
| bsd-3-clause |
botswana-harvard/bcpp-interview | bcpp_interview/management/commands/extract_from_bcpp.py | 1 | 5914 | """
A few methods to help extract data from bcpp.
Run this either on the bcpp server console, or from a bcpp virtualenv with a tunnel to the bcpp MySQL server.
To get locator data from bcpp:
from bcpp_interview import get_locator_dataframe, identity, decrypt_locator, get_consent_dataframe, decrypt_consent
df_locator = get_locator_dataframe()
decrypt_locator(df_locator) # takes a long time
df_consent = get_consent_dataframe()
decrypt_consent(df_consent) # takes a long time
"""
import pandas as pd
import numpy as np
from django.core.exceptions import ImproperlyConfigured
from edc.core.crypto_fields.classes import FieldCryptor
from bhp066.apps.bcpp_subject.models import SubjectConsent, SubjectLocator
from M2Crypto.RSA import RSAError
# Mapping of SubjectConsent queryset field paths -> output dataframe
# column names (used by get_consent_dataframe).
consent_columns = {
    'subject_identifier': 'subject_identifier',
    'first_name': 'first_name',
    'last_name': 'last_name',
    'identity': 'identity',
    'gender': 'gender',
    'dob': 'dob',
    'household_member__household_structure__household__plot__plot_identifier': 'plot_identifier',
    'household_member__household_structure__household__plot__gps_target_lat': 'gps_target_lat',
    'household_member__household_structure__household__plot__gps_target_lon': 'gps_target_lon',
}
# Mapping of SubjectLocator queryset field paths -> output dataframe
# column names (used by get_locator_dataframe).
locator_columns = {
    'subject_visit__household_member__registered_subject__subject_identifier': 'subject_identifier',
    'alt_contact_cell': 'alt_contact_cell',
    'alt_contact_cell_number': 'alt_contact_cell_number',
    'alt_contact_name': 'alt_contact_name',
    'alt_contact_rel': 'alt_contact_rel',
    'alt_contact_tel': 'alt_contact_tel',
    'consent_version': 'consent_version',
    'contact_cell': 'contact_cell',
    'contact_name': 'contact_name',
    'contact_phone': 'contact_phone',
    'contact_physical_address': 'contact_physical_address',
    'contact_rel': 'contact_rel',
    'date_signed': 'date_signed',
    'has_alt_contact': 'has_alt_contact',
    'home_visit_permission': 'home_visit_permission',
    'mail_address': 'mail_address',
    'may_call_work': 'may_call_work',
    'may_contact_someone': 'may_contact_someone',
    'may_follow_up': 'may_follow_up',
    'may_sms_follow_up': 'may_sms_follow_up',
    'other_alt_contact_cell': 'other_alt_contact_cell',
    'physical_address': 'physical_address',
    'report_datetime': 'report_datetime',
    'subject_cell': 'subject_cell',
    'subject_cell_alt': 'subject_cell_alt',
    'subject_phone': 'subject_phone',
    'subject_phone_alt': 'subject_phone_alt',
    'subject_visit': 'subject_visit',
    'subject_work_phone': 'subject_work_phone',
    'subject_work_place': 'subject_work_place'}
# Encrypted consent columns -> arguments unpacked into FieldCryptor
# (e.g. ['rsa', 'local']); see decrypt() below.
consent_encrypted_columns = {
    'first_name': ['rsa', 'local'],
    'last_name': ['rsa', 'restricted'],
    'identity': ['rsa', 'restricted'],
    'gps_target_lat': ['rsa', 'local'],
    'gps_target_lon': ['rsa', 'local'],
}
# Encrypted locator columns -> arguments unpacked into FieldCryptor.
locator_encrypted_columns = {
    'alt_contact_cell_number': ['rsa', 'local'],
    'alt_contact_name': ['rsa', 'local'],
    'alt_contact_rel': ['rsa', 'local'],
    'alt_contact_cell': ['rsa', 'local'],
    'other_alt_contact_cell': ['rsa', 'local'],
    'alt_contact_tel': ['rsa', 'local'],
    'mail_address': ['aes', 'local'],
    'physical_address': ['aes', 'local'],
    'subject_cell': ['rsa', 'local'],
    'subject_cell_alt': ['rsa', 'local'],
    'subject_phone': ['rsa', 'local'],
    'subject_phone_alt': ['rsa', 'local'],
    'subject_work_place': ['aes', 'local'],
    'subject_work_phone': ['rsa', 'local'],
    'contact_name': ['rsa', 'local'],
    'contact_rel': ['rsa', 'local'],
    'contact_physical_address': ['aes', 'local'],
    'contact_cell': ['rsa', 'local'],
    'contact_phone': ['rsa', 'local'],
}
def get_consent_dataframe():
    """Return the SubjectConsent data as a dataframe (encrypted fields
    still encrypted)."""
    records = SubjectConsent.objects.all().values_list(*consent_columns.keys())
    df = pd.DataFrame(list(records), columns=consent_columns.keys())
    df.rename(columns=consent_columns, inplace=True)
    df.fillna(value=np.nan, inplace=True)
    # Strip timezone info so datetime columns are naive datetime64[ns].
    for name in list(df.select_dtypes(include=['datetime64[ns, UTC]']).columns):
        df[name] = df[name].astype('datetime64[ns]')
    return df
def get_locator_dataframe():
    """Return the SubjectLocator data as a dataframe (encrypted fields
    still encrypted)."""
    records = SubjectLocator.objects.all().values_list(*locator_columns.keys())
    df = pd.DataFrame(list(records), columns=locator_columns.keys())
    df.rename(columns=locator_columns, inplace=True)
    df.fillna(value=np.nan, inplace=True)
    # Strip timezone info so datetime columns are naive datetime64[ns].
    for name in list(df.select_dtypes(include=['datetime64[ns, UTC]']).columns):
        df[name] = df[name].astype('datetime64[ns]')
    return df
def decrypt_by_column(df, columns):
    """Return the dataframe with the given columns decrypted in place.

    ``columns`` maps column name -> FieldCryptor arguments.
    """
    for name, algorithm in columns.items():
        # Bind the loop variables as defaults so each row-wise callable
        # decrypts the intended column.
        def _decrypt_cell(row, algorithm=algorithm, name=name):
            return decrypt(row, algorithm, column_name=name)
        df[name] = df.apply(_decrypt_cell, axis=1)
    return df
def decrypt_locator(df):
    """Return the locator dataframe with its encrypted columns decrypted."""
    return decrypt_by_column(df, locator_encrypted_columns)
def decrypt_consent(df):
    """Return the consent dataframe with its encrypted columns decrypted."""
    return decrypt_by_column(df, consent_encrypted_columns)
def decrypt(row, algorithm, column_name):
    """Return the decrypted value of ``row[column_name]``, or np.nan for
    null cells.

    ``algorithm`` is unpacked into FieldCryptor (e.g. ['rsa', 'local']).
    On RSAError the raw (still encrypted) value is returned instead.
    """
    value = np.nan
    if pd.notnull(row[column_name]):
        field_cryptor = FieldCryptor(*algorithm)
        try:
            value = field_cryptor.decrypt(row[column_name])
            # An 'enc1::' prefix means decrypt() handed back ciphertext,
            # i.e. the encryption keys were not found.  Note this raise is
            # NOT caught below (only RSAError is), so it propagates.
            if value.startswith('enc1::'):
                raise ImproperlyConfigured(
                    'Cannot decrypt identity, specify path to the encryption keys in settings.KEYPATH')
        except RSAError:
            # Best effort: keep the encrypted value rather than failing.
            # NOTE(review): consider logging instead of print().
            value = row[column_name]
            print('RSAError', column_name, algorithm)
    return value
| gpl-3.0 |
marqh/iris | lib/iris/tests/test_plot.py | 3 | 32979 | # (C) British Crown Copyright 2010 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
from functools import wraps
import types
import warnings
import cf_units
import numpy as np
import iris
import iris.coords as coords
import iris.tests.stock
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
import iris.plot as iplt
import iris.quickplot as qplt
import iris.symbols
@tests.skip_data
def simple_cube():
    """Return a small 2d (time, grid_longitude) slice of the realistic
    4d stock cube, with bounds guessed on the time coordinate."""
    cube = iris.tests.stock.realistic_4d()[:, 0, 0, :]
    cube.coord('time').guess_bounds()
    return cube
@tests.skip_plot
class TestSimple(tests.GraphicsTest):
    """Basic filled-contour and pcolor plots of a simple 2d cube."""
    def test_points(self):
        cube = simple_cube()
        qplt.contourf(cube)
        self.check_graphic()
    def test_bounds(self):
        # pcolor draws cells, so it uses coordinate bounds
        # (cf. TestPcolorNoBounds below).
        cube = simple_cube()
        qplt.pcolor(cube)
        self.check_graphic()
@tests.skip_plot
class TestMissingCoord(tests.GraphicsTest):
    """Check 2d plotting still works when one or both of the cube's
    dimension coordinates have been removed."""
    def _check(self, cube):
        # Draw with both plot types and compare each against its
        # reference graphic.
        qplt.contourf(cube)
        self.check_graphic()
        qplt.pcolor(cube)
        self.check_graphic()
    def test_no_u(self):
        cube = simple_cube()
        cube.remove_coord('grid_longitude')
        self._check(cube)
    def test_no_v(self):
        cube = simple_cube()
        cube.remove_coord('time')
        self._check(cube)
    def test_none(self):
        cube = simple_cube()
        cube.remove_coord('grid_longitude')
        cube.remove_coord('time')
        self._check(cube)
@tests.skip_data
@tests.skip_plot
class TestMissingCS(tests.GraphicsTest):
    """Plotting a lat/lon cube whose coordinate systems have been cleared."""
    @tests.skip_data
    def test_missing_cs(self):
        # NOTE(review): @tests.skip_data is already applied at class level;
        # this method-level copy looks redundant -- confirm before removing.
        cube = tests.stock.simple_pp()
        cube.coord("latitude").coord_system = None
        cube.coord("longitude").coord_system = None
        qplt.contourf(cube)
        qplt.plt.gca().coastlines()
        self.check_graphic()
@tests.skip_plot
@tests.skip_data
class TestHybridHeight(tests.GraphicsTest):
    """Plotting a hybrid-height (level_height/sigma/altitude) cube slice."""
    def setUp(self):
        super(TestHybridHeight, self).setUp()
        self.cube = iris.tests.stock.realistic_4d()[0, :15, 0, :]
    def _check(self, plt_method, test_altitude=True):
        # Draw with the default coords and with each explicit coord order.
        plt_method(self.cube)
        self.check_graphic()
        plt_method(self.cube, coords=['level_height', 'grid_longitude'])
        self.check_graphic()
        plt_method(self.cube, coords=['grid_longitude', 'level_height'])
        self.check_graphic()
        if test_altitude:
            plt_method(self.cube, coords=['grid_longitude', 'altitude'])
            self.check_graphic()
            plt_method(self.cube, coords=['altitude', 'grid_longitude'])
            self.check_graphic()
    def test_points(self):
        self._check(qplt.contourf)
    def test_bounds(self):
        # Altitude plots are skipped for bounds-based drawing (see below).
        self._check(qplt.pcolor, test_altitude=False)
    def test_orography(self):
        qplt.contourf(self.cube)
        iplt.orography_at_points(self.cube)
        iplt.points(self.cube)
        self.check_graphic()
        coords = ['altitude', 'grid_longitude']
        qplt.contourf(self.cube, coords=coords)
        iplt.orography_at_points(self.cube, coords=coords)
        iplt.points(self.cube, coords=coords)
        self.check_graphic()
        # TODO: Test bounds once they are supported.
        with self.assertRaises(NotImplementedError):
            qplt.pcolor(self.cube)
            iplt.orography_at_bounds(self.cube)
            iplt.outline(self.cube)
            self.check_graphic()
@tests.skip_plot
@tests.skip_data
class Test1dPlotMultiArgs(tests.GraphicsTest):
    """Tests for iris.plot.plot using the multi-argument calling
    convention (cube/coord combinations for x and y)."""
    # tests for iris.plot using multi-argument calling convention
    def setUp(self):
        super(Test1dPlotMultiArgs, self).setUp()
        self.cube1d = _load_4d_testcube()[0, :, 0, 0]
        self.draw_method = iplt.plot
    def test_cube(self):
        # just plot a cube against its dim coord
        self.draw_method(self.cube1d) # altitude vs temp
        self.check_graphic()
    def test_coord(self):
        # plot the altitude coordinate
        self.draw_method(self.cube1d.coord('altitude'))
        self.check_graphic()
    def test_coord_cube(self):
        # plot temperature against sigma
        self.draw_method(self.cube1d.coord('sigma'), self.cube1d)
        self.check_graphic()
    def test_cube_coord(self):
        # plot a vertical profile of temperature
        self.draw_method(self.cube1d, self.cube1d.coord('altitude'))
        self.check_graphic()
    def test_coord_coord(self):
        # plot two coordinates that are not mappable
        self.draw_method(self.cube1d.coord('sigma'),
                         self.cube1d.coord('altitude'))
        self.check_graphic()
    def test_coord_coord_map(self):
        # plot lat-lon aux coordinates of a trajectory, which draws a map
        lon = iris.coords.AuxCoord([0, 5, 10, 15, 20, 25, 30, 35, 40, 45],
                                   standard_name='longitude',
                                   units='degrees_north')
        lat = iris.coords.AuxCoord([45, 55, 50, 60, 55, 65, 60, 70, 65, 75],
                                   standard_name='latitude',
                                   units='degrees_north')
        self.draw_method(lon, lat)
        plt.gca().coastlines()
        self.check_graphic()
    def test_cube_cube(self):
        # plot two phenomena against each other, in this case just dummy data
        cube1 = self.cube1d.copy()
        cube2 = self.cube1d.copy()
        cube1.rename('some phenomenon')
        cube2.rename('some other phenomenon')
        cube1.units = cf_units.Unit('no_unit')
        cube2.units = cf_units.Unit('no_unit')
        cube1.data[:] = np.linspace(0, 1, 7)
        cube2.data[:] = np.exp(cube1.data)
        self.draw_method(cube1, cube2)
        self.check_graphic()
    def test_incompatible_objects(self):
        # incompatible objects (not the same length) should raise an error
        with self.assertRaises(ValueError):
            self.draw_method(self.cube1d.coord('time'), (self.cube1d))
    def test_multimidmensional(self):
        # NOTE(review): method name has a typo ("multimidmensional").
        # multidimensional cubes are not allowed
        cube = _load_4d_testcube()[0, :, :, 0]
        with self.assertRaises(ValueError):
            self.draw_method(cube)
    def test_not_cube_or_coord(self):
        # inputs must be cubes or coordinates, otherwise an error should be
        # raised
        xdim = np.arange(self.cube1d.shape[0])
        with self.assertRaises(TypeError):
            self.draw_method(xdim, self.cube1d)
    def test_plot_old_coords_kwarg(self):
        # Coords used to be a valid kwarg to plot, but it was deprecated and
        # we are maintaining a reasonable exception, check that it is raised
        # here.
        with self.assertRaises(TypeError):
            self.draw_method(self.cube1d, coords=None)
@tests.skip_plot
class Test1dQuickplotPlotMultiArgs(Test1dPlotMultiArgs):
    """Repeat the multi-argument tests with iris.quickplot.plot."""
    # tests for iris.plot using multi-argument calling convention
    def setUp(self):
        # Call GraphicsTest.setUp directly, bypassing the parent setUp so
        # the cube is only loaded once and draw_method is not overwritten.
        tests.GraphicsTest.setUp(self)
        self.cube1d = _load_4d_testcube()[0, :, 0, 0]
        self.draw_method = qplt.plot
@tests.skip_data
@tests.skip_plot
class Test1dScatter(tests.GraphicsTest):
    """Tests for iris.plot.scatter with cube/coord argument combinations."""
    def setUp(self):
        super(Test1dScatter, self).setUp()
        self.cube = iris.load_cube(
            tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),
            'Temperature')
        self.draw_method = iplt.scatter
    def test_coord_coord(self):
        x = self.cube.coord('longitude')
        y = self.cube.coord('altitude')
        c = self.cube.data
        self.draw_method(x, y, c=c, edgecolor='none')
        self.check_graphic()
    def test_coord_coord_map(self):
        # lat/lon coordinates, so a map is drawn.
        x = self.cube.coord('longitude')
        y = self.cube.coord('latitude')
        c = self.cube.data
        self.draw_method(x, y, c=c, edgecolor='none')
        plt.gca().coastlines()
        self.check_graphic()
    def test_coord_cube(self):
        x = self.cube.coord('latitude')
        y = self.cube
        c = self.cube.coord('Travel Time').points
        self.draw_method(x, y, c=c, edgecolor='none')
        self.check_graphic()
    def test_cube_coord(self):
        x = self.cube
        y = self.cube.coord('altitude')
        c = self.cube.coord('Travel Time').points
        self.draw_method(x, y, c=c, edgecolor='none')
        self.check_graphic()
    def test_cube_cube(self):
        x = iris.load_cube(
            tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),
            'Rel Humidity')
        y = self.cube
        c = self.cube.coord('Travel Time').points
        self.draw_method(x, y, c=c, edgecolor='none')
        self.check_graphic()
    def test_incompatible_objects(self):
        # cubes/coordinates of different sizes cannot be plotted
        x = self.cube
        y = self.cube.coord('altitude')[:-1]
        with self.assertRaises(ValueError):
            self.draw_method(x, y)
    def test_multidimensional(self):
        # multidimensional cubes/coordinates are not allowed
        x = _load_4d_testcube()[0, :, :, 0]
        y = x.coord('model_level_number')
        with self.assertRaises(ValueError):
            self.draw_method(x, y)
    def test_not_cube_or_coord(self):
        # inputs must be cubes or coordinates
        x = np.arange(self.cube.shape[0])
        y = self.cube
        with self.assertRaises(TypeError):
            self.draw_method(x, y)
@tests.skip_data
@tests.skip_plot
class Test1dQuickplotScatter(Test1dScatter):
    """Repeat the scatter tests with iris.quickplot.scatter."""
    def setUp(self):
        # Call GraphicsTest.setUp directly, bypassing the parent setUp so
        # draw_method is not overwritten with iplt.scatter.
        tests.GraphicsTest.setUp(self)
        self.cube = iris.load_cube(
            tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),
            'Temperature')
        self.draw_method = qplt.scatter
@tests.skip_data
@tests.skip_plot
class TestAttributePositive(tests.GraphicsTest):
    """Plotting against vertical coordinates such as depth, where the
    coordinate's orientation ("positive" direction) affects the axes."""
    def test_1d_positive_up(self):
        path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))
        cube = iris.load_cube(path)
        qplt.plot(cube.coord('depth'), cube[0, :, 60, 80])
        self.check_graphic()
    def test_1d_positive_down(self):
        path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))
        cube = iris.load_cube(path)
        qplt.plot(cube[0, :, 60, 80], cube.coord('depth'))
        self.check_graphic()
    def test_2d_positive_up(self):
        path = tests.get_data_path(('NetCDF', 'testing',
                                    'small_theta_colpex.nc'))
        cube = iris.load_cube(path, 'air_potential_temperature')[0, :, 42, :]
        qplt.pcolormesh(cube)
        self.check_graphic()
    def test_2d_positive_down(self):
        path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))
        cube = iris.load_cube(path)[0, :, 42, :]
        qplt.pcolormesh(cube)
        self.check_graphic()
# Caches _load_4d_testcube so subsequent calls are faster
def cache(fn, cache={}):
    """Decorator memoising a zero/any-argument loader function.

    The result is cached under ``fn.__name__`` (arguments are ignored),
    so all calls after the first return the same object.  The mutable
    default ``cache`` dict is deliberate: it is the shared store for
    every function decorated in this module.
    """
    # Use wraps for consistency with check_warnings/ignore_warnings, so
    # the wrapper keeps the wrapped function's name and docstring.
    @wraps(fn)
    def inner(*args, **kwargs):
        key = fn.__name__
        if key not in cache:
            cache[key] = fn(*args, **kwargs)
        return cache[key]
    return inner
@cache
@tests.skip_data
def _load_4d_testcube():
    """Return a reduced copy of the realistic 4d (TZYX) stock cube, with
    the forecast_period coord replaced by a multi-valued DimCoord."""
    # Load example 4d data (TZYX).
    test_cube = iris.tests.stock.realistic_4d()
    # Replace forecast_period coord with a multi-valued version.
    time_coord = test_cube.coord('time')
    n_times = len(time_coord.points)
    forecast_dims = test_cube.coord_dims(time_coord)
    test_cube.remove_coord('forecast_period')
    # Make up values (including bounds), to roughly match older testdata.
    point_values = np.linspace((1 + 1.0 / 6), 2.0, n_times)
    point_uppers = point_values + (point_values[1] - point_values[0])
    bound_values = np.column_stack([point_values, point_uppers])
    # NOTE: this must be a DimCoord
    # - an equivalent AuxCoord produces different plots.
    new_forecast_coord = iris.coords.DimCoord(
        points=point_values,
        bounds=bound_values,
        standard_name='forecast_period',
        units=cf_units.Unit('hours')
    )
    test_cube.add_aux_coord(new_forecast_coord, forecast_dims)
    # Heavily reduce dimensions for faster testing.
    # NOTE: this makes ZYX non-contiguous. Doesn't seem to matter for now.
    test_cube = test_cube[:, ::10, ::10, ::10]
    return test_cube
@cache
def _load_wind_no_bounds():
    """Return a reduced x_wind cube (TZYX) with all coordinate bounds
    removed, for exercising the no-bounds warning paths."""
    # Load the COLPEX data => TZYX
    path = tests.get_data_path(('PP', 'COLPEX', 'small_eastward_wind.pp'))
    wind = iris.load_cube(path, 'x_wind')
    # Remove bounds from all coords that have them.
    wind.coord('grid_latitude').bounds = None
    wind.coord('grid_longitude').bounds = None
    wind.coord('level_height').bounds = None
    wind.coord('sigma').bounds = None
    return wind[:, :, :50, :50]
def _time_series(src_cube):
# Until we have plotting support for multiple axes on the same dimension,
# remove the time coordinate and its axis.
cube = src_cube.copy()
cube.remove_coord('time')
return cube
def _date_series(src_cube):
# Until we have plotting support for multiple axes on the same dimension,
# remove the forecast_period coordinate and its axis.
cube = src_cube.copy()
cube.remove_coord('forecast_period')
return cube
@tests.skip_plot
class SliceMixin(object):
    """Mixin class providing tests for each 2-dimensional permutation of axes.
    Requires self.draw_method to be the relevant plotting function,
    and self.results to be a dictionary containing the desired test results."""
    # self.wind is a 4d TZYX cube (see _load_4d_testcube).
    def test_yx(self):
        cube = self.wind[0, 0, :, :]
        self.draw_method(cube)
        self.check_graphic()
    def test_zx(self):
        cube = self.wind[0, :, 0, :]
        self.draw_method(cube)
        self.check_graphic()
    def test_tx(self):
        # The time axis itself must be removed first (see _time_series).
        cube = _time_series(self.wind[:, 0, 0, :])
        self.draw_method(cube)
        self.check_graphic()
    def test_zy(self):
        cube = self.wind[0, :, :, 0]
        self.draw_method(cube)
        self.check_graphic()
    def test_ty(self):
        cube = _time_series(self.wind[:, 0, :, 0])
        self.draw_method(cube)
        self.check_graphic()
    def test_tz(self):
        cube = _time_series(self.wind[:, :, 0, 0])
        self.draw_method(cube)
        self.check_graphic()
@tests.skip_data
class TestContour(tests.GraphicsTest, SliceMixin):
    """Test the iris.plot.contour routine."""
    def setUp(self):
        super(TestContour, self).setUp()
        # _load_4d_testcube is memoised (@cache), so repeat setUps are cheap.
        self.wind = _load_4d_testcube()
        self.draw_method = iplt.contour
@tests.skip_data
class TestContourf(tests.GraphicsTest, SliceMixin):
    """Test the iris.plot.contourf routine."""
    def setUp(self):
        super(TestContourf, self).setUp()
        # _load_4d_testcube is memoised (@cache), so repeat setUps are cheap.
        self.wind = _load_4d_testcube()
        self.draw_method = iplt.contourf
@tests.skip_data
class TestPcolor(tests.GraphicsTest, SliceMixin):
    """Test the iris.plot.pcolor routine."""
    def setUp(self):
        super(TestPcolor, self).setUp()
        # _load_4d_testcube is memoised (@cache), so repeat setUps are cheap.
        self.wind = _load_4d_testcube()
        self.draw_method = iplt.pcolor
@tests.skip_data
class TestPcolormesh(tests.GraphicsTest, SliceMixin):
    """Test the iris.plot.pcolormesh routine."""
    def setUp(self):
        super(TestPcolormesh, self).setUp()
        # _load_4d_testcube is memoised (@cache), so repeat setUps are cheap.
        self.wind = _load_4d_testcube()
        self.draw_method = iplt.pcolormesh
def check_warnings(method):
    """
    Decorator that adds a catch_warnings and filter to assert
    the method being decorated issues a UserWarning.
    """
    @wraps(method)
    def decorated_method(self, *args, **kwargs):
        # Force reset of iris.coords warnings registry to avoid suppression of
        # repeated warnings. warnings.resetwarnings() does not do this.
        if hasattr(coords, '__warningregistry__'):
            coords.__warningregistry__.clear()
        # Check that method raises warning.
        # simplefilter("error") promotes any warning into a raised
        # exception, which assertRaises(UserWarning) then catches.
        with warnings.catch_warnings():
            warnings.simplefilter("error")
            with self.assertRaises(UserWarning):
                return method(self, *args, **kwargs)
    return decorated_method
def ignore_warnings(method):
    """
    Decorator suppressing every warning issued while the decorated
    method runs, so the underlying behaviour can be exercised.
    """
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return method(self, *args, **kwargs)
    return wrapper
class CheckForWarningsMetaclass(type):
    """
    Metaclass that adds a further test for each base class test
    that checks that each test raises a UserWarning. Each base
    class test is then overridden to ignore warnings in order to
    check the underlying functionality.
    """
    def __new__(cls, name, bases, local):
        def add_decorated_methods(attr_dict, target_dict, decorator):
            # Add a "<testname>_<decorator name>" companion for every test
            # method in attr_dict, wrapped with `decorator`.
            for key, value in attr_dict.items():
                if (isinstance(value, types.FunctionType) and
                        key.startswith('test')):
                    new_key = '_'.join((key, decorator.__name__))
                    if new_key not in target_dict:
                        wrapped = decorator(value)
                        wrapped.__name__ = new_key
                        target_dict[new_key] = wrapped
                    else:
                        # Fixed grammar in the error message
                        # ("A attribute" -> "An attribute").
                        raise RuntimeError('An attribute called {!r} '
                                           'already exists.'.format(new_key))

        def override_with_decorated_methods(attr_dict, target_dict,
                                            decorator):
            # Replace each inherited test method with a decorated version.
            for key, value in attr_dict.items():
                if (isinstance(value, types.FunctionType) and
                        key.startswith('test')):
                    target_dict[key] = decorator(value)

        # Add decorated versions of base methods
        # to check for warnings.
        for base in bases:
            add_decorated_methods(base.__dict__, local, check_warnings)
        # Override base methods to ignore warnings.
        for base in bases:
            override_with_decorated_methods(base.__dict__, local,
                                            ignore_warnings)
        return type.__new__(cls, name, bases, local)
@tests.skip_data
@tests.iristest_timing_decorator
class TestPcolorNoBounds(six.with_metaclass(CheckForWarningsMetaclass,
                                            tests.GraphicsTest_nometa,
                                            SliceMixin)):
    """
    Test the iris.plot.pcolor routine on a cube with coordinates
    that have no bounds.
    """
    # The metaclass adds *_check_warnings variants of the mixin tests and
    # overrides the originals to ignore warnings.
    def setUp(self):
        super(TestPcolorNoBounds, self).setUp()
        self.wind = _load_wind_no_bounds()
        self.draw_method = iplt.pcolor
@tests.skip_data
@tests.iristest_timing_decorator
class TestPcolormeshNoBounds(six.with_metaclass(CheckForWarningsMetaclass,
                                                tests.GraphicsTest_nometa,
                                                SliceMixin)):
    """
    Test the iris.plot.pcolormesh routine on a cube with coordinates
    that have no bounds.
    """
    # The metaclass adds *_check_warnings variants of the mixin tests and
    # overrides the originals to ignore warnings.
    def setUp(self):
        super(TestPcolormeshNoBounds, self).setUp()
        self.wind = _load_wind_no_bounds()
        self.draw_method = iplt.pcolormesh
@tests.skip_plot
class Slice1dMixin(object):
    """Mixin class providing tests for each 1-dimensional permutation of axes.
    Requires self.draw_method to be the relevant plotting function,
    and self.results to be a dictionary containing the desired test results."""
    # self.wind is a 4d TZYX cube (see _load_4d_testcube).
    def test_x(self):
        cube = self.wind[0, 0, 0, :]
        self.draw_method(cube)
        self.check_graphic()
    def test_y(self):
        cube = self.wind[0, 0, :, 0]
        self.draw_method(cube)
        self.check_graphic()
    def test_z(self):
        cube = self.wind[0, :, 0, 0]
        self.draw_method(cube)
        self.check_graphic()
    def test_t(self):
        cube = _time_series(self.wind[:, 0, 0, 0])
        self.draw_method(cube)
        self.check_graphic()
    def test_t_dates(self):
        # Plot against forecast-free dates, with rotated date labels.
        cube = _date_series(self.wind[:, 0, 0, 0])
        self.draw_method(cube)
        plt.gcf().autofmt_xdate()
        plt.xlabel('Phenomenon time')
        self.check_graphic()
@tests.skip_data
class TestPlot(tests.GraphicsTest, Slice1dMixin):
    """Test the iris.plot.plot routine."""
    def setUp(self):
        super(TestPlot, self).setUp()
        # _load_4d_testcube is memoised (@cache), so repeat setUps are cheap.
        self.wind = _load_4d_testcube()
        self.draw_method = iplt.plot
@tests.skip_data
class TestQuickplotPlot(tests.GraphicsTest, Slice1dMixin):
    """Test the iris.quickplot.plot routine."""
    def setUp(self):
        super(TestQuickplotPlot, self).setUp()
        # _load_4d_testcube is memoised (@cache), so repeat setUps are cheap.
        self.wind = _load_4d_testcube()
        self.draw_method = qplt.plot
# Module-level store for load_cube_once results.
_load_cube_once_cache = {}
def load_cube_once(filename, constraint):
    """Same syntax as load_cube, but will only load a file once,
    then cache the answer in a dictionary.
    """
    key = (filename, str(constraint))
    cube = _load_cube_once_cache.get(key, None)
    if cube is None:
        cube = iris.load_cube(filename, constraint)
        _load_cube_once_cache[key] = cube
    return cube
class LambdaStr(object):
    """A callable delegating to ``lambda_fn``, whose repr() is a fixed,
    readable string -- handy for parametrised test tables."""
    def __init__(self, repr, lambda_fn):
        # The first parameter deliberately shadows the builtin ``repr``;
        # kept as-is for interface compatibility.
        self.repr = repr
        self.lambda_fn = lambda_fn
    def __call__(self, *args, **kwargs):
        target = self.lambda_fn
        return target(*args, **kwargs)
    def __repr__(self):
        return self.repr
@tests.skip_data
@tests.skip_plot
class TestPlotCoordinatesGiven(tests.GraphicsTest):
    def setUp(self):
        super(TestPlotCoordinatesGiven, self).setUp()
        filename = tests.get_data_path(('PP', 'COLPEX',
                                        'theta_and_orog_subset.pp'))
        self.cube = load_cube_once(filename, 'air_potential_temperature')
        self.draw_module = iris.plot
        # Named wrappers so a failing table entry reports which plot
        # routine was being exercised (see LambdaStr).
        self.contourf = LambdaStr('iris.plot.contourf',
                                  lambda cube, *args, **kwargs:
                                  iris.plot.contourf(cube, *args, **kwargs))
        self.contour = LambdaStr('iris.plot.contour',
                                 lambda cube, *args, **kwargs:
                                 iris.plot.contour(cube, *args, **kwargs))
        self.points = LambdaStr('iris.plot.points',
                                lambda cube, *args, **kwargs:
                                iris.plot.points(cube, c=cube.data,
                                                 *args, **kwargs))
        self.plot = LambdaStr('iris.plot.plot',
                              lambda cube, *args, **kwargs:
                              iris.plot.plot(cube, *args, **kwargs))
        # Map of slice kind -> sequence of [draw callable, coord names]
        # cases, consumed by run_tests / run_tests_1d.
        self.results = {'yx': ([self.contourf, ['grid_latitude',
                                                'grid_longitude']],
                               [self.contourf, ['grid_longitude',
                                                'grid_latitude']],
                               [self.contour, ['grid_latitude',
                                               'grid_longitude']],
                               [self.contour, ['grid_longitude',
                                               'grid_latitude']],
                               [self.points, ['grid_latitude',
                                              'grid_longitude']],
                               [self.points, ['grid_longitude',
                                              'grid_latitude']],),
                        'zx': ([self.contourf, ['model_level_number',
                                                'grid_longitude']],
                               [self.contourf, ['grid_longitude',
                                                'model_level_number']],
                               [self.contour, ['model_level_number',
                                               'grid_longitude']],
                               [self.contour, ['grid_longitude',
                                               'model_level_number']],
                               [self.points, ['model_level_number',
                                              'grid_longitude']],
                               [self.points, ['grid_longitude',
                                              'model_level_number']],),
                        'tx': ([self.contourf, ['time', 'grid_longitude']],
                               [self.contourf, ['grid_longitude', 'time']],
                               [self.contour, ['time', 'grid_longitude']],
                               [self.contour, ['grid_longitude', 'time']],
                               [self.points, ['time', 'grid_longitude']],
                               [self.points, ['grid_longitude', 'time']],),
                        'x': ([self.plot, ['grid_longitude']],),
                        'y': ([self.plot, ['grid_latitude']],)
                        }
def draw(self, draw_method, *args, **kwargs):
draw_fn = getattr(self.draw_module, draw_method)
draw_fn(*args, **kwargs)
self.check_graphic()
def run_tests(self, cube, results):
for draw_method, coords in results:
draw_method(cube, coords=coords)
try:
self.check_graphic()
except AssertionError as err:
self.fail('Draw method %r failed with coords: %r. '
'Assertion message: %s' % (draw_method, coords, err))
def run_tests_1d(self, cube, results):
# there is a different calling convention for 1d plots
for draw_method, coords in results:
draw_method(cube.coord(coords[0]), cube)
try:
self.check_graphic()
except AssertionError as err:
msg = 'Draw method {!r} failed with coords: {!r}. ' \
'Assertion message: {!s}'
self.fail(msg.format(draw_method, coords, err))
def test_yx(self):
test_cube = self.cube[0, 0, :, :]
self.run_tests(test_cube, self.results['yx'])
def test_zx(self):
test_cube = self.cube[0, :15, 0, :]
self.run_tests(test_cube, self.results['zx'])
def test_tx(self):
test_cube = self.cube[:, 0, 0, :]
self.run_tests(test_cube, self.results['tx'])
def test_x(self):
test_cube = self.cube[0, 0, 0, :]
self.run_tests_1d(test_cube, self.results['x'])
def test_y(self):
test_cube = self.cube[0, 0, :, 0]
self.run_tests_1d(test_cube, self.results['y'])
def test_badcoords(self):
cube = self.cube[0, 0, :, :]
draw_fn = getattr(self.draw_module, 'contourf')
self.assertRaises(ValueError, draw_fn, cube,
coords=['grid_longitude', 'grid_longitude'])
self.assertRaises(ValueError, draw_fn, cube,
coords=['grid_longitude', 'grid_longitude',
'grid_latitude'])
self.assertRaises(iris.exceptions.CoordinateNotFoundError, draw_fn,
cube, coords=['grid_longitude', 'wibble'])
self.assertRaises(ValueError, draw_fn, cube, coords=[])
self.assertRaises(ValueError, draw_fn, cube,
coords=[cube.coord('grid_longitude'),
cube.coord('grid_longitude')])
self.assertRaises(ValueError, draw_fn, cube,
coords=[cube.coord('grid_longitude'),
cube.coord('grid_longitude'),
cube.coord('grid_longitude')])
def test_non_cube_coordinate(self):
cube = self.cube[0, :, :, 0]
pts = -100 + np.arange(cube.shape[1]) * 13
x = coords.DimCoord(pts, standard_name='model_level_number',
attributes={'positive': 'up'})
self.draw('contourf', cube, coords=['grid_latitude', x])
@tests.skip_data
@tests.skip_plot
class TestPlotDimAndAuxCoordsKwarg(tests.GraphicsTest):
    """
    Checks that ``coords=`` accepts dimension coords, auxiliary coords,
    and coord names, on a rotated-pole NetCDF cube.
    """

    def setUp(self):
        super(TestPlotDimAndAuxCoordsKwarg, self).setUp()
        filename = tests.get_data_path(('NetCDF', 'rotated', 'xy',
                                        'rotPole_landAreaFraction.nc'))
        self.cube = iris.load_cube(filename)

    def test_default(self):
        # No coords given: iris picks the dimension coords itself.
        iplt.contourf(self.cube)
        plt.gca().coastlines()
        self.check_graphic()

    def test_coords(self):
        # Pass in dimension coords.
        rlat = self.cube.coord('grid_latitude')
        rlon = self.cube.coord('grid_longitude')
        iplt.contourf(self.cube, coords=[rlon, rlat])
        plt.gca().coastlines()
        self.check_graphic()
        # Pass in auxiliary coords.
        lat = self.cube.coord('latitude')
        lon = self.cube.coord('longitude')
        iplt.contourf(self.cube, coords=[lon, lat])
        plt.gca().coastlines()
        self.check_graphic()

    def test_coord_names(self):
        # Pass in names of dimension coords.
        iplt.contourf(self.cube, coords=['grid_longitude', 'grid_latitude'])
        plt.gca().coastlines()
        self.check_graphic()
        # Pass in names of auxiliary coords.
        iplt.contourf(self.cube, coords=['longitude', 'latitude'])
        plt.gca().coastlines()
        self.check_graphic()

    def test_yx_order(self):
        # Do not attempt to draw coastlines as it is not a map.
        iplt.contourf(self.cube, coords=['grid_latitude', 'grid_longitude'])
        self.check_graphic()
        iplt.contourf(self.cube, coords=['latitude', 'longitude'])
        self.check_graphic()
@tests.skip_plot
class TestSymbols(tests.GraphicsTest):
    """Graphics test for the cloud-cover symbol glyphs."""

    def test_cloud_cover(self):
        # Draw the ten cloud-cover symbols in a row and compare against
        # the reference image.
        iplt.symbols(list(range(10)),
                     [0] * 10,
                     [iris.symbols.CLOUD_COVER[i] for i in range(10)],
                     0.375)
        iplt.plt.axis('off')
        self.check_graphic()
@tests.skip_plot
class TestPlottingExceptions(tests.IrisTest):
    """
    Checks that pcolormesh rejects cubes whose bounded coordinates cannot
    form a contiguous 2-D grid.
    """

    def setUp(self):
        self.bounded_cube = tests.stock.lat_lon_cube()
        self.bounded_cube.coord("latitude").guess_bounds()
        self.bounded_cube.coord("longitude").guess_bounds()

    def test_boundmode_multidim(self):
        # Test exception translation.
        # We can't get contiguous bounded grids from multi-d coords.
        cube = self.bounded_cube
        cube.remove_coord("latitude")
        cube.add_aux_coord(coords.AuxCoord(points=cube.data,
                                           standard_name='latitude',
                                           units='degrees'), [0, 1])
        with self.assertRaises(ValueError):
            iplt.pcolormesh(cube, coords=['longitude', 'latitude'])

    def test_boundmode_4bounds(self):
        # Test exception translation.
        # We can only get contiguous bounded grids with 2 bounds per point.
        cube = self.bounded_cube
        lat = coords.AuxCoord.from_coord(cube.coord("latitude"))
        lat.bounds = np.array([lat.points, lat.points + 1,
                               lat.points + 2, lat.points + 3]).transpose()
        cube.remove_coord("latitude")
        cube.add_aux_coord(lat, 0)
        with self.assertRaises(ValueError):
            iplt.pcolormesh(cube, coords=['longitude', 'latitude'])

    def test_different_coord_systems(self):
        # Mixed coordinate systems on the two axes must be rejected.
        cube = self.bounded_cube
        lat = cube.coord('latitude')
        lon = cube.coord('longitude')
        lat.coord_system = iris.coord_systems.GeogCS(7000000)
        lon.coord_system = iris.coord_systems.GeogCS(7000001)
        with self.assertRaises(ValueError):
            iplt.pcolormesh(cube, coords=['longitude', 'latitude'])
@tests.skip_data
@tests.skip_plot
class TestPlotOtherCoordSystems(tests.GraphicsTest):
    """Graphics test for plotting on a transverse Mercator projection."""

    def test_plot_tmerc(self):
        filename = tests.get_data_path(('NetCDF', 'transverse_mercator',
                                        'tmean_1910_1910.nc'))
        self.cube = iris.load_cube(filename)
        # Plot the first time step only.
        iplt.pcolormesh(self.cube[0])
        plt.gca().coastlines()
        self.check_graphic()
# Allow the test module to be run directly as a script.
if __name__ == "__main__":
    tests.main()
| lgpl-3.0 |
citiususc/construe | construe/knowledge/abstraction_patterns/segmentation/pwave.py | 1 | 15026 | # -*- coding: utf-8 -*-
# pylint: disable=C0326
"""
Created on Tue Sep 30 12:31:04 2014
This module contains the definition of the P wave abstraction pattern.
@author: T. Teijeiro
"""
import pickle
import numpy as np
from pathlib import Path
import sklearn.preprocessing as preproc
import construe.knowledge.observables as o
import construe.utils.signal_processing.Douglas_Peucker as DP
import construe.knowledge.constants as C
import construe.acquisition.signal_buffer as sig_buf
from construe.utils.units_helper import (samples2msec as sp2ms,
phys2digital as ph2dg,
digital2phys as dg2ph)
from construe.model import verify, Interval as Iv
from construe.model.automata import PatternAutomata, ABSTRACTED, ENVIRONMENT
####################################################
### Definition of the P Wave abstraction pattern ###
####################################################
#Auxiliary functions
def _delimit_p(signal, lead, es_lim, ls_lim, ee_lim):
    """
    Performs the delimitation of a P wave in a signal fragment. If a waveform
    compatible with a P wave cannot be found, returns None, else return an
    Interval within signal length.

    Parameters
    ----------
    signal:
        Raw signal fragment (digital units) that may contain the P wave.
    lead:
        Lead the fragment was taken from (passed to the P-wave classifier).
    es_lim, ls_lim:
        Earliest/latest admissible sample for the wave onset, as indices
        into *signal*.
    ee_lim:
        Earliest admissible sample for the wave end.
    """
    #shape simplification (ignoring the environment signal)
    delta = ph2dg(0.02)
    points = DP.arrayRDP(signal[int(es_lim):], delta, 6) + int(es_lim)
    #If no relevant disturbances are detected, there is no a P wave
    if len(points) == 2:
        return None
    #Now we look for the shorter limits that satisfy the P-Wave classifier.
    cand = None
    # Scan onsets from the latest admissible simplification point backwards.
    # NOTE(review): these `next(...)` calls raise StopIteration if no
    # simplification point satisfies the limit -- presumably the caller
    # guarantees the limits fall inside the fragment; confirm.
    i = next(k for k in range(len(points)-1, -1, -1) if points[k] <= ls_lim)
    while i >= 0:
        j = next(k for k in range(i+1, len(points)) if points[k] >= ee_lim)
        while j < len(points):
            sigfr = signal[points[i]:points[j]+1]
            #We consider a good P wave environment if the signal has no
            #amplitude variations
            beg = int(max(0, points[i]-C.PWAVE_ENV))
            plainenv = not np.any(signal[beg:points[i]+1]-signal[beg])
            #The minimum threshold varies with the environment quality
            ampthres = C.PWAVE_MIN_AMP if not plainenv else delta
            # Accept the shortest fragment that the classifier recognizes
            # as a P wave and that reaches the amplitude threshold.
            if (seems_pwave(sigfr, lead) and np.ptp(sigfr) >= ampthres):
                cand = (points[i], points[j])
                break
            j += 1
        if cand is not None:
            break
        i -= 1
    # The returned interval is expressed relative to es_lim.
    return None if cand is None else Iv(int(cand[0]-es_lim),
                                        int(cand[1]-es_lim))
def delineate_pwave(es_lim, ls_lim, ee_lim, le_lim, pwave):
    """
    Performs the delineation of a possible P-wave contained in the given
    limits, combining the per-lead delimitations of every available lead.

    Parameters
    ----------
    es_lim:
        earliest possible time for the beginning of the P-wave.
    ls_lim:
        latest possible time for the beginning of the P-wave.
    ee_lim:
        earliest possible time for the ending of the P-wave.
    le_lim:
        latest possible time for the ending of the P-wave.
    pwave:
        PWave instance, **which is modified** to establish the amplitude in all
        those leads in which the identification was correct.

    Returns
    -------
    out:
        Interval with the delineation of the p-wave, relative to *es_lim*. If
        a p-wave cannot be delineated, returns None.
    """
    start = finish = None
    for lead in (l for l in C.PWAVE_LEADS if sig_buf.is_available(l)):
        #We take some environment signal for the P wave.
        beg = int(es_lim-C.PWAVE_ENV)
        beg = 0 if beg < 0 else beg
        sig = sig_buf.get_signal_fragment(beg, le_lim, lead=lead)[0]
        endpoints = _delimit_p(sig, lead, es_lim-beg, ls_lim-beg, ee_lim-beg)
        if endpoints is None:
            continue
        elif start is None:
            # First successful lead fixes the initial delineation.
            start, finish = endpoints.start, endpoints.end
            if finish > start:
                pwave.amplitude[lead] = np.ptp(sig[start:finish+1])
        else:
            # Subsequent leads may only widen the limits if their own
            # delineation lies within C.TMARGIN of the current one.
            if abs(start - endpoints.start) < C.TMARGIN:
                start = min(start, endpoints.start)
            if abs(finish - endpoints.end) < C.TMARGIN:
                finish = max(finish, endpoints.end)
            if finish > start:
                pwave.amplitude[lead] = np.ptp(sig[start:finish+1])
    return None if start is None else Iv(start, finish)
def _p_qrs_tconst(pattern, qrs):
    """
    Registers the temporal constraints linking the P wave hypothesis with
    its QRS environment observation.
    """
    hyp = pattern.hypothesis
    net = pattern.tnet
    # Admissible duration of the P wave itself.
    net.add_constraint(hyp.start, hyp.end, C.PW_DURATION)
    # PQ segment and PR interval constraints tie the wave to the QRS onset.
    net.add_constraint(hyp.end, qrs.start, C.PQ_INTERVAL)
    net.add_constraint(hyp.start, qrs.start, C.PR_INTERVAL)
def _p_defl_tconst(pattern, defl):
    """
    Registers the temporal constraints of the P wave hypothesis with
    respect to the abstracted energy interval (deflection).
    """
    hyp = pattern.hypothesis
    qrs_evidence = pattern.evidence[o.QRS]
    qrs = qrs_evidence[0] if qrs_evidence else None
    net = pattern.tnet
    # Admissible P wave duration.
    net.add_constraint(hyp.start, hyp.end, C.PW_DURATION)
    # Deflection duration, and bounded overlap between wave and deflection.
    net.add_constraint(defl.start, defl.end, C.PW_DEF_DUR)
    overlap = Iv(-C.PW_DEF_OVER, C.PW_DEF_OVER)
    net.add_constraint(hyp.start, defl.start, overlap)
    net.add_constraint(hyp.end, defl.end, overlap)
    net.set_before(defl.start, hyp.end)
    net.set_before(hyp.start, defl.end)
    # If a QRS is already observed, the deflection must precede it within
    # the PR/PQ separation limits.
    if qrs is not None:
        net.add_constraint(defl.start, qrs.start, C.PR_DEF_SEP)
        net.add_constraint(defl.end, qrs.start, C.PQ_DEF_SEP)
        net.set_before(defl.end, qrs.start)
def _p_gconst(pattern, defl):
    """
    General constraints of the P Wave abstraction pattern, once all the
    evidence has been observed.  Performs the actual delineation and
    verifies the amplitude relations with the QRS complex.
    """
    pwave = pattern.hypothesis
    # Only act when the deflection time is fully determined and a QRS
    # environment observation is present.
    if ((defl is not None and defl.earlystart != defl.latestart)
            or not pattern.evidence[o.QRS]):
        return
    qrs = pattern.evidence[o.QRS][0]
    beg = pwave.earlystart
    if beg < 0:
        beg = 0
    #We try the delineation of the P-Wave
    endpoints = delineate_pwave(beg, int(pwave.latestart),
                                int(pwave.earlyend), int(pwave.lateend), pwave)
    verify(endpoints is not None)
    #Now we obtain the amplitudes, and we ensure the QRS amplitude is at
    #least twice the P Wave amplitude in each lead
    pwave.start.set(beg + endpoints.start, beg + endpoints.start)
    pwave.end.set(beg + endpoints.end, beg + endpoints.end)
    for lead in pwave.amplitude:
        # Per-lead amplitude limit, and the wave must be smaller than the
        # QRS in every lead where the QRS shape is known.
        verify(pwave.amplitude[lead] <= C.PWAVE_AMP[lead])
        verify(lead not in qrs.shape or
               pwave.amplitude[lead] < qrs.shape[lead].amplitude)
#########################
## Automata definition ##
#########################

# The P wave pattern: state 0 -> 1 consumes a QRS environment observation,
# state 1 -> 2 abstracts a Deflection; state 2 is final.
PWAVE_PATTERN = PatternAutomata()
PWAVE_PATTERN.name = 'P Wave'
PWAVE_PATTERN.Hypothesis = o.PWave
PWAVE_PATTERN.add_transition(0, 1, o.QRS, ENVIRONMENT, _p_qrs_tconst)
PWAVE_PATTERN.add_transition(1, 2, o.Deflection, ABSTRACTED, _p_defl_tconst,
                             _p_gconst)
#PWAVE_PATTERN.add_transition(1, 2, gconst=_p_gconst)
PWAVE_PATTERN.final_states.add(2)
PWAVE_PATTERN.freeze()
##################################################
### Statistical knowledge stored as histograms ###
##################################################
def _check_histogram(hist, value):
    """
    Obtains a score of a value according to an histogram, between 0.0 and 1.0.

    Parameters
    ----------
    hist:
        Pair ``(scores, bin_edges)`` as produced by ``np.histogram``-style
        tables: ``len(bin_edges) == len(scores) + 1``.
    value:
        Value to score.

    Returns
    -------
    float
        The score of the bin containing *value*, or 0.0 if the value falls
        outside the histogram range.
    """
    # Binary search replaces the original linear scan: searchsorted with
    # the default side='left' returns the first edge >= value, exactly the
    # index the original `while value > hist[1][i]` loop computed.
    i = int(np.searchsorted(hist[1], value))
    return 0.0 if i == 0 or i == len(hist[1]) else hist[0][i-1]
#Static definition of the PR histogram
_PR_HIST = (np.array(
[0.20300752, 0.22932331, 0.32330827, 0.57142857, 0.66541353,
0.92857143, 0.84962406, 0.84210526, 0.87969925, 0.93984963,
1. , 0.77443609, 0.63533835, 0.51879699, 0.43609023,
0.48496241, 0.28947368, 0.29699248, 0.29323308, 0.27443609,
0.2556391 , 0.19172932, 0.20676692, 0.16165414, 0.17293233,
0.17293233, 0.10150376, 0.07518797, 0.07142857, 0.04323308,
0.01503759, 0.0112782 , 0.01503759, 0.02255639, 0.0075188 ,
0.0112782 , 0.0112782 , 0.01503759, 0.01503759, 0.01691729,
0.01879699, 0.0037594 , 0.03383459, 0.0075188 , 0.02631579,
0.03759398, 0.01879699, 0.04887218, 0.0112782 , 0.04511278]),
np.array(
[20. , 23.6, 27.2, 30.8, 34.4, 38. , 41.6, 45.2,
48.8, 52.4, 56. , 59.6, 63.2, 66.8, 70.4, 74. ,
77.6, 81.2, 84.8, 88.4, 92. , 95.6, 99.2, 102.8,
106.4, 110. , 113.6, 117.2, 120.8, 124.4, 128. , 131.6,
135.2, 138.8, 142.4, 146. , 149.6, 153.2, 156.8, 160.4,
164. , 167.6, 171.2, 174.8, 178.4, 182. , 185.6, 189.2,
192.8, 196.4, 200. ]))
#Static definition of the PQ histogram
_PQ_HIST = (np.array(
[0.07231405, 0.15495868, 0.4338843 , 0.58677686, 0.92768595,
1. , 0.94214876, 0.6714876 , 0.52066116, 0.39669421,
0.23760331, 0.1446281 , 0.14876033, 0.10330579, 0.08057851,
0.03512397, 0.02479339, 0.01239669, 0.01859504, 0.0268595 ,
0.01239669, 0.03099174, 0.00826446, 0.00413223, 0.00413223]),
np.array([ 80., 112., 124., 136., 148., 160., 172., 184., 196.,
208., 220., 232., 244., 256., 268., 280., 292., 304.,
316., 328., 340., 352., 364., 376., 388., 400.]))
#Static definition of the P wave duration histogram
_PDUR_HIST = (np.array(
[0.00410678, 0.0164271 , 0.05749487, 0.13552361, 0.24640657,
0.40246407, 0.6899384 , 0.78234086, 1. , 0.87474333,
0.69609856, 0.58316222, 0.33880903, 0.23819302, 0.17453799,
0.12936345, 0.07597536, 0.05338809, 0.01848049, 0.01232033,
0.0164271 , 0.00410678, 0.00410678, 0. , 0.00410678]),
np.array([44. , 51.84, 59.68, 67.52, 75.36, 83.2 , 91.04,
98.88, 106.72, 114.56, 122.4 , 130.24, 138.08, 145.92,
153.76, 161.6 , 169.44, 177.28, 185.12, 192.96, 200.8 ,
208.64, 216.48, 224.32, 232.16, 240. ]))
#################################
### P Wave signal classifiers ###
#################################
#We have one classifier for the limb leads, and other for precordial leads.
#The classifiers are one-class SVM trained with 73 clean records from the QT
#database. The features used are the 4 points in coordinates (X,Y) obtained
#from a RDP simplification of the signal delimiting the P wave, using 5 points
#for the simplification and assuming the first point is always (0,0). The
#units of the coordinates are in msec and mV.
def _scale_sample(signal, lead):
    """
    Builds and scales the feature vector for the P-wave classifiers: the
    four (time, amplitude) points of an RDP simplification of the signal,
    converted to physical units and normalized with the lead's scaler.
    Raises ValueError on unknown leads or too-simple signals.
    """
    if lead not in _CL_MAP:
        raise ValueError('Invalid lead.')
    scaler = _SCALERS[_CL_MAP[lead]]
    #The signal is converted to physical units, and the first point is (0,0)
    phys = dg2ph(signal - signal[0])
    #The features are the 4 points better representing the signal shape.
    keypoints = DP.arrayRDP(phys, 0.001, 5)[1:]
    if len(keypoints) < 4:
        raise ValueError('Not enough points after path simplification')
    features = np.concatenate((sp2ms(keypoints), phys[keypoints]))
    return scaler.transform(features.reshape(1, -1))
def seems_pwave(signal, lead):
    """
    Checks if a signal fragment looks like a P wave. It is assumed the signal
    is in raw signal units at the record sampling frequency.

    Parameters
    ----------
    signal:
        Raw signal array with the delimited P wave.
    lead:
        Lead where the signal has been obtained. It must be a limb
        or a precordial lead.

    Returns
    -------
    bool
        True iff the one-class classifier for the lead accepts the fragment.
    """
    if lead not in _CL_MAP:
        raise ValueError('Invalid lead.')
    clf = _CLASSIFIERS[_CL_MAP[lead]]
    try:
        features = _scale_sample(signal, lead)
    except ValueError:
        # Fragments too simple to produce a feature vector are rejected.
        return False
    return clf.predict(features)[0] == 1
def pwave_distance(signal, lead):
    """
    Obtains a distance measure of a signal fragment to a recognized P wave
    morphology, as the classifier's decision-function value for the lead.
    """
    if lead not in _CL_MAP:
        raise ValueError('Invalid lead.')
    clf = _CLASSIFIERS[_CL_MAP[lead]]
    try:
        features = _scale_sample(signal, lead)
    except ValueError:
        #Unknown values are located in the decision boundary
        return 0.0
    return clf.decision_function(features)[0][0]
#Mapping for each lead with the corresponding classifier.
# Index 0 -> limb-lead classifier, index 1 -> precordial-lead classifier.
_CL_MAP = {sig_buf.Leads.UNIQUE: 0, sig_buf.Leads.MLI: 0,
           sig_buf.Leads.MLII: 0, sig_buf.Leads.MLIII: 0,
           sig_buf.Leads.V1: 1, sig_buf.Leads.V2: 1,
           sig_buf.Leads.V3: 1, sig_buf.Leads.V4: 1,
           sig_buf.Leads.V5: 1, sig_buf.Leads.V6: 1}
#Each classifier has a scaler to preprocess the feature set.
# The scalers are populated with pre-trained statistics instead of fitting.
_SCALERS = [preproc.StandardScaler(), preproc.StandardScaler()]
_SCALERS[0].mean_ = np.array(
    [3.64100119e+01, 6.27794994e+01, 8.74159714e+01, 1.18181168e+02,
     6.97318236e-02, 1.17550656e-01, 8.52205006e-02, -1.42669845e-02])
_SCALERS[1].mean_ = np.array(
    [3.52004505e+01, 6.13603604e+01, 8.69031532e+01, 1.17867117e+02,
     5.26295045e-02, 8.14245495e-02, 3.52759009e-02, -1.85416667e-02])
_std0 = np.array([18.28456548, 21.80939775, 25.19837448, 27.46293336,
                  0.06650826, 0.09788993, 0.09393059, 0.04712977])
_std1 = np.array([16.8934232, 19.45625391, 23.22422215, 26.39513332,
                  0.05968984, 0.07814591, 0.08662095, 0.05071906])
#Fix for scikit-learn > 0.16
# (older releases used `std_`, newer ones `scale_`; both are set)
try:
    _SCALERS[0].std_ = _std0
    _SCALERS[1].std_ = _std1
    _SCALERS[0].scale_ = _std0
    _SCALERS[1].scale_ = _std1
except AttributeError:
    _SCALERS[0].scale_ = _std0
    _SCALERS[1].scale_ = _std1
# Trained classifiers. These classifiers were serialized using the pickle
# module. They are instances of sklearn.svm.OneClassSVM, and have been
# successfully tested with sklearn versions from 0.15 to 0.21.3
# NOTE(review): pickle.load executes arbitrary code from the file; these
# are bundled with the package, so they are assumed trusted.
_localdir = Path(__file__).resolve().parent
with _localdir.joinpath('limb_pw_classifier.pickle').open('rb') as f:
    _LIMB_CLS = pickle.load(f, encoding='latin1')
#Fix for scikit-learn >= 0.22
_LIMB_CLS._n_support = _LIMB_CLS.__dict__['n_support_']
with _localdir.joinpath('precordial_pw_classifier.pickle').open('rb') as f:
    _PREC_CLS = pickle.load(f, encoding='latin1')
#Fix for scikit-learn >= 0.22
_PREC_CLS._n_support = _PREC_CLS.__dict__['n_support_']
_CLASSIFIERS = [_LIMB_CLS, _PREC_CLS]
| agpl-3.0 |
iABC2XYZ/abc | Temp/untitled14.py | 1 | 1845 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 10 17:39:17 2017
@author: A
"""
import numpy as np
import matplotlib.pyplot as plt
def ReconstructionFFT(s):
    """
    Reconstructs a real signal from its full FFT spectrum by summing the
    cosine component (magnitude/N, phase) of every bin.  Because both
    positive and negative frequency bins are included, the sum equals the
    real part of the inverse DFT, i.e. the original signal.

    Parameters
    ----------
    s : array_like
        Real input signal.

    Returns
    -------
    ndarray
        Reconstructed signal, numerically equal to ``s``.
    """
    from scipy.fftpack import fft
    fs = 1
    N = len(s)
    n = np.linspace(0, N - 1, N)
    f = n * fs / N   # bin frequencies (cycles per sample)
    t = n / fs       # sample times
    sFFT = fft(s, N)
    ASFFT = np.abs(sFFT) / N
    angleSFFT = np.angle(sFFT)
    s0 = np.zeros(N)
    # `range` replaces the Python-2-only `xrange` so the function also
    # runs under Python 3; behavior is unchanged.
    for iS in range(N):
        s0 += ASFFT[iS] * np.cos(2 * np.pi * f[iS] * t + angleSFFT[iS])
    return s0
def ReconstructionANDPrediction(dataOrigin, lenPredict):
    """
    Reconstructs ``dataOrigin`` from its FFT and extends the time axis by
    ``lenPredict`` samples.  Since the DFT model is periodic with period
    ``len(dataOrigin)``, the "prediction" is the periodic extension of the
    input.

    Parameters
    ----------
    dataOrigin : array_like
        Real input signal.
    lenPredict : int
        Number of extra samples to generate after the signal.

    Returns
    -------
    ndarray
        Array of length ``len(dataOrigin) + lenPredict``.
    """
    from scipy.fftpack import fft
    fs = 1
    N = len(dataOrigin)
    n = np.linspace(0, N - 1 + lenPredict, N + lenPredict)
    f = n * fs / N
    t = n / fs
    sFFT = fft(dataOrigin, N)
    ASFFT = np.abs(sFFT) / N
    angleSFFT = np.angle(sFFT)
    s0 = np.zeros(N + lenPredict)
    # `range` replaces the Python-2-only `xrange` (py3 compatibility).
    for iS in range(N):
        s0 += ASFFT[iS] * np.cos(2 * np.pi * f[iS] * t + angleSFFT[iS])
    return s0
def ReconstructionANDPredictionNew(dataOrigin, lenPredict):
    """
    Variant reconstruction: zero-pads the FFT to ``N = len(dataOrigin) +
    lenPredict`` bins and sums the cosine components over the first
    ``N // 2`` time samples, returning twice the result.

    NOTE(review): the original code was broken -- it allocated
    ``np.zeros(N/2)`` (a float size under Python 3) and then added
    length-``N`` cosine arrays into the length-``N/2`` buffer, which raises
    a broadcast ValueError.  Fixed by using ``N // 2`` and evaluating the
    cosines only at the first half of the time axis.  The trailing 2x
    factor is kept from the original; its intent (half-spectrum doubling?)
    is unclear -- confirm before relying on absolute amplitudes.

    Parameters
    ----------
    dataOrigin : array_like
        Real input signal.
    lenPredict : int
        Number of zero-padding samples appended before the FFT.

    Returns
    -------
    ndarray
        Array of length ``(len(dataOrigin) + lenPredict) // 2``.
    """
    from scipy.fftpack import fft
    fs = 1
    N = len(dataOrigin) + lenPredict
    half = N // 2
    n = np.linspace(0, N - 1, N)
    f = n * fs / N
    t = n / fs
    sFFT = fft(dataOrigin, N)
    ASFFT = np.abs(sFFT) / N
    angleSFFT = np.angle(sFFT)
    s0 = np.zeros(half)
    tHalf = t[:half]
    for iS in range(N):
        s0 += ASFFT[iS] * np.cos(2 * np.pi * f[iS] * tHalf + angleSFFT[iS])
    return 2 * s0
# Demo: build a two-tone test signal, reconstruct it from its FFT, and
# "predict" one extra period by periodic extension.
x=np.random.rand(1024)
fs=1024
N=1024
n=np.linspace(0,N-1,N)
t=n/fs
# Overwrite the random data with a deterministic signal: DC offset plus
# 50 Hz and 175 Hz cosines with 30 and 70 degree phases.
x=2+3*np.cos(2*np.pi*50.*t+np.pi*30./180.)+1.5*np.cos(2*np.pi*175.*t+np.pi*70./180.)
plt.close('all')
plt.figure('R')
plt.plot(x,'.')
# Reconstruction should overlay the original exactly.
x0=ReconstructionFFT(x)
plt.figure('R2')
plt.plot(x,'b.')
plt.plot(x0,'.r')
# Prediction of a second period; plotted against two periods of x.
xPredict= ReconstructionANDPrediction(x,1024)
plt.figure('Predict')
plt.plot(np.arange(1024),x,'b*')
plt.plot(np.arange(1024,2048,1),x,'b*')
plt.plot(xPredict,'ro')
| gpl-3.0 |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/scipy/stats/_multivariate.py | 13 | 99071 | #
# Author: Joris Vankerschaver 2013
#
from __future__ import division, print_function, absolute_import
import math
import numpy as np
import scipy.linalg
from scipy.misc import doccer
from scipy.special import gammaln, psi, multigammaln
from scipy._lib._util import check_random_state
from scipy.linalg.blas import drot
__all__ = ['multivariate_normal',
'matrix_normal',
'dirichlet',
'wishart',
'invwishart',
'special_ortho_group',
'ortho_group',
'random_correlation']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
_doc_random_state = """\
random_state : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
"""
def _squeeze_output(out):
    """
    Remove single-dimensional entries from array and convert to scalar,
    if necessary.
    """
    squeezed = out.squeeze()
    # A 0-d array is unwrapped to its scalar element.
    if squeezed.ndim == 0:
        return squeezed[()]
    return squeezed
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
    """
    Determine the magnitude below which eigenvalues of a Hermitian matrix
    are treated as negligibly small.

    Designed to agree with scipy.linalg.pinvh about numerical singularity
    and numerical rank.

    Parameters
    ----------
    spectrum : 1d ndarray
        Array of eigenvalues of a Hermitian matrix.
    cond, rcond : float, optional
        Cutoff for small eigenvalues.  Eigenvalues smaller than
        rcond * largest_eigenvalue are considered zero.  If None or -1, a
        suitable machine precision is used.  ``rcond`` overrides ``cond``.

    Returns
    -------
    eps : float
        Magnitude cutoff for numerical negligibility.
    """
    tol = rcond if rcond is not None else cond
    if tol is None or tol == -1:
        # Pick a dtype-dependent multiple of machine epsilon.
        char = spectrum.dtype.char.lower()
        tol = {'f': 1E3, 'd': 1E6}[char] * np.finfo(char).eps
    return tol * np.max(abs(spectrum))
def _pinv_1d(v, eps=1e-5):
    """
    A helper function for computing the pseudoinverse of a vector of
    eigenvalues or singular values.

    Parameters
    ----------
    v : iterable of numbers
        Values to pseudo-invert.
    eps : float
        Values with magnitude no greater than eps are considered
        negligible and map to zero.

    Returns
    -------
    v_pinv : 1d float ndarray
        A vector of pseudo-inverted numbers.
    """
    inverted = []
    for value in v:
        inverted.append(0.0 if abs(value) <= eps else 1.0 / value)
    return np.array(inverted, dtype=float)
class _PSD(object):
    """
    Compute coordinated functions of a symmetric positive semidefinite matrix.

    A single eigendecomposition provides the pseudoinverse, the log of the
    pseudo-determinant, and the numerical rank, all using one shared
    notion of which eigenvalues are negligibly small -- keeping the three
    quantities mutually consistent and compatible with
    scipy.linalg.pinvh() (though not necessarily with np.linalg.det() or
    np.linalg.matrix_rank()).

    Parameters
    ----------
    M : array_like
        Symmetric positive semidefinite matrix (2-D).
    cond, rcond : float, optional
        Cutoff for small eigenvalues.  Eigenvalues smaller than
        rcond * largest_eigenvalue are considered zero.  If None or -1,
        suitable machine precision is used.
    lower : bool, optional
        Whether the pertinent array data is taken from the lower or upper
        triangle of M. (Default: lower)
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite
        numbers.  Disabling may give a performance gain, but may result in
        problems (crashes, non-termination) if the input does contain
        infinities or NaNs.
    allow_singular : bool, optional
        Whether to allow a singular matrix. (Default: True)
    """

    def __init__(self, M, cond=None, rcond=None, lower=True,
                 check_finite=True, allow_singular=True):
        # eigh performs array conversion, the finiteness check, and the
        # squareness assertion for us.
        eigvals, eigvecs = scipy.linalg.eigh(M, lower=lower,
                                             check_finite=check_finite)
        eps = _eigvalsh_to_eps(eigvals, cond, rcond)
        if np.min(eigvals) < -eps:
            raise ValueError('the input matrix must be positive semidefinite')
        large = eigvals[eigvals > eps]
        if len(large) < len(eigvals) and not allow_singular:
            raise np.linalg.LinAlgError('singular matrix')
        # U is a factor of the pseudoinverse: pinv = U @ U.T.
        inv_vals = _pinv_1d(eigvals, eps)
        factor = np.multiply(eigvecs, np.sqrt(inv_vals))

        # Eagerly precomputed attributes.
        self.rank = len(large)
        self.U = factor
        self.log_pdet = np.sum(np.log(large))

        # The pseudoinverse itself is computed lazily on first access.
        self._pinv = None

    @property
    def pinv(self):
        if self._pinv is None:
            self._pinv = np.dot(self.U, self.U.T)
        return self._pinv
class multi_rv_generic(object):
    """
    Base class holding the functionality shared by every multivariate
    distribution: management of the random number generator state.
    """

    def __init__(self, seed=None):
        super(multi_rv_generic, self).__init__()
        self._random_state = check_random_state(seed)

    @property
    def random_state(self):
        """ Get or set the RandomState object for generating random variates.

        This can be either None or an existing RandomState object.

        If None (or np.random), use the RandomState singleton used by np.random.
        If already a RandomState instance, use it.
        If an int, use a new RandomState instance seeded with seed.
        """
        return self._random_state

    @random_state.setter
    def random_state(self, seed):
        self._random_state = check_random_state(seed)

    def _get_random_state(self, random_state):
        # A per-call random_state overrides the instance-level one.
        if random_state is None:
            return self._random_state
        return check_random_state(random_state)
class multi_rv_frozen(object):
    """
    Base class holding the functionality shared by every frozen
    multivariate distribution: the random state is delegated to the
    wrapped distribution object stored in ``self._dist``.
    """

    @property
    def random_state(self):
        # Read through to the underlying distribution's RNG state.
        return self._dist._random_state

    @random_state.setter
    def random_state(self, seed):
        self._dist._random_state = check_random_state(seed)
_mvn_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_mvn_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_mvn_doc_frozen_callparams = ""
_mvn_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvn_docdict_params = {
'_mvn_doc_default_callparams': _mvn_doc_default_callparams,
'_mvn_doc_callparams_note': _mvn_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvn_docdict_noparams = {
'_mvn_doc_default_callparams': _mvn_doc_frozen_callparams,
'_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_normal_gen(multi_rv_generic):
r"""
A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
``pdf(x, mean=None, cov=1, allow_singular=False)``
Probability density function.
``logpdf(x, mean=None, cov=1, allow_singular=False)``
Log of the probability density function.
``rvs(mean=None, cov=1, size=1, random_state=None)``
Draw random samples from a multivariate normal distribution.
``entropy()``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_mvn_doc_callparams_note)s
The covariance matrix `cov` must be a (symmetric) positive
semi-definite matrix. The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
and :math:`k` is the dimension of the space where :math:`x` takes values.
.. versionadded:: 0.14.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
    def __init__(self, seed=None):
        # Register the RNG seed with the generic base, then render the
        # class docstring template with the shared parameter docs.
        super(multivariate_normal_gen, self).__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params)
    def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
        """
        Create a frozen multivariate normal distribution.

        See `multivariate_normal_frozen` for more information.
        """
        # Calling the generator object fixes mean/cov and returns a frozen
        # distribution with the same methods.
        return multivariate_normal_frozen(mean, cov,
                                          allow_singular=allow_singular,
                                          seed=seed)
    def _process_parameters(self, dim, mean, cov):
        """
        Infer dimensionality from mean or covariance matrix, ensure that
        mean and covariance are full vector resp. matrix.

        Returns the triple ``(dim, mean, cov)`` with ``mean`` a length-dim
        float vector and ``cov`` a (dim, dim) float matrix.  Raises
        ValueError on inconsistent or malformed inputs.
        """
        # Try to infer dimensionality
        if dim is None:
            # Priority: an explicit mean, then the covariance, else 1-D.
            if mean is None:
                if cov is None:
                    dim = 1
                else:
                    cov = np.asarray(cov, dtype=float)
                    if cov.ndim < 2:
                        dim = 1
                    else:
                        dim = cov.shape[0]
            else:
                mean = np.asarray(mean, dtype=float)
                dim = mean.size
        else:
            if not np.isscalar(dim):
                raise ValueError("Dimension of random variable must be a scalar.")

        # Check input sizes and return full arrays for mean and cov if necessary
        if mean is None:
            mean = np.zeros(dim)
        mean = np.asarray(mean, dtype=float)

        if cov is None:
            cov = 1.0
        cov = np.asarray(cov, dtype=float)

        if dim == 1:
            mean.shape = (1,)
            cov.shape = (1, 1)

        if mean.ndim != 1 or mean.shape[0] != dim:
            raise ValueError("Array 'mean' must be a vector of length %d." % dim)
        # Scalar cov means isotropic; 1-D cov is a diagonal.
        if cov.ndim == 0:
            cov = cov * np.eye(dim)
        elif cov.ndim == 1:
            cov = np.diag(cov)
        elif cov.ndim == 2 and cov.shape != (dim, dim):
            rows, cols = cov.shape
            if rows != cols:
                msg = ("Array 'cov' must be square if it is two dimensional,"
                       " but cov.shape = %s." % str(cov.shape))
            else:
                msg = ("Dimension mismatch: array 'cov' is of shape %s,"
                       " but 'mean' is a vector of length %d.")
                msg = msg % (str(cov.shape), len(mean))
            raise ValueError(msg)
        elif cov.ndim > 2:
            raise ValueError("Array 'cov' must be at most two-dimensional,"
                             " but cov.ndim = %d" % cov.ndim)

        return dim, mean, cov
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
def logpdf(self, x, mean=None, cov=1, allow_singular=False):
"""
Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
return _squeeze_output(out)
def pdf(self, x, mean=None, cov=1, allow_singular=False):
"""
Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
return _squeeze_output(out)
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""
Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_mvn_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
return _squeeze_output(out)
def entropy(self, mean=None, cov=1):
"""
Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_mvn_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet
# Module-level singleton: callable directly for pdf/logpdf/rvs, or called
# as a function to produce a frozen distribution.
multivariate_normal = multivariate_normal_gen()
class multivariate_normal_frozen(multi_rv_frozen):
    def __init__(self, mean=None, cov=1, allow_singular=False, seed=None):
        """
        Create a frozen multivariate normal distribution.
        Parameters
        ----------
        mean : array_like, optional
            Mean of the distribution (default zero)
        cov : array_like, optional
            Covariance matrix of the distribution (default one)
        allow_singular : bool, optional
            If this flag is True then tolerate a singular
            covariance matrix (default False).
        seed : None or int or np.random.RandomState instance, optional
            This parameter defines the RandomState object to use for drawing
            random variates.
            If None (or np.random), the global np.random state is used.
            If integer, it is used to seed the local RandomState instance
            Default is None.
        Examples
        --------
        When called with the default parameters, this will create a 1D random
        variable with mean 0 and covariance 1:
        >>> from scipy.stats import multivariate_normal
        >>> r = multivariate_normal()
        >>> r.mean
        array([ 0.])
        >>> r.cov
        array([[1.]])
        """
        # Delegate parameter validation and sampling to a private generator.
        self._dist = multivariate_normal_gen(seed)
        self.dim, self.mean, self.cov = self._dist._process_parameters(
                                                            None, mean, cov)
        # Factor the covariance once up front; the frozen parameters never
        # change, so repeated pdf/logpdf calls reuse this decomposition.
        self.cov_info = _PSD(self.cov, allow_singular=allow_singular)

    def logpdf(self, x):
        x = self._dist._process_quantiles(x, self.dim)
        # Reuse the precomputed precision factor and log-pseudo-determinant.
        out = self._dist._logpdf(x, self.mean, self.cov_info.U,
                                 self.cov_info.log_pdet, self.cov_info.rank)
        return _squeeze_output(out)

    def pdf(self, x):
        return np.exp(self.logpdf(x))

    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(self.mean, self.cov, size, random_state)

    def entropy(self):
        """
        Computes the differential entropy of the multivariate normal.
        Returns
        -------
        h : scalar
            Entropy of the multivariate normal distribution
        """
        log_pdet = self.cov_info.log_pdet
        rank = self.cov_info.rank
        # Rank + pseudo-determinant form stays finite for singular
        # covariances admitted by allow_singular=True.
        return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
    method = multivariate_normal_gen.__dict__[name]
    method_frozen = multivariate_normal_frozen.__dict__[name]
    # Frozen methods take no distribution parameters, so they get the
    # "noparams" substitutions; the generator keeps the full parameter docs.
    method_frozen.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)
# Shared documentation fragments for the matrix normal distribution.
_matnorm_doc_default_callparams = """\
mean : array_like, optional
    Mean of the distribution (default: `None`)
rowcov : array_like, optional
    Among-row covariance matrix of the distribution (default: `1`)
colcov : array_like, optional
    Among-column covariance matrix of the distribution (default: `1`)
"""

_matnorm_doc_callparams_note = \
    """If `mean` is set to `None` then a matrix of zeros is used for the mean.
    The dimensions of this matrix are inferred from the shape of `rowcov` and
    `colcov`, if these are provided, or set to `1` if ambiguous.
    `rowcov` and `colcov` can be two-dimensional array_likes specifying the
    covariance matrices directly. Alternatively, a one-dimensional array will
    be interpreted as the entries of a diagonal matrix, and a scalar or
    zero-dimensional array will be interpreted as this value times the
    identity matrix.
    """
# BUG FIX above: the note previously read "will be be interpreted" (doubled
# word); this text is rendered verbatim into the public docstrings.

_matnorm_doc_frozen_callparams = ""

_matnorm_doc_frozen_callparams_note = \
    """See class definition for a detailed description of parameters."""

# Substitution dictionaries consumed by doccer.docformat to expand the
# %(...)s templates in the matrix normal docstrings.
matnorm_docdict_params = {
    '_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,
    '_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,
    '_doc_random_state': _doc_random_state
}

matnorm_docdict_noparams = {
    '_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,
    '_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,
    '_doc_random_state': _doc_random_state
}
class matrix_normal_gen(multi_rv_generic):
    r"""
    A matrix normal random variable.

    The `mean` keyword specifies the mean. The `rowcov` keyword specifies the
    among-row covariance matrix. The 'colcov' keyword specifies the
    among-column covariance matrix.

    Methods
    -------
    ``pdf(X, mean=None, rowcov=1, colcov=1)``
        Probability density function.
    ``logpdf(X, mean=None, rowcov=1, colcov=1)``
        Log of the probability density function.
    ``rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)``
        Draw random samples.

    Parameters
    ----------
    X : array_like
        Quantiles, with the last two axes of `X` denoting the components.
    %(_matnorm_doc_default_callparams)s
    %(_doc_random_state)s

    Alternatively, the object may be called (as a function) to fix the mean
    and covariance parameters, returning a "frozen" matrix normal
    random variable:

    rv = matrix_normal(mean=None, rowcov=1, colcov=1)
        - Frozen object with the same methods but holding the given
          mean and covariance fixed.

    Notes
    -----
    %(_matnorm_doc_callparams_note)s

    The covariance matrices specified by `rowcov` and `colcov` must be
    (symmetric) positive definite. If the samples in `X` are
    :math:`m \times n`, then `rowcov` must be :math:`m \times m` and
    `colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`.

    The probability density function for `matrix_normal` is

    .. math::

        f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}}
               \exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1}
               (X-M)^T \right] \right),

    where :math:`M` is the mean, :math:`U` the among-row covariance matrix,
    :math:`V` the among-column covariance matrix.

    The `allow_singular` behaviour of the `multivariate_normal`
    distribution is not currently supported. Covariance matrices must be
    full rank.

    The `matrix_normal` distribution is closely related to the
    `multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)`
    (the vector formed by concatenating the columns of :math:`X`) has a
    multivariate normal distribution with mean :math:`\mathrm{Vec}(M)`
    and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker
    product). Sampling and pdf evaluation are
    :math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but
    :math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,
    making this equivalent form algorithmically inefficient.

    .. versionadded:: 0.17.0

    Examples
    --------

    >>> from scipy.stats import matrix_normal

    >>> M = np.arange(6).reshape(3,2); M
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> U = np.diag([1,2,3]); U
    array([[1, 0, 0],
           [0, 2, 0],
           [0, 0, 3]])
    >>> V = 0.3*np.identity(2); V
    array([[ 0.3,  0. ],
           [ 0. ,  0.3]])
    >>> X = M + 0.1; X
    array([[ 0.1,  1.1],
           [ 2.1,  3.1],
           [ 4.1,  5.1]])
    >>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
    0.023410202050005054

    >>> # Equivalent multivariate normal
    >>> from scipy.stats import multivariate_normal
    >>> vectorised_X = X.T.flatten()
    >>> equiv_mean = M.T.flatten()
    >>> equiv_cov = np.kron(V,U)
    >>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)
    0.023410202050005054
    """

    def __init__(self, seed=None):
        super(matrix_normal_gen, self).__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)

    def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):
        """
        Create a frozen matrix normal distribution.
        See `matrix_normal_frozen` for more information.
        """
        return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)

    def _process_parameters(self, mean, rowcov, colcov):
        """
        Infer dimensionality from mean or covariance matrices. Handle
        defaults. Ensure compatible dimensions.
        """
        # Process mean
        if mean is not None:
            mean = np.asarray(mean, dtype=float)
            meanshape = mean.shape
            if len(meanshape) != 2:
                raise ValueError("Array `mean` must be two dimensional.")
            # BUG FIX: this previously tested `np.any(meanshape == 0)`, but
            # `meanshape` is a tuple so the comparison is always False and
            # zero-sized means were silently accepted.
            if 0 in meanshape:
                raise ValueError("Array `mean` has invalid shape.")

        # Process among-row covariance: scalars become multiples of the
        # identity, 1-d arrays become diagonal matrices.
        rowcov = np.asarray(rowcov, dtype=float)
        if rowcov.ndim == 0:
            if mean is not None:
                rowcov = rowcov * np.identity(meanshape[0])
            else:
                rowcov = rowcov * np.identity(1)
        elif rowcov.ndim == 1:
            rowcov = np.diag(rowcov)
        rowshape = rowcov.shape
        if len(rowshape) != 2:
            raise ValueError("`rowcov` must be a scalar or a 2D array.")
        if rowshape[0] != rowshape[1]:
            raise ValueError("Array `rowcov` must be square.")
        if rowshape[0] == 0:
            raise ValueError("Array `rowcov` has invalid shape.")
        numrows = rowshape[0]

        # Process among-column covariance (same conventions as rowcov).
        colcov = np.asarray(colcov, dtype=float)
        if colcov.ndim == 0:
            if mean is not None:
                colcov = colcov * np.identity(meanshape[1])
            else:
                colcov = colcov * np.identity(1)
        elif colcov.ndim == 1:
            colcov = np.diag(colcov)
        colshape = colcov.shape
        if len(colshape) != 2:
            raise ValueError("`colcov` must be a scalar or a 2D array.")
        if colshape[0] != colshape[1]:
            raise ValueError("Array `colcov` must be square.")
        if colshape[0] == 0:
            raise ValueError("Array `colcov` has invalid shape.")
        numcols = colshape[0]

        # Ensure mean and covariances compatible
        if mean is not None:
            if meanshape[0] != numrows:
                raise ValueError("Arrays `mean` and `rowcov` must have the"
                                 "same number of rows.")
            if meanshape[1] != numcols:
                raise ValueError("Arrays `mean` and `colcov` must have the"
                                 "same number of columns.")
        else:
            mean = np.zeros((numrows,numcols))

        dims = (numrows, numcols)

        return dims, mean, rowcov, colcov

    def _process_quantiles(self, X, dims):
        """
        Adjust quantiles array so that last two axes labels the components of
        each data point.
        """
        X = np.asarray(X, dtype=float)
        if X.ndim == 2:
            # A single matrix becomes a stack of one.
            X = X[np.newaxis, :]
        if X.shape[-2:] != dims:
            raise ValueError("The shape of array `X` is not compatible "
                             "with the distribution parameters.")
        return X

    def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,
                col_prec_rt, log_det_colcov):
        """
        Parameters
        ----------
        dims : tuple
            Dimensions of the matrix variates
        X : ndarray
            Points at which to evaluate the log of the probability
            density function
        mean : ndarray
            Mean of the distribution
        row_prec_rt : ndarray
            A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)
            is the inverse of the among-row covariance matrix
        log_det_rowcov : float
            Logarithm of the determinant of the among-row covariance matrix
        col_prec_rt : ndarray
            A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)
            is the inverse of the among-column covariance matrix
        log_det_colcov : float
            Logarithm of the determinant of the among-column covariance matrix

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.
        """
        numrows, numcols = dims
        # Whiten the deviations on both sides; the Mahalanobis term is then
        # the sum of squares of the doubly-whitened deviations.
        roll_dev = np.rollaxis(X-mean, axis=-1, start=0)
        scale_dev = np.tensordot(col_prec_rt.T,
                                 np.dot(roll_dev, row_prec_rt), 1)
        maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)
        return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov
                       + numrows*log_det_colcov + maha)

    def logpdf(self, X, mean=None, rowcov=1, colcov=1):
        """
        Log of the matrix normal probability density function.
        Parameters
        ----------
        X : array_like
            Quantiles, with the last two axes of `X` denoting the components.
        %(_matnorm_doc_default_callparams)s
        Returns
        -------
        logpdf : ndarray
            Log of the probability density function evaluated at `X`
        Notes
        -----
        %(_matnorm_doc_callparams_note)s
        """
        dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
                                                              colcov)
        X = self._process_quantiles(X, dims)
        rowpsd = _PSD(rowcov, allow_singular=False)
        colpsd = _PSD(colcov, allow_singular=False)
        out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U,
                           colpsd.log_pdet)
        return _squeeze_output(out)

    def pdf(self, X, mean=None, rowcov=1, colcov=1):
        """
        Matrix normal probability density function.
        Parameters
        ----------
        X : array_like
            Quantiles, with the last two axes of `X` denoting the components.
        %(_matnorm_doc_default_callparams)s
        Returns
        -------
        pdf : ndarray
            Probability density function evaluated at `X`
        Notes
        -----
        %(_matnorm_doc_callparams_note)s
        """
        return np.exp(self.logpdf(X, mean, rowcov, colcov))

    def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):
        """
        Draw random samples from a matrix normal distribution.
        Parameters
        ----------
        %(_matnorm_doc_default_callparams)s
        size : integer, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s
        Returns
        -------
        rvs : ndarray or scalar
            Random variates of size (`size`, `dims`), where `dims` is the
            dimension of the random matrices.
        Notes
        -----
        %(_matnorm_doc_callparams_note)s
        """
        size = int(size)
        dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
                                                              colcov)
        # Sample standard normals and color them with the Cholesky factors
        # of the row and column covariances: X = M + R Z C^T.
        rowchol = scipy.linalg.cholesky(rowcov, lower=True)
        colchol = scipy.linalg.cholesky(colcov, lower=True)
        random_state = self._get_random_state(random_state)
        std_norm = random_state.standard_normal(size=(dims[1],size,dims[0]))
        roll_rvs = np.tensordot(colchol, np.dot(std_norm, rowchol.T), 1)
        out = np.rollaxis(roll_rvs.T, axis=1, start=0) + mean[np.newaxis,:,:]
        if size == 1:
            #out = np.squeeze(out, axis=0)
            out = out.reshape(mean.shape)
        return out
# Module-level singleton; call it to obtain a frozen matrix normal.
matrix_normal = matrix_normal_gen()
class matrix_normal_frozen(multi_rv_frozen):
    def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):
        """
        Create a frozen matrix normal distribution.
        Parameters
        ----------
        %(_matnorm_doc_default_callparams)s
        seed : None or int or np.random.RandomState instance, optional
            If int or RandomState, use it for drawing the random variates.
            If None (or np.random), the global np.random state is used.
            Default is None.
        Examples
        --------
        >>> from scipy.stats import matrix_normal
        >>> distn = matrix_normal(mean=np.zeros((3,3)))
        >>> X = distn.rvs(); X
        array([[-0.02976962,  0.93339138, -0.09663178],
               [ 0.67405524,  0.28250467, -0.93308929],
               [-0.31144782,  0.74535536,  1.30412916]])
        >>> distn.pdf(X)
        2.5160642368346784e-05
        >>> distn.logpdf(X)
        -10.590229595124615
        """
        # Delegate parameter handling and sampling to a private generator.
        self._dist = matrix_normal_gen(seed)
        self.dims, self.mean, self.rowcov, self.colcov = \
            self._dist._process_parameters(mean, rowcov, colcov)
        # Factor both covariances once; the frozen parameters never change,
        # so every pdf/logpdf call reuses these decompositions.
        self.rowpsd = _PSD(self.rowcov, allow_singular=False)
        self.colpsd = _PSD(self.colcov, allow_singular=False)

    def logpdf(self, X):
        X = self._dist._process_quantiles(X, self.dims)
        out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U,
                                 self.rowpsd.log_pdet, self.colpsd.U,
                                 self.colpsd.log_pdet)
        return _squeeze_output(out)

    def pdf(self, X):
        return np.exp(self.logpdf(X))

    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(self.mean, self.rowcov, self.colcov, size,
                              random_state)
# Set frozen generator docstrings from corresponding docstrings in
# matrix_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
    method = matrix_normal_gen.__dict__[name]
    method_frozen = matrix_normal_frozen.__dict__[name]
    # Frozen variants omit the parameter documentation (parameters are fixed).
    method_frozen.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params)
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, "
"but a.shape = %s." % (alpha.shape, ))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have either the same number "
"of entries as, or one entry fewer than, "
"parameter vector 'a', but alpha.shape = %s "
"and x.shape = %s." % (alpha.shape, x.shape))
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) <= 0:
raise ValueError("Each entry in 'x' must be greater than zero.")
if np.max(x) > 1:
raise ValueError("Each entry in 'x' must be smaller or equal one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
raise ValueError("The input vector 'x' must lie within the normal "
"simplex. but np.sum(x, 0) = %s." % np.sum(x, 0))
return x
def _lnB(alpha):
r"""
Internal helper function to compute the log of the useful quotient
.. math::
B(\alpha) = \frac{\prod_{i=1}{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
B : scalar
Helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
class dirichlet_gen(multi_rv_generic):
    r"""
    A Dirichlet random variable.
    The `alpha` keyword specifies the concentration parameters of the
    distribution.
    .. versionadded:: 0.15.0
    Methods
    -------
    ``pdf(x, alpha)``
        Probability density function.
    ``logpdf(x, alpha)``
        Log of the probability density function.
    ``rvs(alpha, size=1, random_state=None)``
        Draw random samples from a Dirichlet distribution.
    ``mean(alpha)``
        The mean of the Dirichlet distribution
    ``var(alpha)``
        The variance of the Dirichlet distribution
    ``entropy(alpha)``
        Compute the differential entropy of the multivariate normal.
    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_dirichlet_doc_default_callparams)s
    %(_doc_random_state)s
    Alternatively, the object may be called (as a function) to fix
    concentration parameters, returning a "frozen" Dirichlet
    random variable:
    rv = dirichlet(alpha)
        - Frozen object with the same methods but holding the given
          concentration parameters fixed.
    Notes
    -----
    Each :math:`\alpha` entry must be positive. The distribution has only
    support on the simplex defined by
    .. math::
        \sum_{i=1}^{K} x_i \le 1
    The probability density function for `dirichlet` is
    .. math::
        f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
    where
    .. math::
        \mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
                                     {\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
    and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the
    concentration parameters and :math:`K` is the dimension of the space
    where :math:`x` takes values.
    Note that the dirichlet interface is somewhat inconsistent.
    The array returned by the rvs function is transposed
    with respect to the format expected by the pdf and logpdf.
    """

    def __init__(self, seed=None):
        super(dirichlet_gen, self).__init__(seed)
        # Expand the %(...)s templates in the class docstring at
        # instantiation time.
        self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)

    def __call__(self, alpha, seed=None):
        # Calling the generator freezes the concentration parameters.
        return dirichlet_frozen(alpha, seed=seed)

    def _logpdf(self, x, alpha):
        """
        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function
        %(_dirichlet_doc_default_callparams)s
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.
        """
        # log f(x) = -log B(alpha) + sum_i (alpha_i - 1) * log(x_i)
        lnB = _lnB(alpha)
        return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0)

    def logpdf(self, x, alpha):
        """
        Log of the Dirichlet probability density function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_dirichlet_doc_default_callparams)s
        Returns
        -------
        pdf : ndarray
            Log of the probability density function evaluated at `x`.
        """
        alpha = _dirichlet_check_parameters(alpha)
        # May append the implied final simplex coordinate to x.
        x = _dirichlet_check_input(alpha, x)
        out = self._logpdf(x, alpha)
        return _squeeze_output(out)

    def pdf(self, x, alpha):
        """
        The Dirichlet probability density function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_dirichlet_doc_default_callparams)s
        Returns
        -------
        pdf : ndarray
            The probability density function evaluated at `x`.
        """
        alpha = _dirichlet_check_parameters(alpha)
        x = _dirichlet_check_input(alpha, x)
        out = np.exp(self._logpdf(x, alpha))
        return _squeeze_output(out)

    def mean(self, alpha):
        """
        Compute the mean of the dirichlet distribution.
        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s
        Returns
        -------
        mu : ndarray
            Mean of the Dirichlet distribution (one entry per component)
        """
        alpha = _dirichlet_check_parameters(alpha)
        # E[x_i] = alpha_i / alpha_0
        out = alpha / (np.sum(alpha))
        return _squeeze_output(out)

    def var(self, alpha):
        """
        Compute the variance of the dirichlet distribution.
        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s
        Returns
        -------
        v : ndarray
            Variance of the Dirichlet distribution (one entry per component)
        """
        alpha = _dirichlet_check_parameters(alpha)
        alpha0 = np.sum(alpha)
        # Var[x_i] = alpha_i (alpha_0 - alpha_i) / (alpha_0^2 (alpha_0 + 1))
        out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
        return out

    def entropy(self, alpha):
        """
        Compute the differential entropy of the dirichlet distribution.
        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s
        Returns
        -------
        h : scalar
            Entropy of the Dirichlet distribution
        """
        alpha = _dirichlet_check_parameters(alpha)
        alpha0 = np.sum(alpha)
        lnB = _lnB(alpha)
        K = alpha.shape[0]
        # h = log B(alpha) + (alpha_0 - K) psi(alpha_0)
        #     - sum_i (alpha_i - 1) psi(alpha_i)
        out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
            (alpha - 1) * scipy.special.psi(alpha))
        return _squeeze_output(out)

    def rvs(self, alpha, size=1, random_state=None):
        """
        Draw random samples from a Dirichlet distribution.
        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s
        size : int, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s
        Returns
        -------
        rvs : ndarray or scalar
            Random variates of size (`size`, `N`), where `N` is the
            dimension of the random variable.
        """
        alpha = _dirichlet_check_parameters(alpha)
        random_state = self._get_random_state(random_state)
        return random_state.dirichlet(alpha, size=size)
# Module-level singleton; call it to obtain a frozen Dirichlet distribution.
dirichlet = dirichlet_gen()
class dirichlet_frozen(multi_rv_frozen):
    # Thin wrapper: validates `alpha` once and delegates every method to a
    # private dirichlet_gen instance with the stored parameters.
    def __init__(self, alpha, seed=None):
        self.alpha = _dirichlet_check_parameters(alpha)
        self._dist = dirichlet_gen(seed)

    def logpdf(self, x):
        return self._dist.logpdf(x, self.alpha)

    def pdf(self, x):
        return self._dist.pdf(x, self.alpha)

    def mean(self):
        return self._dist.mean(self.alpha)

    def var(self):
        return self._dist.var(self.alpha)

    def entropy(self):
        return self._dist.entropy(self.alpha)

    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
    method = dirichlet_gen.__dict__[name]
    method_frozen = dirichlet_frozen.__dict__[name]
    # Frozen variants drop the parameter documentation (alpha is fixed).
    method_frozen.__doc__ = doccer.docformat(
        method.__doc__, dirichlet_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
_wishart_doc_default_callparams = """\
df : int
Degrees of freedom, must be greater than or equal to dimension of the
scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""
A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix).
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
    def __init__(self, seed=None):
        super(wishart_gen, self).__init__(seed)
        # Expand the %(...)s templates in the class docstring at
        # instantiation time using the shared wishart documentation dict.
        self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
    def __call__(self, df=None, scale=None, seed=None):
        """
        Create a frozen Wishart distribution.
        See `wishart_frozen` for more information.
        """
        # Calling the generator freezes df and scale into a dedicated object.
        return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis,np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
" dimensional, but scale.scale = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df < dim:
raise ValueError("Degrees of freedom cannot be less than dimension"
" of scale matrix, but df = %d" % df)
return dim, df, scale
    def _process_quantiles(self, x, dim):
        """
        Adjust quantiles array so that last axis labels the components of
        each data point.
        """
        x = np.asarray(x, dtype=float)

        if x.ndim == 0:
            # A scalar quantile stands for that multiple of the identity
            # matrix; result has shape (dim, dim, 1).
            x = x * np.eye(dim)[:, :, np.newaxis]
        if x.ndim == 1:
            if dim == 1:
                # n scalar quantiles in one dimension -> shape (1, 1, n).
                x = x[np.newaxis, np.newaxis, :]
            else:
                # A vector stands for a single diagonal matrix.
                x = np.diag(x)[:, :, np.newaxis]
        elif x.ndim == 2:
            if not x.shape[0] == x.shape[1]:
                raise ValueError("Quantiles must be square if they are two"
                                 " dimensional, but x.shape = %s."
                                 % str(x.shape))
            # A single matrix becomes a stack of one.
            x = x[:, :, np.newaxis]
        elif x.ndim == 3:
            if not x.shape[0] == x.shape[1]:
                raise ValueError("Quantiles must be square in the first two"
                                 " dimensions if they are three dimensional"
                                 ", but x.shape = %s." % str(x.shape))
        elif x.ndim > 3:
            raise ValueError("Quantiles must be at most two-dimensional with"
                             " an additional dimension for multiple"
                             "components, but x.ndim = %d" % x.ndim)

        # Now we have 3-dim array; should have shape [dim, dim, *]
        if not x.shape[0:2] == (dim, dim):
            raise ValueError('Quantiles have incompatible dimensions: should'
                             ' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
        return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
raise ValueError('Size must be an integer or tuple of integers;'
' thus must have dimension <= 1.'
' Got size.ndim = %s' % str(tuple(size)))
n = size.prod()
shape = tuple(size)
return n, shape
    def _logpdf(self, x, dim, df, scale, log_det_scale, C):
        """
        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        scale : ndarray
            Scale matrix
        log_det_scale : float
            Logarithm of the determinant of the scale matrix
        C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.
        """
        # log determinant of x
        # Note: x has components along the last axis, so that x.T has
        # components alone the 0-th axis. Then since det(A) = det(A'), this
        # gives us a 1-dim vector of determinants
        # Retrieve tr(scale^{-1} x)
        log_det_x = np.zeros(x.shape[-1])
        scale_inv_x = np.zeros(x.shape)
        tr_scale_inv_x = np.zeros(x.shape[-1])
        for i in range(x.shape[-1]):
            _, log_det_x[i] = self._cholesky_logdet(x[:,:,i])
            # Solve scale @ y = x[:,:,i] using the precomputed lower
            # Cholesky factor C instead of forming the inverse explicitly.
            scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i])
            tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace()
        # Log PDF: 0.5*(df-dim-1)*log|x| - 0.5*tr(scale^{-1} x)
        #          - 0.5*df*dim*log 2 - 0.5*df*log|scale| - log Gamma_p(df/2)
        out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
               (0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
                multigammaln(0.5*df, dim)))
        return out
def logpdf(self, x, df, scale):
    """
    Log of the Wishart probability density function.

    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
        Each quantile must be a symmetric positive definite matrix.
    %(_doc_default_callparams)s

    Returns
    -------
    pdf : ndarray
        Log of the probability density function evaluated at `x`

    Notes
    -----
    %(_doc_callparams_note)s
    """
    dim, df, scale = self._process_parameters(df, scale)
    quantiles = self._process_quantiles(x, dim)
    # Factor the scale once; the private implementation reuses it for
    # every quantile.
    chol, log_det = self._cholesky_logdet(scale)
    result = self._logpdf(quantiles, dim, df, scale, log_det, chol)
    return _squeeze_output(result)
def pdf(self, x, df, scale):
    """
    Wishart probability density function.

    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
        Each quantile must be a symmetric positive definite matrix.
    %(_doc_default_callparams)s

    Returns
    -------
    pdf : ndarray
        Probability density function evaluated at `x`

    Notes
    -----
    %(_doc_callparams_note)s
    """
    # Exponentiate the log-density; keeps the numerics in one place.
    log_density = self.logpdf(x, df, scale)
    return np.exp(log_density)
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
    """
    Mean of the Wishart distribution

    Parameters
    ----------
    %(_doc_default_callparams)s

    Returns
    -------
    mean : float
        The mean of the distribution
    """
    dim, df, scale = self._process_parameters(df, scale)
    mean_matrix = self._mean(dim, df, scale)
    return _squeeze_output(mean_matrix)
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
    """
    Mode of the Wishart distribution

    Only valid if the degrees of freedom are greater than the dimension of
    the scale matrix.

    Parameters
    ----------
    %(_doc_default_callparams)s

    Returns
    -------
    mode : float or None
        The Mode of the distribution
    """
    dim, df, scale = self._process_parameters(df, scale)
    result = self._mode(dim, df, scale)
    # A nonexistent mode is signalled by None; pass that through untouched.
    if result is None:
        return None
    return _squeeze_output(result)
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
    """
    Variance of the Wishart distribution

    Parameters
    ----------
    %(_doc_default_callparams)s

    Returns
    -------
    var : float
        The variance of the distribution
    """
    dim, df, scale = self._process_parameters(df, scale)
    variance = self._var(dim, df, scale)
    return _squeeze_output(variance)
def _standard_rvs(self, n, shape, dim, df, random_state):
    """
    Draw lower-triangular Bartlett factors A such that A A' ~ W(df, I).

    Parameters
    ----------
    n : integer
        Number of variates to generate
    shape : iterable
        Shape of the variates to generate
    dim : int
        Dimension of the scale matrix
    df : int
        Degrees of freedom
    random_state : np.random.RandomState instance
        RandomState used for drawing the random variates.

    Notes
    -----
    As this function does no argument checking, it should not be
    called directly; use 'rvs' instead.

    The order in which normal and chi-square draws are consumed from
    `random_state` is observable for a seeded generator; do not reorder
    these calls.
    """
    # Random normal variates for off-diagonal elements
    n_tril = dim * (dim-1) // 2
    covariances = random_state.normal(
        size=n*n_tril).reshape(shape+(n_tril,))
    # Random chi-square variates for diagonal elements.
    # Row i of the np.r_ stack holds the dim-th chi draws; reshaping to
    # (dim,) + shape[::-1] and transposing moves the dim axis last so it
    # lines up with the diagonal index below.
    variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
                       for i in range(dim)]].reshape((dim,) + shape[::-1]).T
    # Create the A matri(ces) - lower triangular
    A = np.zeros(shape + (dim, dim))
    # Input the covariances
    size_idx = tuple([slice(None,None,None)]*len(shape))
    tril_idx = np.tril_indices(dim, k=-1)
    A[size_idx + tril_idx] = covariances
    # Input the variances
    diag_idx = np.diag_indices(dim)
    A[size_idx + diag_idx] = variances
    return A
def _rvs(self, n, shape, dim, df, C, random_state):
    """
    Draw Wishart variates given a Cholesky factor of the scale matrix.

    Parameters
    ----------
    n : integer
        Number of variates to generate
    shape : iterable
        Shape of the variates to generate
    dim : int
        Dimension of the scale matrix
    df : int
        Degrees of freedom
    C : ndarray
        Cholesky factorization of the scale matrix, lower triangular.
    %(_doc_random_state)s

    Notes
    -----
    As this function does no argument checking, it should not be
    called directly; use 'rvs' instead.
    """
    random_state = self._get_random_state(random_state)
    # Bartlett (1933): each A[idx] is a lower-triangular factor with
    # A A' ~ W(df, I).
    A = self._standard_rvs(n, shape, dim, df, random_state)

    # Map each standard draw through the scale: (C A)(C A)' ~ W(df, scale),
    # where C is the *lower*-triangular Cholesky factor of the scale
    # matrix. Reference [1]_ writes this as D' B D with D read as upper
    # triangular, which appears to be a typo in or misreading of [1]_ —
    # the Bartlett representation is L A A' L' with L lower triangular.
    for idx in np.ndindex(shape):
        factor = np.dot(C, A[idx])
        A[idx] = np.dot(factor, factor.T)

    return A
def rvs(self, df, scale, size=1, random_state=None):
    """
    Draw random samples from a Wishart distribution.

    Parameters
    ----------
    %(_doc_default_callparams)s
    size : integer or iterable of integers, optional
        Number of samples to draw (default 1).
    %(_doc_random_state)s

    Returns
    -------
    rvs : ndarray
        Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
        the dimension of the scale matrix.

    Notes
    -----
    %(_doc_callparams_note)s
    """
    n, shape = self._process_size(size)
    dim, df, scale = self._process_parameters(df, scale)

    # Factor the scale once; every variate reuses this lower-triangular
    # Cholesky factor.
    chol = scipy.linalg.cholesky(scale, lower=True)

    samples = self._rvs(n, shape, dim, df, chol, random_state)
    return _squeeze_output(samples)
def _entropy(self, dim, df, log_det_scale):
    """
    Compute the Wishart differential entropy from precomputed pieces.

    Parameters
    ----------
    dim : int
        Dimension of the scale matrix
    df : int
        Degrees of freedom
    log_det_scale : float
        Logarithm of the determinant of the scale matrix

    Notes
    -----
    As this function does no argument checking, it should not be
    called directly; use 'entropy' instead.
    """
    # Sum of digamma terms psi((df + 1 - i) / 2) for i = 1..dim.
    digamma_sum = np.sum(
        [psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
    )
    return (
        0.5 * (dim+1) * log_det_scale +
        0.5 * dim * (dim+1) * _LOG_2 +
        multigammaln(0.5*df, dim) -
        0.5 * (df - dim - 1) * digamma_sum +
        0.5 * df * dim
    )
def entropy(self, df, scale):
    """
    Compute the differential entropy of the Wishart.

    Parameters
    ----------
    %(_doc_default_callparams)s

    Returns
    -------
    h : scalar
        Entropy of the Wishart distribution

    Notes
    -----
    %(_doc_callparams_note)s
    """
    dim, df, scale = self._process_parameters(df, scale)
    # Only the log-determinant of the scale is needed here.
    _, log_det = self._cholesky_logdet(scale)
    return self._entropy(dim, df, log_det)
def _cholesky_logdet(self, scale):
"""
Compute Cholesky decomposition and determine (log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
# Module-level singleton exposing the Wishart distribution's methods.
wishart = wishart_gen()
class wishart_frozen(multi_rv_frozen):
    """
    Create a frozen Wishart distribution.

    Parameters
    ----------
    df : array_like
        Degrees of freedom of the distribution
    scale : array_like
        Scale matrix of the distribution
    seed : None or int or np.random.RandomState instance, optional
        This parameter defines the RandomState object to use for drawing
        random variates.
        If None (or np.random), the global np.random state is used.
        If integer, it is used to seed the local RandomState instance
        Default is None.
    """
    def __init__(self, df, scale, seed=None):
        self._dist = wishart_gen(seed)
        self.dim, self.df, self.scale = self._dist._process_parameters(
            df, scale)
        # Cache the lower-triangular Cholesky factor and log-determinant
        # of the scale so repeated logpdf calls skip the factorization.
        self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)

    def logpdf(self, x):
        # Delegate to the generator's private implementation, reusing the
        # cached factorization.
        x = self._dist._process_quantiles(x, self.dim)
        out = self._dist._logpdf(x, self.dim, self.df, self.scale,
                                 self.log_det_scale, self.C)
        return _squeeze_output(out)

    def pdf(self, x):
        return np.exp(self.logpdf(x))

    def mean(self):
        out = self._dist._mean(self.dim, self.df, self.scale)
        return _squeeze_output(out)

    def mode(self):
        # _mode returns None when the mode does not exist (df < dim + 1).
        out = self._dist._mode(self.dim, self.df, self.scale)
        return _squeeze_output(out) if out is not None else out

    def var(self):
        out = self._dist._var(self.dim, self.df, self.scale)
        return _squeeze_output(out)

    def rvs(self, size=1, random_state=None):
        n, shape = self._dist._process_size(size)
        out = self._dist._rvs(n, shape, self.dim, self.df,
                              self.C, random_state)
        return _squeeze_output(out)

    def entropy(self):
        return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings.
# The frozen variant receives the no-parameter template (its parameters
# are fixed at construction); the generator keeps the full template.
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
    method = wishart_gen.__dict__[name]
    method_frozen = wishart_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(
        method.__doc__, wishart_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
from numpy import asarray_chkfinite, asarray
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,))
tril_idx = np.tril_indices(a.shape[-2], k=-1)
triu_idx = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_idx] = a1[index][tril_idx]
return a1
class invwishart_gen(wishart_gen):
    r"""
    An inverse Wishart random variable.

    The `df` keyword specifies the degrees of freedom. The `scale` keyword
    specifies the scale matrix, which must be symmetric and positive definite.
    In this context, the scale matrix is often interpreted in terms of a
    multivariate normal covariance matrix.

    Methods
    -------
    ``pdf(x, df, scale)``
        Probability density function.
    ``logpdf(x, df, scale)``
        Log of the probability density function.
    ``rvs(df, scale, size=1, random_state=None)``
        Draw random samples from an inverse Wishart distribution.

    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_doc_default_callparams)s
    %(_doc_random_state)s

    Alternatively, the object may be called (as a function) to fix the degrees
    of freedom and scale parameters, returning a "frozen" inverse Wishart
    random variable:

    rv = invwishart(df=1, scale=1)
        - Frozen object with the same methods but holding the given
          degrees of freedom and scale fixed.

    See Also
    --------
    wishart

    Notes
    -----
    %(_doc_callparams_note)s

    The scale matrix `scale` must be a symmetric positive definite
    matrix. Singular matrices, including the symmetric positive semi-definite
    case, are not supported.

    The inverse Wishart distribution is often denoted

    .. math::

        W_p^{-1}(\nu, \Psi)

    where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
    :math:`p \times p` scale matrix.

    The probability density function for `invwishart` has support over positive
    definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
    then its PDF is given by:

    .. math::

        f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
               |S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
               \exp\left( -tr(\Sigma S^{-1}) / 2 \right)

    If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
    :math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).

    If the scale matrix is 1-dimensional and equal to one, then the inverse
    Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the
    inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
    and scale = :math:`\frac{1}{2}`.

    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
           Wiley, 1983.
    .. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in
           Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.stats import invwishart, invgamma
    >>> x = np.linspace(0.01, 1, 100)
    >>> iw = invwishart.pdf(x, df=6, scale=1)
    >>> iw[:3]
    array([  1.20546865e-15,   5.42497807e-06,   4.45813929e-03])
    >>> ig = invgamma.pdf(x, 6/2., scale=1./2)
    >>> ig[:3]
    array([  1.20546865e-15,   5.42497807e-06,   4.45813929e-03])
    >>> plt.plot(x, iw)

    The input quantiles can be any shape of array, as long as the last
    axis labels the components.
    """

    def __init__(self, seed=None):
        super(invwishart_gen, self).__init__(seed)
        # Fill in the shared parameter documentation templates.
        self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)

    def __call__(self, df=None, scale=None, seed=None):
        """
        Create a frozen inverse Wishart distribution.

        See `invwishart_frozen` for more information.
        """
        return invwishart_frozen(df, scale, seed)

    def _logpdf(self, x, dim, df, scale, log_det_scale):
        """
        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function.
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        scale : ndarray
            Scale matrix
        log_det_scale : float
            Logarithm of the determinant of the scale matrix

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.
        """
        log_det_x = np.zeros(x.shape[-1])
        # Transposing a copy gives x_inv the shape (n, dim, dim), so
        # x_inv[i] below is the i-th quantile's inverse.
        x_inv = np.copy(x).T
        if dim > 1:
            _cho_inv_batch(x_inv)  # works in-place
        else:
            x_inv = 1./x_inv
        tr_scale_x_inv = np.zeros(x.shape[-1])

        for i in range(x.shape[-1]):
            # log|x_i| from the diagonal of its Cholesky factor.
            C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True)

            log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))

            # tr(scale x_i^{-1})
            tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()

        # Log PDF
        out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
               (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
               multigammaln(0.5*df, dim))

        return out

    def logpdf(self, x, df, scale):
        """
        Log of the inverse Wishart probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
            Each quantile must be a symmetric positive definite matrix.
        %(_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray
            Log of the probability density function evaluated at `x`

        Notes
        -----
        %(_doc_callparams_note)s
        """
        dim, df, scale = self._process_parameters(df, scale)
        x = self._process_quantiles(x, dim)
        _, log_det_scale = self._cholesky_logdet(scale)
        out = self._logpdf(x, dim, df, scale, log_det_scale)
        return _squeeze_output(out)

    def pdf(self, x, df, scale):
        """
        Inverse Wishart probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
            Each quantile must be a symmetric positive definite matrix.
        %(_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray
            Probability density function evaluated at `x`

        Notes
        -----
        %(_doc_callparams_note)s
        """
        return np.exp(self.logpdf(x, df, scale))

    def _mean(self, dim, df, scale):
        """
        Mean ``scale / (df - dim - 1)``; None when it does not exist.

        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'mean' instead.
        """
        if df > dim + 1:
            out = scale / (df - dim - 1)
        else:
            out = None
        return out

    def mean(self, df, scale):
        """
        Mean of the inverse Wishart distribution

        Only valid if the degrees of freedom are greater than the dimension of
        the scale matrix plus one.

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        mean : float or None
            The mean of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._mean(dim, df, scale)
        return _squeeze_output(out) if out is not None else out

    def _mode(self, dim, df, scale):
        """
        Mode ``scale / (df + dim + 1)`` (always exists).

        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'mode' instead.
        """
        return scale / (df + dim + 1)

    def mode(self, df, scale):
        """
        Mode of the inverse Wishart distribution

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        mode : float
            The Mode of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._mode(dim, df, scale)
        return _squeeze_output(out)

    def _var(self, dim, df, scale):
        """
        Elementwise variance; None unless ``df > dim + 3``.

        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'var' instead.
        """
        if df > dim + 3:
            var = (df - dim + 1) * scale**2
            diag = scale.diagonal()  # 1 x dim array
            var += (df - dim - 1) * np.outer(diag, diag)
            var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
        else:
            var = None
        return var

    def var(self, df, scale):
        """
        Variance of the inverse Wishart distribution

        Only valid if the degrees of freedom are greater than the dimension of
        the scale matrix plus three.

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        var : float
            The variance of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._var(dim, df, scale)
        return _squeeze_output(out) if out is not None else out

    def _rvs(self, n, shape, dim, df, C, random_state):
        """
        Parameters
        ----------
        n : integer
            Number of variates to generate
        shape : iterable
            Shape of the variates to generate
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
        %(_doc_random_state)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'rvs' instead.
        """
        random_state = self._get_random_state(random_state)
        # Get random draws A such that A ~ W(df, I)
        A = super(invwishart_gen, self)._standard_rvs(n, shape, dim,
                                                      df, random_state)

        # Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
        eye = np.eye(dim)
        # NOTE(review): ('trtrs') is a bare string, not a 1-tuple; this
        # relies on get_lapack_funcs accepting a string name — confirm.
        trtrs = get_lapack_funcs(('trtrs'), (A,))

        for index in np.ndindex(A.shape[:-2]):
            # Calculate CA
            CA = np.dot(C, A[index])
            # Get (C A)^{-1} via triangular solver
            if dim > 1:
                CA, info = trtrs(CA, eye, lower=True)
                if info > 0:
                    raise LinAlgError("Singular matrix.")
                if info < 0:
                    raise ValueError('Illegal value in %d-th argument of'
                                     ' internal trtrs' % -info)
            else:
                CA = 1. / CA
            # Get SA
            A[index] = np.dot(CA.T, CA)

        return A

    def rvs(self, df, scale, size=1, random_state=None):
        """
        Draw random samples from an inverse Wishart distribution.

        Parameters
        ----------
        %(_doc_default_callparams)s
        size : integer or iterable of integers, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s

        Returns
        -------
        rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
            the dimension of the scale matrix.

        Notes
        -----
        %(_doc_callparams_note)s
        """
        n, shape = self._process_size(size)
        dim, df, scale = self._process_parameters(df, scale)

        # Invert the scale: draws from W(df, scale^{-1}) are inverted to
        # obtain iW(df, scale) variates.
        eye = np.eye(dim)
        L, lower = scipy.linalg.cho_factor(scale, lower=True)
        inv_scale = scipy.linalg.cho_solve((L, lower), eye)
        # Cholesky decomposition of inverted scale
        C = scipy.linalg.cholesky(inv_scale, lower=True)

        out = self._rvs(n, shape, dim, df, C, random_state)

        return _squeeze_output(out)

    def entropy(self):
        # Need to find reference for inverse Wishart entropy.
        # NOTE(review): signature differs from wishart_gen.entropy(df, scale);
        # deliberate, since the method is unimplemented and always raises.
        raise AttributeError
# Module-level singleton exposing the inverse Wishart distribution's methods.
invwishart = invwishart_gen()
class invwishart_frozen(multi_rv_frozen):
    def __init__(self, df, scale, seed=None):
        """
        Create a frozen inverse Wishart distribution.

        Parameters
        ----------
        df : array_like
            Degrees of freedom of the distribution
        scale : array_like
            Scale matrix of the distribution
        seed : None or int or np.random.RandomState instance, optional
            This parameter defines the RandomState object to use for drawing
            random variates.
            If None (or np.random), the global np.random state is used.
            If integer, it is used to seed the local RandomState instance
            Default is None.
        """
        self._dist = invwishart_gen(seed)
        self.dim, self.df, self.scale = self._dist._process_parameters(
            df, scale
        )

        # Get the determinant via Cholesky factorization
        C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
        self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))

        # Get the inverse using the Cholesky factorization
        eye = np.eye(self.dim)
        self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)

        # Get the Cholesky factorization of the inverse scale; _rvs draws
        # Wishart variates with this factor and inverts them.
        self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)

    def logpdf(self, x):
        # Delegate to the generator, reusing the cached log-determinant.
        x = self._dist._process_quantiles(x, self.dim)
        out = self._dist._logpdf(x, self.dim, self.df, self.scale,
                                 self.log_det_scale)
        return _squeeze_output(out)

    def pdf(self, x):
        return np.exp(self.logpdf(x))

    def mean(self):
        # _mean returns None when the mean does not exist (df <= dim + 1).
        out = self._dist._mean(self.dim, self.df, self.scale)
        return _squeeze_output(out) if out is not None else out

    def mode(self):
        out = self._dist._mode(self.dim, self.df, self.scale)
        return _squeeze_output(out)

    def var(self):
        # _var returns None when the variance does not exist (df <= dim + 3).
        out = self._dist._var(self.dim, self.df, self.scale)
        return _squeeze_output(out) if out is not None else out

    def rvs(self, size=1, random_state=None):
        n, shape = self._dist._process_size(size)

        out = self._dist._rvs(n, shape, self.dim, self.df,
                              self.C, random_state)

        return _squeeze_output(out)

    def entropy(self):
        # Need to find reference for inverse Wishart entropy
        raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
    method = invwishart_gen.__dict__[name]
    # Bug fix: patch the *inverse* Wishart frozen class. The original
    # indexed wishart_frozen here, which clobbered wishart_frozen's
    # docstrings with invwishart's text and left invwishart_frozen's
    # methods undocumented.
    method_frozen = invwishart_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(
        method.__doc__, wishart_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
class special_ortho_group_gen(multi_rv_generic):
    r"""
    A matrix-valued SO(N) random variable.

    Return a random rotation matrix, drawn from the Haar distribution
    (the only uniform distribution on SO(n)).

    The `dim` keyword specifies the dimension N.

    Methods
    -------
    ``rvs(dim=None, size=1, random_state=None)``
        Draw random samples from SO(N).

    Parameters
    ----------
    dim : scalar
        Dimension of matrices

    Notes
    -----
    This class is wrapping the random_rot code from the MDP Toolkit,
    https://github.com/mdp-toolkit/mdp-toolkit

    Return a random rotation matrix, drawn from the Haar distribution
    (the only uniform distribution on SO(n)).
    The algorithm is described in the paper
    Stewart, G.W., "The efficient generation of random orthogonal
    matrices with an application to condition estimators", SIAM Journal
    on Numerical Analysis, 17(3), pp. 403-409, 1980.
    For more information see
    http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization

    See also the similar `ortho_group`.

    Examples
    --------
    >>> from scipy.stats import special_ortho_group
    >>> x = special_ortho_group.rvs(3)

    >>> np.dot(x, x.T)
    array([[  1.00000000e+00,   1.13231364e-17,  -2.86852790e-16],
           [  1.13231364e-17,   1.00000000e+00,  -1.46845020e-16],
           [ -2.86852790e-16,  -1.46845020e-16,   1.00000000e+00]])

    >>> import scipy.linalg
    >>> scipy.linalg.det(x)
    1.0

    This generates one random matrix from SO(3). It is orthogonal and
    has a determinant of 1.
    """

    def __init__(self, seed=None):
        super(special_ortho_group_gen, self).__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__)

    def __call__(self, dim=None, seed=None):
        """
        Create a frozen SO(N) distribution.

        See `special_ortho_group_frozen` for more information.
        """
        return special_ortho_group_frozen(dim, seed=seed)

    def _process_parameters(self, dim):
        """
        Dimension N must be specified; it cannot be inferred.
        """
        if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
            # Bug fix: the original triple-quoted message embedded a raw
            # newline and source indentation into the user-visible error.
            # Matches the wording used by ortho_group_gen.
            raise ValueError("Dimension of rotation must be specified, "
                             "and must be a scalar greater than 1.")
        return dim

    def rvs(self, dim, size=1, random_state=None):
        """
        Draw random samples from SO(N).

        Parameters
        ----------
        dim : integer
            Dimension of rotation space (N).
        size : integer, optional
            Number of samples to draw (default 1).

        Returns
        -------
        rvs : ndarray or scalar
            Random size N-dimensional matrices, dimension (size, dim, dim)
        """
        size = int(size)
        if size > 1:
            # Stack independent single draws along a leading axis.
            return np.array([self.rvs(dim, size=1, random_state=random_state)
                             for i in range(size)])

        dim = self._process_parameters(dim)
        random_state = self._get_random_state(random_state)

        # Stewart (1980): accumulate Householder reflections of shrinking
        # size, then fix signs so the determinant is +1.
        H = np.eye(dim)
        D = np.ones((dim,))
        for n in range(1, dim):
            x = random_state.normal(size=(dim-n+1,))
            D[n-1] = np.sign(x[0])
            x[0] -= D[n-1]*np.sqrt((x*x).sum())
            # Householder transformation
            Hx = (np.eye(dim-n+1)
                  - 2.*np.outer(x, x)/(x*x).sum())
            mat = np.eye(dim)
            mat[n-1:, n-1:] = Hx
            H = np.dot(H, mat)
        # Fix the last sign such that the determinant is 1
        D[-1] = (-1)**(1-(dim % 2))*D.prod()
        # Equivalent to np.dot(np.diag(D), H) but faster, apparently
        H = (D*H.T).T
        return H
# Module-level singleton exposing the SO(N) sampler.
special_ortho_group = special_ortho_group_gen()
class special_ortho_group_frozen(multi_rv_frozen):
    def __init__(self, dim=None, seed=None):
        """
        Create a frozen SO(N) distribution.

        Parameters
        ----------
        dim : scalar
            Dimension of matrices
        seed : None or int or np.random.RandomState instance, optional
            This parameter defines the RandomState object to use for drawing
            random variates.
            If None (or np.random), the global np.random state is used.
            If integer, it is used to seed the local RandomState instance
            Default is None.

        Examples
        --------
        >>> from scipy.stats import special_ortho_group
        >>> g = special_ortho_group(5)
        >>> x = g.rvs()
        """
        self._dist = special_ortho_group_gen(seed)
        # Validate the dimension once here so rvs() can skip the check.
        self.dim = self._dist._process_parameters(dim)

    def rvs(self, size=1, random_state=None):
        # Delegate to the generator using the frozen dimension.
        return self._dist.rvs(self.dim, size, random_state)
class ortho_group_gen(multi_rv_generic):
    r"""
    A matrix-valued O(N) random variable.

    Return a random orthogonal matrix, drawn from the O(N) Haar
    distribution (the only uniform distribution on O(N)).

    The `dim` keyword specifies the dimension N.

    Methods
    -------
    ``rvs(dim=None, size=1, random_state=None)``
        Draw random samples from O(N).

    Parameters
    ----------
    dim : scalar
        Dimension of matrices

    Notes
    -----
    This class is closely related to `special_ortho_group`.

    Some care is taken to avoid numerical error, as per the paper by Mezzadri.

    References
    ----------
    .. [1] F. Mezzadri, "How to generate random matrices from the classical
           compact groups", arXiv:math-ph/0609050v2.

    Examples
    --------
    >>> from scipy.stats import ortho_group
    >>> x = ortho_group.rvs(3)

    >>> np.dot(x, x.T)
    array([[  1.00000000e+00,   1.13231364e-17,  -2.86852790e-16],
           [  1.13231364e-17,   1.00000000e+00,  -1.46845020e-16],
           [ -2.86852790e-16,  -1.46845020e-16,   1.00000000e+00]])

    >>> import scipy.linalg
    >>> np.fabs(scipy.linalg.det(x))
    1.0

    This generates one random matrix from O(3). It is orthogonal and
    has a determinant of +1 or -1.
    """

    def __init__(self, seed=None):
        super(ortho_group_gen, self).__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__)

    def _process_parameters(self, dim):
        """
        Dimension N must be specified; it cannot be inferred.
        """
        if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
            # Bug fix: the two concatenated string literals were missing a
            # separating space ("specified,and must...").
            raise ValueError("Dimension of rotation must be specified, "
                             "and must be a scalar greater than 1.")
        return dim

    def rvs(self, dim, size=1, random_state=None):
        """
        Draw random samples from O(N).

        Parameters
        ----------
        dim : integer
            Dimension of rotation space (N).
        size : integer, optional
            Number of samples to draw (default 1).

        Returns
        -------
        rvs : ndarray or scalar
            Random size N-dimensional matrices, dimension (size, dim, dim)
        """
        size = int(size)
        if size > 1:
            # Stack independent single draws along a leading axis.
            return np.array([self.rvs(dim, size=1, random_state=random_state)
                             for i in range(size)])

        dim = self._process_parameters(dim)
        random_state = self._get_random_state(random_state)

        H = np.eye(dim)
        for n in range(1, dim):
            x = random_state.normal(size=(dim-n+1,))
            # random sign, 50/50, but chosen carefully to avoid roundoff error
            D = np.sign(x[0])
            x[0] += D*np.sqrt((x*x).sum())
            # Householder transformation
            Hx = -D*(np.eye(dim-n+1)
                     - 2.*np.outer(x, x)/(x*x).sum())
            mat = np.eye(dim)
            mat[n-1:, n-1:] = Hx
            H = np.dot(H, mat)
        return H
# Module-level singleton exposing the O(N) sampler.
ortho_group = ortho_group_gen()
class random_correlation_gen(multi_rv_generic):
r"""
A random correlation matrix.
Return a random correlation matrix, given a vector of eigenvalues.
The `eigs` keyword specifies the eigenvalues of the correlation matrix,
and implies the dimension.
Methods
-------
``rvs(eigs=None, random_state=None)``
Draw random correlation matrices, all with eigenvalues eigs.
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix.
Notes
----------
Generates a random correlation matrix following a numerically stable
algorithm spelled out by Davies & Higham. This algorithm uses a single O(N)
similarity transformation to construct a symmetric positive semi-definite
matrix, and applies a series of Givens rotations to scale it to have ones
on the diagonal.
References
----------
.. [1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation
of correlation matrices and their factors", BIT 2000, Vol. 40,
No. 4, pp. 640 651
Examples
--------
>>> from scipy.stats import random_correlation
>>> np.random.seed(514)
>>> x = random_correlation.rvs((.5, .8, 1.2, 1.5))
>>> x
array([[ 1. , -0.20387311, 0.18366501, -0.04953711],
[-0.20387311, 1. , -0.24351129, 0.06703474],
[ 0.18366501, -0.24351129, 1. , 0.38530195],
[-0.04953711, 0.06703474, 0.38530195, 1. ]])
>>> import scipy.linalg
>>> e, v = scipy.linalg.eigh(x)
>>> e
array([ 0.5, 0.8, 1.2, 1.5])
"""
def __init__(self, seed=None):
    # Fill in the class docstring's doccer templates at instantiation.
    super(random_correlation_gen, self).__init__(seed)
    self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, eigs, tol):
eigs = np.asarray(eigs, dtype=float)
dim = eigs.size
if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1:
raise ValueError("Array 'eigs' must be a vector of length greater than 1.")
if np.fabs(np.sum(eigs) - dim) > tol:
raise ValueError("Sum of eigenvalues must equal dimensionality.")
for x in eigs:
if x < -tol:
raise ValueError("All eigenvalues must be non-negative.")
return dim, eigs
def _givens_to_1(self, aii, ajj, aij):
"""Computes a 2x2 Givens matrix to put 1's on the diagonal for the input matrix.
The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ].
The output matrix g is a 2x2 anti-symmetric matrix of the form [ c s ; -s c ];
the elements c and s are returned.
Applying the output matrix to the input matrix (as b=g.T M g)
results in a matrix with bii=1, provided tr(M) - det(M) >= 1
and floating point issues do not occur. Otherwise, some other
valid rotation is returned. When tr(M)==2, also bjj=1.
"""
aiid = aii - 1.
ajjd = ajj - 1.
if ajjd == 0:
# ajj==1, so swap aii and ajj to avoid division by zero
return 0., 1.
dd = math.sqrt(max(aij**2 - aiid*ajjd, 0))
# The choice of t should be chosen to avoid cancellation [1]
t = (aij + math.copysign(dd, aij)) / ajjd
c = 1. / math.sqrt(1. + t*t)
if c == 0:
# Underflow
s = 1.0
else:
s = c*t
return c, s
def _to_corr(self, m):
"""
Given a psd matrix m, rotate to put one's on the diagonal, turning it
into a correlation matrix. This also requires the trace equal the
dimensionality. Note: modifies input matrix
"""
# Check requirements for in-place Givens
if not (m.flags.c_contiguous and m.dtype == np.float64 and m.shape[0] == m.shape[1]):
raise ValueError()
d = m.shape[0]
for i in range(d-1):
if m[i,i] == 1:
continue
elif m[i, i] > 1:
for j in range(i+1, d):
if m[j, j] < 1:
break
else:
for j in range(i+1, d):
if m[j, j] > 1:
break
c, s = self._givens_to_1(m[i,i], m[j,j], m[i,j])
# Use BLAS to apply Givens rotations in-place. Equivalent to:
# g = np.eye(d)
# g[i, i] = g[j,j] = c
# g[j, i] = -s; g[i, j] = s
# m = np.dot(g.T, np.dot(m, g))
mv = m.ravel()
drot(mv, mv, c, -s, n=d,
offx=i*d, incx=1, offy=j*d, incy=1,
overwrite_x=True, overwrite_y=True)
drot(mv, mv, c, -s, n=d,
offx=i, incx=d, offy=j, incy=d,
overwrite_x=True, overwrite_y=True)
return m
def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7):
"""
Draw random correlation matrices
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix
tol : float, optional
Tolerance for input parameter checks
diag_tol : float, optional
Tolerance for deviation of the diagonal of the resulting
matrix. Default: 1e-7
Raises
------
RuntimeError
Floating point error prevented generating a valid correlation
matrix.
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim),
each having eigenvalues eigs.
"""
dim, eigs = self._process_parameters(eigs, tol=tol)
random_state = self._get_random_state(random_state)
m = ortho_group.rvs(dim, random_state=random_state)
m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m
m = self._to_corr(m) # Carefully rotate to unit diagonal
# Check diagonal
if abs(m.diagonal() - 1).max() > diag_tol:
raise RuntimeError("Failed to generate a valid correlation matrix")
return m
random_correlation = random_correlation_gen()
| mit |
felipemontefuscolo/bitme | research/ar.py | 1 | 2635 | from statsmodels.tsa.ar_model import AR
from statsmodels.graphics.tsaplots import *
from matplotlib.pyplot import *
import scipy.signal as sg # for scipy.signal.welch
from scipy.ndimage.interpolation import shift
# this script is just to understand how AR model works
def diff(x, n, remove_nan=True):
r = x - shift(x, n, cval=np.NaN)
if remove_nan:
r = r[~np.isnan(r)]
return r
def hat_func(n):
    """Return (x, y) for a symmetric triangular ("hat") profile of n samples.

    y ramps 0, 1, ..., (n-1)/2, ..., 1, 0; n must be odd so the peak falls on
    a sample.

    Fix: ``n // 2`` instead of ``n / 2`` — true division makes ``range`` raise
    TypeError on Python 3 (the rest of this file already uses Python-3-style
    ``print()`` calls, so the function should run on both generations).
    """
    assert n % 2 == 1
    x = np.arange(n).astype('float64')
    y = np.arange(n).astype('float64')
    # Mirror the second half of the ramp to form the descending edge.
    for i in range(n // 2, n):
        y[i] = n - 1 - i
    return x, y
def n_hat(n, num_hats):
    """Chain ``num_hats`` triangular hats of width ``n`` into one signal.

    Consecutive hats share their boundary sample (the trailing zero of one hat
    is the leading zero of the next), and the appended hats alternate between
    half and full amplitude.  Returns (sample indices, values).
    """
    assert num_hats > 0
    signal = hat_func(n)[1]
    for hat_index in range(num_hats - 1):
        # Even iterations append a half-height hat, odd ones a full one.
        amplitude = 0.5 if hat_index % 2 == 0 else 1.
        signal = np.append(signal[:-1], amplitude * hat_func(n)[1])
    return np.arange(len(signal)), signal
def sin_(n, n_osc=3.):
    """Sample a cosine completing ``n_osc`` oscillations over ``n`` points.

    Prints the resulting period and returns (sample indices, values).
    """
    samples = np.arange(n).astype('float64')
    period = float((n - 1)) / float(n_osc)
    print('period = ' + str(period))
    # The (1. + 0.00*samples) factor is a disabled chirp term (no-op).
    values = np.cos(2. * np.pi / period * samples * (1. + 0.00*samples))
    return samples, values
# --- Experiment: fit an AR model to a noisy cosine and inspect its spectrum.
N = 301
# x, y = n_hat(N, 6)
x, y = sin_(N, 5)
# Additive Gaussian noise (std 0.5, scaled by 0.5).
y = y + 0.5 * np.random.normal(0., .5, len(y))
#y = diff(y, 15)
#x = np.arange(len(y)).astype('float64')
# plot AR prediction ======================
model = AR(y)
model_fit = model.fit()
# Forecast from the first usable lag out to twice the sample length.
pred = model_fit.predict(start=model_fit.k_ar, end=int(2 * N), dynamic=False)
xx = np.arange(model_fit.k_ar, model_fit.k_ar + len(pred))
print(model_fit.k_ar)
print(model_fit.params)
plot(x, y)
plot(xx, pred)
show()
# # =======================================
# # plot SARIMAX prediction =================
# best_pdq = get_best_pdq_sarimax(y, max_d=2, max_q=1)
# print("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA best pdq = " + str(best_pdq))
# model = SARIMAX(y, order=best_pdq)
# model_fit = model.fit(disp=0)
# pred = model_fit.predict(start=N/2, end=int(2 * N), dynamic=False)
# xx = np.arange(N/2, N/2 + len(pred))
# #sys.exit(0)
# print(model_fit.params)
# plot(x, y)
# plot(xx, pred)
# show()
# # =======================================
#per = sg.periodogram(y if len(y) % 2 == 0 else y[1:], fs=x[1], scaling='spectrum')
# Welch power spectrum; nperseg must be even, hence the N-1 fallback.
per = sg.welch(y, fs=x[1], nperseg=N if N%2==0 else N-1, detrend='linear')
# remove 0 freqs
freqs = per[0][1:]
# Convert frequencies to periods, reversed so periods increase left-to-right.
periods = (1. / np.array(freqs))[::-1]
power_spectrum = np.array(per[1][1:])[::-1]
print("max spec = " + str(periods[np.argmax(power_spectrum)]))
plot(periods, power_spectrum)
show()
# Raw signal plus autocorrelation / partial autocorrelation diagnostics.
fig, axes = subplots(3, 1, figsize=(10, 9))
fig.tight_layout()
axes[0].plot(y)
axes[0].title.set_text('sin with err=0, slope=0, variance slope=3/101')
plot_acf(y, ax=axes[1], unbiased=True)
plot_pacf(y, ax=axes[2], method='ywm')
# autocorrelation_plot(df)
show()
| mpl-2.0 |
humdings/zipline | tests/pipeline/base.py | 2 | 5349 | """
Base class for Pipeline API unit tests.
"""
import numpy as np
from numpy import arange, prod
from pandas import DataFrame, Timestamp
from six import iteritems
from zipline.utils.compat import wraps
from zipline.pipeline.engine import SimplePipelineEngine
from zipline.pipeline import ExecutionPlan
from zipline.pipeline.term import AssetExists, InputDates
from zipline.testing import (
check_arrays,
ExplodingObject,
)
from zipline.testing.fixtures import (
WithAssetFinder,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.utils.functional import dzip_exact
from zipline.utils.pandas_utils import explode
def with_defaults(**default_funcs):
    """
    Decorator for providing dynamic default values for a method.

    Usages:

    @with_defaults(foo=lambda self: self.x + self.y)
    def func(self, foo):
        ...

    If a value is passed for `foo`, it will be used. Otherwise the function
    supplied to `with_defaults` will be called with `self` as an argument.
    """
    def decorator(f):
        @wraps(f)
        def method(self, *args, **kwargs):
            # Fill in any defaults the caller did not supply.  ``dict.items``
            # iterates identically on Python 2 and 3, so the six ``iteritems``
            # helper is unnecessary here.
            for name, func in default_funcs.items():
                if name not in kwargs:
                    kwargs[name] = func(self)
            return f(self, *args, **kwargs)
        return method
    return decorator


# Shorthand used by the test-data builders below: default ``shape`` to
# ``self.default_shape``.
with_default_shape = with_defaults(shape=lambda self: self.default_shape)
class BasePipelineTestCase(WithTradingSessions,
                           WithAssetFinder,
                           ZiplineTestCase):
    """Base class for Pipeline API unit tests.

    Provides a 20-asset finder over calendar year 2014 plus helpers for
    running term graphs against a seeded workspace and for building test
    data/mask blocks of a consistent default shape.
    """
    START_DATE = Timestamp('2014', tz='UTC')
    END_DATE = Timestamp('2014-12-31', tz='UTC')
    ASSET_FINDER_EQUITY_SIDS = list(range(20))
    @classmethod
    def init_class_fixtures(cls):
        super(BasePipelineTestCase, cls).init_class_fixtures()
        # Existence mask over the last 30 NYSE sessions; its shape drives
        # ``default_shape`` below.
        cls.default_asset_exists_mask = cls.asset_finder.lifetimes(
            cls.nyse_sessions[-30:],
            include_start_date=False,
        )
    @property
    def default_shape(self):
        """Default shape for methods that build test data."""
        return self.default_asset_exists_mask.shape
    def run_graph(self, graph, initial_workspace, mask=None):
        """
        Compute the given TermGraph, seeding the workspace of our engine with
        `initial_workspace`.

        Parameters
        ----------
        graph : zipline.pipeline.graph.TermGraph
            Graph to run.
        initial_workspace : dict
            Initial workspace to forward to SimplePipelineEngine.compute_chunk.
        mask : DataFrame, optional
            This is a value to pass to `initial_workspace` as the mask from
            `AssetExists()`. Defaults to a frame of shape `self.default_shape`
            containing all True values.

        Returns
        -------
        results : dict
            Mapping from termname -> computed result.
        """
        # The loader should never actually be used: all inputs are expected
        # to come from ``initial_workspace``, so any access explodes loudly.
        engine = SimplePipelineEngine(
            lambda column: ExplodingObject(),
            self.nyse_sessions,
            self.asset_finder,
        )
        if mask is None:
            mask = self.default_asset_exists_mask
        dates, assets, mask_values = explode(mask)
        initial_workspace.setdefault(AssetExists(), mask_values)
        initial_workspace.setdefault(InputDates(), dates)
        return engine.compute_chunk(
            graph,
            dates,
            assets,
            initial_workspace,
        )
    def check_terms(self,
                    terms,
                    expected,
                    initial_workspace,
                    mask,
                    check=check_arrays):
        """
        Compile the given terms into a TermGraph, compute it with
        initial_workspace, and compare the results with ``expected``.

        ``check`` is called as ``check(result, expected)`` for each term;
        ``dzip_exact`` additionally enforces that both dicts have exactly the
        same keys.
        """
        start_date, end_date = mask.index[[0, -1]]
        graph = ExecutionPlan(
            terms,
            all_dates=self.nyse_sessions,
            start_date=start_date,
            end_date=end_date,
        )
        results = self.run_graph(graph, initial_workspace, mask)
        for key, (res, exp) in dzip_exact(results, expected).items():
            check(res, exp)
        return results
    def build_mask(self, array):
        """
        Helper for constructing an AssetExists mask from a boolean-coercible
        array.
        """
        ndates, nassets = array.shape
        return DataFrame(
            array,
            # Use the **last** N dates rather than the first N so that we have
            # space for lookbacks.
            index=self.nyse_sessions[-ndates:],
            columns=self.ASSET_FINDER_EQUITY_SIDS[:nassets],
            dtype=bool,
        )
    @with_default_shape
    def arange_data(self, shape, dtype=np.float64):
        """
        Build a block of testing data from numpy.arange.
        """
        return arange(prod(shape), dtype=dtype).reshape(shape)
    @with_default_shape
    def randn_data(self, seed, shape):
        """
        Build a block of testing data from a seeded RandomState.
        """
        return np.random.RandomState(seed).randn(*shape)
    @with_default_shape
    def eye_mask(self, shape):
        """
        Build a mask using np.eye.
        """
        return ~np.eye(*shape, dtype=bool)
    @with_default_shape
    def ones_mask(self, shape):
        """Build an all-True mask of the given (or default) shape."""
        return np.ones(shape, dtype=bool)
| apache-2.0 |
alan-mnix/MLFinalProject | final.py | 1 | 3568 | import heapq, itertools
import nltk, re, numpy
import json
import re
import sklearn
import sklearn.metrics
import sklearn.svm
from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer
import time
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
import sys
#import psyco
#psyco.full()
# Module-wide state shared by the tokenizer and vectorizer below.
# ``pattern`` matches non-word characters (currently unused in this chunk);
# ``p`` extracts word tokens (letters/digits/underscore/apostrophe).
pattern=re.compile("[^\w']")
path = 'dataset/'
token_dict = {}
p = re.compile(r"[\w']+")
class StemmingTokenizer(object):
    """Callable tokenizer: splits text with the module-level word regex ``p``
    and strips common English suffixes from each lower-cased token."""
    def __init__(self):
        # Alternative stemmers (RSLP, Porter) were tried and left disabled;
        # the lightweight regexp stemmer won out.  ``min=4`` keeps very short
        # tokens untouched.
        self.stemmer = nltk.stem.RegexpStemmer('ing$|s$|e$|able$|or$|er$|ness$|ional$|ful$|ment$', min=4)
    def __call__(self, doc):
        tokens = p.findall(doc)
        return [self.stemmer.stem(token.lower()) for token in tokens]
#print StemmingTokenizer()('zoomin zoomin xypoint')
def read(file):
    """Load a JSON-lines file; the first line is metadata, the rest records.

    Returns ``(metadata, records)``.

    Fixes: the file handle is now closed deterministically via ``with`` (the
    original leaked it), and the per-line parse uses a list comprehension —
    the original indexed and popped a ``map`` result, which is not a list on
    Python 3.
    """
    with open(file) as fp:
        data = [json.loads(line) for line in fp]
    m = data[0]
    data.pop(0)
    return m, data
def _toAll(files, tfidf):
t= time.time()
#feats = tfidf.get_feature_names()
list = [None]*len(files)
for i in range(len(files)):
file = files[i]
text = file['question'] + ' ' + file['excerpt']
#text = text.split()
#t = []
#for i in text:
# if i in feats:
# t.append(i)
#text = string.join(' ', t)
#text = file
#caracters = ["\'", "\n", "\r", "\t"]
#for c in caracters:
# text = text.replace(c, "")
list[i] = text
return list
def _toText(files):
t= time.time()
list = [""]*len(files)
for i in range(len(files)):
file = files[i]
#text = file['question'] + ' ' + file['excerpt']
#file["excerpt"] = unicodedata.normalize('NFKD', file["excerpt"]).encode('ascii', 'ignore')
#file["question"] = unicodedata.normalize('NFKD', file["question"]).encode('ascii', 'ignore')
#text = str(re.sub(r'[^\x00-\x7F]+',' ', file["question"])+" "+re.sub(r'[^\x00-\x7F]+',' ', file["excerpt"]))
text = file['question'] + ' ' + file['excerpt']
#text = file
#caracters = ["\'", "\n", "\r", "\t"]
#for c in caracters:
# text = text.replace(c, "")
list[i] = text
return list
def vectorizeTFIDF(files):
    # Build a TF-IDF document-term matrix over the concatenated
    # question+excerpt text of each record; returns (matrix, fitted vectorizer).
    t = time.time()
    list = _toText(files)
    # NOTE(review): ``token_pattern`` is ignored because an explicit
    # ``tokenizer`` is supplied — the "word" value is inert here.
    tfidf = TfidfVectorizer(tokenizer = StemmingTokenizer(), token_pattern="word", stop_words="english")
    tfs = tfidf.fit_transform(list)
    #print 'TFIDF: ', time.time() - t
    return tfs, tfidf
def numericLabels(labels):
    """Map topic names to their fixed integer class ids."""
    topic_ids = {
        'mathematica': 0,
        'photo': 1,
        'apple': 2,
        'unix': 3,
        'android': 4,
        'security': 5,
        'wordpress': 6,
        'gis': 7,
        'scifi': 8,
        'electronics': 9,
    }
    return [topic_ids[label] for label in labels]
def main():
    """Train a LinearSVC on TF-IDF features of ``training.json`` and classify
    records read from stdin.

    Input protocol: the first stdin line holds the number of JSON records
    that follow, one record per line.  Predicted topic labels are written to
    stdout, newline-separated.

    Fixes: removed dead commented-out code and unused timing variables,
    replaced the Python-2-only ``xrange`` with ``range`` (identical behavior
    here on both generations), and guarded the entry point below so importing
    this module no longer triggers training.
    """
    mtrain, train = read('training.json')
    train_label = numpy.array([x['topic'] for x in train])
    # Build the vocabulary and TF-IDF weighted document-term matrix.
    train_tfs, tfidf = vectorizeTFIDF(train)
    clf = sklearn.svm.LinearSVC(multi_class='ovr', dual=False, tol=1e-10)
    clf.fit(train_tfs, train_label)
    # First stdin line: record count; subsequent lines: JSON records.
    data = sys.stdin.readlines()
    m = int(data[0])
    inputs = [json.loads(data[i + 1]) for i in range(m)]
    X = tfidf.transform(_toText(inputs))
    yp = clf.predict(X)
    sys.stdout.write('\n'.join(yp))


if __name__ == '__main__':
    main()
| gpl-2.0 |
mogeiwang/nest | pynest/examples/plot_weight_matrices.py | 4 | 3194 | # -*- coding: utf-8 -*-
#
# plot_weight_matrices.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pylab
import nest
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_weight_matrices(E_neurons, I_neurons):
    '''Extracts and plots weight matrices.

    Builds four source-by-target weight matrices (E->E, I->E, E->I, I->I)
    from the NEST connection tables of the given excitatory and inhibitory
    populations and renders them as a 2x2 grid of heatmaps with colorbars.
    Weights of parallel connections between the same pair are summed (+=).
    '''
    # Matrices are indexed [source, target]; shapes follow the population
    # each axis belongs to.
    W_EE = np.zeros([len(E_neurons), len(E_neurons)])
    W_EI = np.zeros([len(I_neurons), len(E_neurons)])
    W_IE = np.zeros([len(E_neurons), len(I_neurons)])
    W_II = np.zeros([len(I_neurons), len(I_neurons)])
    # GetConnections(sources, targets); GetStatus pulls the weight of each.
    a_EE = nest.GetConnections(E_neurons, E_neurons)
    c_EE = nest.GetStatus(a_EE, keys='weight')
    a_EI = nest.GetConnections(I_neurons, E_neurons)
    c_EI = nest.GetStatus(a_EI, keys='weight')
    a_IE = nest.GetConnections(E_neurons, I_neurons)
    c_IE = nest.GetStatus(a_IE, keys='weight')
    a_II = nest.GetConnections(I_neurons, I_neurons)
    c_II = nest.GetStatus(a_II, keys='weight')
    # n[0]/n[1] are global source/target ids; subtracting the population's
    # minimum id converts them to 0-based matrix indices.
    for idx,n in enumerate(a_EE):
        W_EE[n[0]-min(E_neurons), n[1]-min(E_neurons)] += c_EE[idx]
    for idx,n in enumerate(a_EI):
        W_EI[n[0]-min(I_neurons), n[1]-min(E_neurons)] += c_EI[idx]
    for idx,n in enumerate(a_IE):
        W_IE[n[0]-min(E_neurons), n[1]-min(I_neurons)] += c_IE[idx]
    for idx,n in enumerate(a_II):
        W_II[n[0]-min(I_neurons), n[1]-min(I_neurons)] += c_II[idx]
    fig = pylab.figure()
    fig.suptitle('Weight matrices', fontsize=14)
    # 4x4 grid: the large E->E panel takes 3x3 cells, the remaining strips
    # hold the mixed and I->I panels.
    gs = gridspec.GridSpec(4,4)
    ax1 = pylab.subplot(gs[:-1,:-1])
    ax2 = pylab.subplot(gs[:-1,-1])
    ax3 = pylab.subplot(gs[-1,:-1])
    ax4 = pylab.subplot(gs[-1,-1])
    plt1 = ax1.imshow(W_EE)
    plt1.set_cmap('jet')
    divider = make_axes_locatable(ax1)
    cax = divider.append_axes("right", "5%", pad="3%")
    pylab.colorbar(plt1, cax=cax)
    ax1.set_title('W_{EE}')
    pylab.tight_layout()
    # NOTE(review): ax2 displays W_IE but is titled 'W_{EI}', and ax3 the
    # reverse — confirm which subscript convention (source/target order) the
    # titles are meant to follow.
    plt2 = ax2.imshow(W_IE)
    plt2.set_cmap('jet')
    divider = make_axes_locatable(ax2)
    cax = divider.append_axes("right", "5%", pad="3%")
    pylab.colorbar(plt2, cax=cax)
    ax2.set_title('W_{EI}')
    pylab.tight_layout()
    plt3 = ax3.imshow(W_EI)
    plt3.set_cmap('jet')
    divider = make_axes_locatable(ax3)
    cax = divider.append_axes("right", "5%", pad="3%")
    pylab.colorbar(plt3, cax=cax)
    ax3.set_title('W_{IE}')
    pylab.tight_layout()
    plt4 = ax4.imshow(W_II)
    plt4.set_cmap('jet')
    divider = make_axes_locatable(ax4)
    cax = divider.append_axes("right", "5%", pad="3%")
    pylab.colorbar(plt4, cax=cax)
    ax4.set_title('W_{II}')
    pylab.tight_layout()
| gpl-2.0 |
ilo10/scikit-learn | examples/svm/plot_rbf_parameters.py | 57 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of values in ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
    """Colormap normalizer that pins an arbitrary data value (``midpoint``)
    to the middle of the colormap, stretching the two halves independently."""
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)
    def __call__(self, value, clip=None):
        # Piecewise-linear map: vmin -> 0, midpoint -> 0.5, vmax -> 1.
        anchors = [self.vmin, self.midpoint, self.vmax]
        targets = [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, anchors, targets))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
# NOTE(review): StratifiedShuffleSplit(y, n_iter=...) is the pre-0.18
# scikit-learn cross-validation API, matching this file's imports.
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
      % (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
    for gamma in gamma_2d_range:
        clf = SVC(C=C, gamma=gamma)
        clf.fit(X_2d, y_2d)
        classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
    # evaluate decision function in a grid
    Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # visualize decision function for these parameters
    plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
    plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
              size='medium')
    # visualize parameter's effect on decision function
    plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
    plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
    plt.xticks(())
    plt.yticks(())
    plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
           norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
Scan-o-Matic/scanomatic | scanomatic/qc/compilation_results.py | 1 | 9311 | from __future__ import absolute_import
import glob
from itertools import izip
import os
import re
import time
from types import StringTypes
from matplotlib import pyplot as plt
import numpy as np
from scanomatic.io.movie_writer import MovieWriter
from scanomatic.models.factories.compile_project_factory import (
CompileImageAnalysisFactory
)
# Scan image files look like "<prefix>_NNNN_<timestamp>.tiff"; _img_pattern
# recognizes such names and _time_pattern extracts the float timestamp.
_img_pattern = re.compile(r".*_[0-9]{4}_[0-9.]+\.tiff$")
_time_pattern = re.compile(r'[0-9]+\.[0-9]*')
def _input_validate(f):
    """Decorator: if the first positional argument is a string, treat it as a
    path and load the project compilation from it before calling ``f``.

    Fix: the wrapper is now decorated with ``functools.wraps`` so the wrapped
    function keeps its name and docstring (the original returned an anonymous
    ``wrapped`` for every QC function, which hurt debugging and docs).
    """
    from functools import wraps  # local import keeps this edit self-contained

    @wraps(f)
    def wrapped(*args, **kwargs):
        if len(args) > 0:
            if isinstance(args[0], StringTypes):
                args = list(args)
                args[0] = CompileImageAnalysisFactory.serializer.load(args[0])
        return f(*args, **kwargs)
    return wrapped
def simulate_positioning(project_compilation, positioning):
    """Return per-image fiducial-marker coordinates under a positioning model.

    'detected'  -> the coordinates recorded for each image (unchanged),
    'probable'  -> every image gets the rounded per-marker median position,
    'one-time'  -> every image gets the last image's positions.

    Output shape is (images, 2, markers) with x rows first, then y rows.
    """
    assert positioning in ('detected', 'probable', 'one-time'), "Not understood positioning mode"
    marks = [(entry.fixture.orientation_marks_x, entry.fixture.orientation_marks_y)
             for entry in project_compilation]
    positions = np.array(marks)
    if positioning == "one-time":
        positions[:] = positions[-1]
    elif positioning == "probable":
        positions[:] = np.round(np.median(positions, axis=0))
    return positions
@_input_validate
def get_grayscale_variability(project_compilation):
    """Per-segment grayscale dispersion: variance normalized by the mean."""
    readings = np.array([entry.fixture.grayscale.values
                         for entry in project_compilation])
    variance = np.var(readings, axis=0)
    mean = np.mean(readings, axis=0)
    return variance / mean
@_input_validate
def get_grayscale_outlier_images(project_compilation, max_distance=3.0, only_image_indices=False):
    """List images whose grayscale readings sit far from the median profile.

    An image is an outlier when its squared distance to the per-segment
    median exceeds ``max_distance**2`` times the median squared distance.
    Returns image entries, or bare indices when ``only_image_indices`` is
    True.
    """
    readings = np.array([entry.fixture.grayscale.values for entry in project_compilation])
    median_profile = np.median(readings, axis=0)
    sq_distances = np.sum((readings - median_profile) ** 2, axis=1)
    threshold = max_distance ** 2 * np.median(sq_distances)
    outliers = []
    for index, entry in enumerate(project_compilation):
        if sq_distances[index] > threshold:
            outliers.append(index if only_image_indices else entry)
    return outliers
@_input_validate
def plot_grayscale_histogram(project_compilation, mark_outliers=True, max_distance=3.0, save_target=None):
    # Heatmap of per-image grayscale segment values (rows = images,
    # columns = segments), optionally annotating outlier images.
    data = [image.fixture.grayscale.values for image in project_compilation]
    length = max(len(v) for v in data if v is not None)
    # NOTE(review): 0 * inf yields a row of NaN, which imshow renders as
    # blank — presumably intentional for images without grayscale data.
    empty = np.zeros((length,), dtype=float) * np.inf
    data = [empty if d is None else d for d in data]
    data = np.array(data)
    if mark_outliers:
        # The inner ``if mark_outliers`` ternary is redundant inside this
        # branch but kept for byte-compatibility.
        outliers = get_grayscale_outlier_images(project_compilation, max_distance) if mark_outliers else []
    else:
        outliers = None
    f = plt.figure()
    f.clf()
    ax = f.gca()
    ax.imshow(data, interpolation='nearest', aspect='auto')
    ax.set_ylabel("Image index")
    ax.set_xlabel("Grayscale segment")
    ax.set_title("Grayscale segment measured values as colors" +
                 ((" (arrows, outliers)" if outliers else " (no outliers)") if mark_outliers else ""))
    if outliers:
        # Annotate each outlier's image index just beyond the last segment.
        segments = data.shape[1]
        for outlier in outliers:
            ax.annotate(outlier.image.index, (segments, outlier.image.index), color='k')
        ax.set_xlim(0, segments)
    if save_target is not None:
        f.savefig(save_target)
    return f
@_input_validate
def animate_marker_positions(project_compilation, fig=None, slice_size=201,
                             positioning='detected', save_target="marker_positions.avi",
                             title="Position markers", comment="", fps=12):
    # Render a movie with one subplot per fiducial marker, each showing a
    # slice_size x slice_size cutout of every scan image centered on that
    # marker's (simulated) position.  Crosshairs mark the slice center so
    # positional drift is visible across frames.
    assert slice_size % 2 == 1, "Slice size may not be even"
    positions = simulate_positioning(project_compilation, positioning)
    # Fall back to the bare file name when the stored absolute path no
    # longer exists (e.g. project moved between machines).
    paths = [image.image.path if os.path.isfile(image.image.path) else os.path.basename(image.image.path)
             for image in project_compilation]
    plt.ion()
    if fig is None:
        fig = plt.figure()
    fig.clf()
    # One AxesImage per marker, stacked vertically; updated in-place below.
    images = [None for _ in range(positions.shape[-1])]
    half_slice_size = np.floor(slice_size / 2.0)
    for idx in range(len(images)):
        ax = fig.add_subplot(len(images), 1, idx + 1)
        # NOTE(review): ``np.float`` was removed in NumPy 1.24 — this line
        # needs ``float``/``np.float64`` on current NumPy.
        images[idx] = ax.imshow(
            np.zeros((slice_size, slice_size), dtype=np.float), cmap=plt.cm.gray, vmin=0, vmax=255)
        ax.axvline(half_slice_size, color='c')
        ax.axhline(half_slice_size, color='c')
    def make_cutout(img, pos_y, pos_x):
        # Copy the window around (pos_x, pos_y) into a NaN-padded buffer so
        # markers near the image edge still produce a full-size cutout.
        cutout = np.zeros((slice_size, slice_size), dtype=np.float) * np.nan
        cutout[abs(min(pos_x - half_slice_size, 0)): min(cutout.shape[0], img.shape[0] - pos_x),
               abs(min(pos_y - half_slice_size, 0)): min(cutout.shape[1], img.shape[1] - pos_y)] = \
            img[max(pos_x - half_slice_size, 0): min(pos_x + half_slice_size + 1, img.shape[0]),
                max(pos_y - half_slice_size, 0): min(pos_y + half_slice_size + 1, img.shape[1])]
        return cutout
    @MovieWriter(save_target, title=title, comment=comment, fps=fps, fig=fig)
    def _animate():
        # Cutouts are cached per frame index; with the current single pass
        # over range(positions.shape[0]) every frame is computed exactly once.
        data = [None for _ in range(positions.shape[0])]
        for index in range(positions.shape[0]):
            if data[index] is None:
                image = plt.imread(paths[index])
                data[index] = []
                for im_index, im in enumerate(images):
                    im_slice = make_cutout(image, *positions[index, :, im_index])
                    im.set_data(im_slice)
                    data[index].append(im_slice)
            else:
                for im_index, im in enumerate(images):
                    im.set_data(data[index][im_index])
            fig.axes[0].set_title("Time {0}".format(index))
            yield
    _animate()
    return fig
@_input_validate
def get_irregular_intervals(project_compilation, max_deviation=0.05):
    """Find scan intervals deviating more than ``max_deviation`` (relative)
    from the median interval, based on the images' recorded time stamps."""
    time_stamps = [entry.image.time_stamp for entry in project_compilation]
    return _get_irregular_intervals(time_stamps, max_deviation)
def get_irregular_intervals_from_file_names(directory, max_deviation=0.05):
    """As ``get_irregular_intervals`` but with time stamps parsed from the
    tiff file names in ``directory`` (last float in each matching name)."""
    time_stamps = []
    for file_name in sorted(glob.glob(os.path.join(directory, "*.tiff"))):
        if _img_pattern.match(file_name):
            time_stamps.append(float(_time_pattern.findall(file_name)[-1]))
    return _get_irregular_intervals(time_stamps, max_deviation)
def _get_irregular_intervals(data, max_deviation):
diff = np.diff(data)
norm = np.abs(np.median(diff))
irregulars = np.where(np.abs(1 - diff / norm) > max_deviation)[0]
return tuple((i + 1, diff[i]) for i in irregulars)
@_input_validate
def plot_positional_markers(project_compilation, save_target=None):
    # 2x2 figure: top-left scatter of all marker centers across scans,
    # remaining panels show per-marker position-frequency heatmaps.
    data = _get_marker_sorted_data(project_compilation)
    # Largest fixture shape across images bounds the scatter axes.
    shape = np.max([image.fixture.shape for image in project_compilation], axis=0)
    scans = data.shape[0]
    f = plt.figure()
    f.clf()
    ax = f.add_subplot(2, 2, 1)
    for x, y in data:
        ax.plot(x, y, 'x')
    # Label at most three markers at their mean position.
    for i in range(data.shape[2])[:3]:
        ax.annotate(i + 1, (data[:, 0, i].mean(), data[:, 1, i].mean()), textcoords='offset points', xytext=(10, -5))
    ax.set_xlim(0, shape[1])
    ax.set_ylim(0, shape[0])
    ax.set(adjustable='box', aspect=1)
    ax.set_title("Position marker centers")
    cm = plt.get_cmap("Blues")
    for i in range(data.shape[2])[:3]:
        ax = f.add_subplot(2, 2, i + 2)
        x = data[:, 0, i]
        y = data[:, 1, i]
        # Shift coordinates to a zero-based local grid for the histogram.
        x -= x.min()
        y -= y.min()
        im = np.zeros((round(x.max()) + 1, round(y.max()) + 1), dtype=int)
        for x_val, y_val in izip(x, y):
            im[round(x_val), round(y_val)] += 1
        print "Marker ", i + 1, '\n', im, '\n'
        ax.imshow(im.T, interpolation="none", cmap=cm, vmin=0, vmax=scans)
        # Print the count in each cell, dark text on light cells and vice versa.
        for idx0, series in enumerate(im):
            for idx1, value in enumerate(series):
                ax.annotate(value, (idx0, idx1), color=cm(0.9) if value < scans/2.0 else cm(0.1), ha='center', va='center')
        ax.set_title("Marker {0} pos freqs".format(i + 1))
        ax.axis('off')
    f.tight_layout()
    if save_target is not None:
        f.savefig(save_target)
    return f
@_input_validate
def get_positional_markers_variability(project_compilation):
    """Variance of marker positions normalized by their median coordinates."""
    sorted_positions = _get_marker_sorted_data(project_compilation)
    return np.var(sorted_positions, axis=0) / np.median(sorted_positions, axis=0)
@_input_validate
def get_positional_marker_outlier_images(project_compilation, max_distance=4, only_image_indices=False):
    """Images whose markers moved more than ``max_distance`` (in summed
    squared coordinate distance) from the per-marker median position."""
    positions = _get_marker_sorted_data(project_compilation)
    median_positions = np.median(positions, axis=0)
    sq_distances = np.sum((positions - median_positions) ** 2, axis=(1, 2))
    outlier_indices = np.where(sq_distances > max_distance ** 2)[0]
    if only_image_indices:
        return outlier_indices
    return tuple(project_compilation[index] for index in outlier_indices)
def _get_marker_sorted_data(project_compilation):
data = np.array([(image.fixture.orientation_marks_x, image.fixture.orientation_marks_y) for
image in project_compilation])
lengths = data.sum(axis=1)
norm = np.median(lengths, axis=0)
sortorder = np.argmin(np.subtract.outer(lengths, norm) ** 2, axis=-1)
return np.array([d[:, s] for d, s in izip(data, sortorder)])
@_input_validate
def get_images_with_irregularities(project_compilation, only_image_indices=False):
    """Union of grayscale and positional-marker outliers, sorted by image."""
    grayscale_outliers = set(
        get_grayscale_outlier_images(project_compilation, only_image_indices=only_image_indices))
    combined = grayscale_outliers.union(
        get_positional_marker_outlier_images(project_compilation, only_image_indices=only_image_indices))
    if only_image_indices:
        return sorted(combined)
    return sorted(combined, key=lambda entry: entry.image.index)
| gpl-3.0 |
aetilley/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
CUFCTFACE/face-recognition | scripts/plot.py | 4 | 2754 | #!/usr/bin/python
# Create plots for log files from experiments.
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import os
def load_logfile(fname):
    """Load a whitespace-delimited log file with four numeric columns.

    :param fname: path to the log file.
    :return: list of four lists; X[i] holds the float values of column i.
    """
    X = [[], [], [], []]
    # "with" guarantees the handle is closed even if a line fails to parse
    # (the original left the file open on error). `range` replaces the
    # Python-2-only `xrange`; behavior is identical.
    with open(fname, "rt") as fp:
        for line in fp:
            data = line.split()
            for i in range(4):
                X[i].append(float(data[i]))
    return X
def save_plot(fname, plots, ylim, xlabel, ylabel):
    """Render all series in *plots* into one figure and save it to *fname*.

    Each entry of *plots* is [x_values, y_values, fmt] with an optional
    fourth element used as the legend label.
    """
    for plot_spec in plots:
        plt.plot(plot_spec[0], plot_spec[1], plot_spec[2])
    plt.ylim(ylim)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # Only multi-series figures get a legend (single series has no label).
    if len(plots) > 1:
        plt.legend([plot_spec[3] for plot_spec in plots])
    plt.savefig(fname)
    plt.clf()
# Create the output directory for figures (0755 = rwxr-xr-x; Python 2 octal).
if not os.path.exists("figures"):
    os.mkdir("figures", 0755)

# Load each experiment log. Per the save_plot calls below: column 0 is the
# hyperparameter value, column 1 accuracy, column 2 training time,
# column 3 prediction time.
X_pca_n1 = load_logfile("logs/feret-pca-n1.log")
X_lda_n1 = load_logfile("logs/feret-lda-n1.log")
X_lda_n2 = load_logfile("logs/feret-lda-n2.log")
X_ica_n1 = load_logfile("logs/feret-ica-n1.log")
X_ica_n2 = load_logfile("logs/feret-ica-n2.log")
X_knn_k = load_logfile("logs/feret-knn-k.log")

# PCA: single-series plots (accuracy / training time / prediction time).
save_plot("figures/pca_acc.eps", [[X_pca_n1[0], X_pca_n1[1], "k"]], (0, 100), "Hyperparameter value", "Accuracy (%)")
save_plot("figures/pca_train.eps", [[X_pca_n1[0], X_pca_n1[2], "k"]], (0, 120), "Hyperparameter value", "Training time (s)")
save_plot("figures/pca_pred.eps", [[X_pca_n1[0], X_pca_n1[3], "k"]], (0, 20), "Hyperparameter value", "Prediction time (s)")

# LDA and ICA: two series per figure (n1 dashed, n2 solid).
save_plot("figures/lda_acc.eps", [[X_lda_n1[0], X_lda_n1[1], "k--", "lda_n1"], [X_lda_n2[0], X_lda_n2[1], "k", "lda_n2"]], (0, 100), "Hyperparameter value", "Accuracy (%)")
save_plot("figures/lda_train.eps", [[X_lda_n1[0], X_lda_n1[2], "k--", "lda_n1"], [X_lda_n2[0], X_lda_n2[2], "k", "lda_n2"]], (0, 120), "Hyperparameter value", "Training time (s)")
save_plot("figures/lda_pred.eps", [[X_lda_n1[0], X_lda_n1[3], "k--", "lda_n1"], [X_lda_n2[0], X_lda_n2[3], "k", "lda_n2"]], (0, 20), "Hyperparameter value", "Prediction time (s)")
save_plot("figures/ica_acc.eps", [[X_ica_n1[0], X_ica_n1[1], "k--", "ica_n1"], [X_ica_n2[0], X_ica_n2[1], "k", "ica_n2"]], (0, 100), "Hyperparameter value", "Accuracy (%)")
save_plot("figures/ica_train.eps", [[X_ica_n1[0], X_ica_n1[2], "k--", "ica_n1"], [X_ica_n2[0], X_ica_n2[2], "k", "ica_n2"]], (0, 120), "Hyperparameter value", "Training time (s)")
save_plot("figures/ica_pred.eps", [[X_ica_n1[0], X_ica_n1[3], "k--", "ica_n1"], [X_ica_n2[0], X_ica_n2[3], "k", "ica_n2"]], (0, 20), "Hyperparameter value", "Prediction time (s)")

# kNN: single-series plots over k.
save_plot("figures/knn_acc.eps", [[X_knn_k[0], X_knn_k[1], "k"]], (0, 100), "Hyperparameter value", "Accuracy (%)")
save_plot("figures/knn_train.eps", [[X_knn_k[0], X_knn_k[2], "k"]], (0, 120), "Hyperparameter value", "Training time (s)")
save_plot("figures/knn_pred.eps", [[X_knn_k[0], X_knn_k[3], "k"]], (0, 20), "Hyperparameter value", "Prediction time (s)")
| mit |
OshynSong/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 93 | 2471 | # Author: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
# Shared test fixtures: the iris data in dense and CSR form, float64 targets,
# and a deterministic per-sample weight vector (0, 1, 2, ...).
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def test_seq_dataset():
    """Dense and sparse datasets must yield the expected samples."""
    dense = ArrayDataset(X, y, sample_weight, seed=42)
    sparse = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
                        y, sample_weight, seed=42)

    def check_sample(xi_data, yi, swi, idx):
        # Rebuild a 1-row CSR matrix from the returned triple and compare it
        # against the corresponding row of the original matrix.
        row = sp.csr_matrix((xi_data), shape=(1, X.shape[1]))
        assert_array_equal(row.data, X_csr[idx].data)
        assert_array_equal(row.indices, X_csr[idx].indices)
        assert_array_equal(row.indptr, X_csr[idx].indptr)
        assert_equal(yi, y[idx])
        assert_equal(swi, sample_weight[idx])

    for dataset in (dense, sparse):
        for _ in range(5):
            # sequential access, then random access
            check_sample(*dataset._next_py())
            check_sample(*dataset._random_py())
def test_seq_dataset_shuffle():
    """Dense and sparse datasets must expose identical (shuffled) orderings."""
    dense = ArrayDataset(X, y, sample_weight, seed=42)
    sparse = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
                        y, sample_weight, seed=42)

    # Before any shuffle, sequential access is in the original order.
    for i in range(5):
        _, _, _, idx1 = dense._next_py()
        _, _, _, idx2 = sparse._next_py()
        assert_equal(idx1, i)
        assert_equal(idx2, i)

    # Random access is seeded identically for both datasets.
    for _ in range(5):
        _, _, _, idx1 = dense._random_py()
        _, _, _, idx2 = sparse._random_py()
        assert_equal(idx1, idx2)

    # After shuffling both with the same seed, the orderings still agree.
    seed = 77
    dense._shuffle_py(seed)
    sparse._shuffle_py(seed)
    for _ in range(5):
        _, _, _, idx1 = dense._next_py()
        _, _, _, idx2 = sparse._next_py()
        assert_equal(idx1, idx2)
        _, _, _, idx1 = dense._random_py()
        _, _, _, idx2 = sparse._random_py()
        assert_equal(idx1, idx2)
| bsd-3-clause |
sowe9385/qiime | scripts/plot_taxa_summary.py | 15 | 12355 | #!/usr/bin/env python
# File created on 19 Jan 2011
from __future__ import division
__author__ = "Jesse Stombaugh"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Jesse Stombaugh", "Julia Goodrich", "Justin Kuczynski",
"John Chase", "Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jesse Stombaugh"
__email__ = "jesse.stombaugh@colorado.edu"
"""
This script generates taxonomy charts
"""
from qiime.util import parse_command_line_parameters, get_qiime_project_dir
from qiime.util import make_option
from qiime.util import create_dir
from qiime.plot_taxa_summary import make_all_charts
from tempfile import mkdtemp
from qiime.colors import taxonomy_color_prefs_and_map_data_from_options
import re
import matplotlib
import os
import shutil
plot_filetype_choices = ['pdf', 'svg', 'png']
script_info = {}
script_info['brief_description'] = """Make taxaonomy summary charts based on\
taxonomy assignment"""
script_info['script_description'] = """This script automates the construction\
of pie, bar and area charts showing the breakdown of taxonomy by given levels.\
The script creates an html file for each chart type for easy visualization. It\
uses the taxonomy or category counts from summarize_taxa.py for combined\
samples by level (-i) and user specified labels for each file passed in (-l).\
Output will be written to the user specified folder (-o) the, where the\
default is the current working directory. The user can also specify the number\
of categories displayed for within a single pie chart, where the rest are\
grouped together as the 'other category' using the (-n) option, default is 20.
"""
script_info['script_usage'] = []
script_info['script_usage'].append(("""Examples:""",
"""If you wish to run the code using default parameters, you must supply a\
counts file (phylum.txt) along with the taxon level label (Phylum), the\
type(s) of charts to produce, and an output directory, by using the following\
command:""",
"""%prog -i phylum.txt -l phylum -c pie,bar,area -o phylum_charts/"""))
script_info['script_usage'].append(("""""",
"""If you want to make charts for multiple levels at a time\
(phylum.txt,class.txt,genus.txt) use the following command:""",
"""%prog -i phylum.txt,class.txt,genus.txt -l Phylum,Class,Genus\
-c pie,bar,area -o phylum_class_genus_charts/"""))
script_info['script_usage'].append(("""""",
"""Additionally, if you would like to display on a set number of taxa ("-n 10")\
in the pie charts, you can use the following command:""",
"""%prog -i class.txt -l Class -c pie -n 10 -o class_pie_n10_charts/"""))
script_info['script_usage'].append(("""""",
"""If you would like to display generate pie charts for specific samples, i.e.\
sample 'PC.636' and sample 'PC.635' that are in the counts file header, you\
can use the following command:""",
"""%prog -i class.txt -l Class -b PC.636,PC.635 -o sample_charts/"""))
script_info['output_description'] = """The script generates an output folder,\
which contains several files. For each pie chart there is a png and a pdf\
file. The best way to view all of the pie charts is by opening up the file\
taxonomy_summary_pie_chart.html."""
script_info['required_options'] = [
# dest should equal long-form parameter names! Can you clean this up?
# Also note that you don't need to pass type='string' - that's the default
make_option('-i', '--counts_fname',
help='Input comma-separated list of summarized taxa filepaths' +
' (i.e results from summarize_taxa.py) [REQUIRED]',
type='existing_filepaths'),
]
script_info['optional_options'] = [
# changed this from type='string' (default) to type='int'
make_option('-l', '--labels',
help='Comma-separated list of taxonomic levels (e.g.' +
' Phylum,Class,Order) [default=%default]', default=None),
make_option('-n', '--num_categories', dest='num_categories',
help='The maximum number of taxonomies to show in each pie chart.' +
' All additional taxonomies are grouped into an "other" category.' +
' NOTE: this functionality only applies to the pie charts.' +
' [default: %default]', default=20, type='int'),
make_option('-o', '--dir_path',
help='Output directory',
type='new_dirpath'),
make_option('-b', '--colorby', dest='colorby', type='string',
help='This is the categories to color by in the plots from the' +
' metadata mapping file. The categories must match the name of a ' +
' column header in the mapping file exactly and multiple categories' +
' can be list by comma separating them without spaces.' +
' [default=%default]'),
make_option('-p', '--prefs_path',
help='Input user-generated preferences filepath. NOTE: This is a' +
' file with a dictionary containing preferences for the analysis.' +
' The key taxonomy_coloring is used for the coloring.' +
' [default: %default]',
type='existing_filepath'),
make_option('-k', '--background_color',
help='This is the background color to use in the plots' +
' (black or white) [default: %default]', default='white',
type='choice', choices=['black', 'white'],),
make_option('-d', '--dpi',
help='This is the resolution of the plot. [default: %default]',
type='int', default=80),
make_option('-x', '--x_width',
help='This is the width of the x-axis to use in the plots.' +
' [default: %default]', default=12, type='int'),
make_option('-y', '--y_height',
help='This is the height of the y-axis to use in the plots.' +
' [default: %default]', default=6, type='int'),
make_option('-w', '--bar_width',
help='This the width of the bars in the bar graph and should be a' +
' number between 0 and 1. NOTE: this only applies to the bar charts.' +
' [default: %default]', default=0.75, type='float'),
make_option('-t', '--type_of_file', type='choice',
help='This is the type of image to produce (i.e. ' +
','.join(plot_filetype_choices) + '). [default: %default]',
choices=plot_filetype_choices, default='pdf'),
make_option('-c', '--chart_type', type='multiple_choice',
mchoices=['pie', 'bar', 'area'],
help='This is the type of chart to plot (i.e. pie, bar or area).' +
' The user has the ability to plot multiple types, by using a' +
' comma-separated list (e.g. area,pie) [default: %default]',
default='area,bar'),
make_option('-r', '--resize_nth_label', type='int',
help='Make every nth label larger than the other lables.' +
' This is for large area and bar charts where the font on the x-axis' +
' is small. This requires an integer value greater than 0.' +
' [default: %default]', default=0),
make_option('-s', '--include_html_legend', action='store_true',
dest='include_html_legend', default=False,
help='Include HTML legend. If present, the writing of the legend' +
' in the html page is included. [default: %default]'),
make_option('-a', '--label_type', type='choice',
help='Label type ("numeric" or "categorical"). ' +
' If the label type is defined as numeric, the x-axis will be' +
' scaled accordingly. Otherwise the x-values will treated' +
' categorically and be evenly spaced [default: %default].',
choices=['categorical', 'numeric'], default='categorical'),
]
script_info['version'] = __version__
def main():
    """Parse command line options, prepare the output tree, validate plot
    parameters and generate the requested taxa summary charts."""
    option_parser, opts, args = parse_command_line_parameters(**script_info)

    # get QIIME directory
    qiime_dir = get_qiime_project_dir()

    if not opts.counts_fname:
        option_parser.error("A list of input files must be specified")

    # get color preferences
    color_prefs, color_data, background_color, label_color = \
        taxonomy_color_prefs_and_map_data_from_options(opts)

    colorby = opts.colorby
    if colorby is None:
        # No -b given: color by every category found in the counts data.
        colorby = []
        for c in color_data['counts'].values():
            colorby.extend(c[0])
    else:
        colorby = colorby.strip().strip("'").split(',')

    counts_fname = opts.counts_fname

    # Define labels to use
    labels = opts.labels
    if not opts.labels:
        new_labels = []
        # create an empty list since the user didn't specify labels
        for i in counts_fname:
            new_labels.append("")
        labels = ','.join(new_labels)

    # Pair each counts file with its (possibly empty) taxonomy level label.
    data = [(label, f.strip())
            for f, label in zip(counts_fname, labels.split(","))]
    filepath = data[0][1]
    # NOTE(review): rpartition('/')[0] yields the directory portion of the
    # first input path, despite the variable being named "filename".
    filename = filepath.strip().rpartition('/')[0]
    num_categories = int(opts.num_categories)
    if num_categories <= 0:
        raise ValueError('The number of categories has to be greater than 0!')

    # create directory path
    dir_path = os.getcwd()
    if opts.dir_path:
        dir_path = opts.dir_path
        try:
            create_dir(opts.dir_path)
        except OSError:
            pass

    # make javascript output directory
    javascript_path = os.path.join(dir_path, 'js')
    try:
        create_dir(javascript_path)
    except OSError:  # raised if dir exists
        pass

    # make raw_data output directory
    raw_data_path = os.path.join(dir_path, 'raw_data')
    try:
        create_dir(raw_data_path)
    except OSError:  # raised if dir exists
        pass

    # move javascript file to javascript output directory
    shutil.copyfile(os.path.join(qiime_dir, 'qiime', 'support_files',
                                 'js/overlib.js'),
                    os.path.join(javascript_path, 'overlib.js'))

    # make css output directory
    css_path = os.path.join(dir_path, 'css')
    try:
        create_dir(css_path)
    except OSError:  # raised if dir exists
        pass

    # move css file to css output directory
    shutil.copyfile(os.path.join(qiime_dir, 'qiime', 'support_files',
                                 'css/qiime_style.css'),
                    os.path.join(css_path, 'qiime_style.css'))

    # verify all parameters are valid
    plot_width = float(opts.x_width)
    if plot_width <= 0:
        raise ValueError('The width of the plot has to be greater than 0!')

    plot_height = float(opts.y_height)
    if plot_height <= 0:
        raise ValueError('The height of the plot has to be greater than 0!')

    bar_width = float(opts.bar_width)
    if bar_width <= 0 or bar_width > 1:
        raise ValueError(
            'The bar width of the plot has to be between 0 and 1!')

    dpi = float(opts.dpi)
    if dpi <= 0:
        raise ValueError('The dpi of the plot has to be greater than 0!')

    resize_nth_label = int(opts.resize_nth_label)
    if resize_nth_label < 0:
        raise ValueError('The resize_nth_label of the plot has to be greater\
 than 0!')

    generate_image_type = opts.type_of_file
    label_type = opts.label_type
    include_html_legend = opts.include_html_legend
    plots_to_make = opts.chart_type
    # Generate one set of charts per requested chart type.
    for chart_type in plots_to_make:
        # make pie chart output path
        # NOTE(review): charts_path is loop-invariant; recreating it each
        # iteration is harmless but redundant.
        charts_path = os.path.join(dir_path, 'charts')
        try:
            create_dir(charts_path)
        except OSError:  # raised if dir exists
            pass
        make_all_charts(data, dir_path, filename, num_categories,
                        colorby, args, color_data, color_prefs, background_color, label_color,
                        chart_type, generate_image_type, plot_width, plot_height, bar_width, dpi,
                        resize_nth_label, label_type, include_html_legend)


if __name__ == "__main__":
    main()
| gpl-2.0 |
vivekpatani/recommendation-system | UI.py | 3 | 3385 | """
Simple User Interface
"""
from movielens import *
from sklearn.cluster import KMeans
import numpy as np
import pickle
import random
import sys
import time
# Load the MovieLens users and items plus the precomputed utility matrix
# (users x rating columns, pickled by an earlier step).
user = []
item = []
d = Dataset()
d.load_users("data/u.user", user)
d.load_items("data/u.item", item)

n_users = len(user)
n_items = len(item)

# NOTE(review): the file handle passed to pickle.load is never closed.
utility_matrix = pickle.load( open("utility_matrix.pkl", "rb") )

# Find the average rating for each user and stores it in the user's object
# (only ratings > 0 count; unrated entries are zeros).
for i in range(0, n_users):
    x = utility_matrix[i]
    user[i].avg_r = sum(a for a in x if a > 0) / sum(a > 0 for a in x)
# Find the Pearson Correlation Similarity Measure between two users
def pcs(x, y, ut):
    """Pearson correlation similarity between users x and y (1-based ids).

    Only co-rated entries (both ratings > 0) contribute to the numerator;
    each denominator uses that user's own rated entries. Returns 0 when the
    denominator vanishes.
    """
    A = ut[x - 1]
    B = ut[y - 1]
    avg_a = user[x - 1].avg_r
    avg_b = user[y - 1].avg_r
    num = sum((a - avg_a) * (b - avg_b)
              for a, b in zip(A, B) if a > 0 and b > 0)
    den1 = sum((a - avg_a) ** 2 for a in A if a > 0)
    den2 = sum((b - avg_b) ** 2 for b in B if b > 0)
    den = (den1 ** 0.5) * (den2 ** 0.5)
    return 0 if den == 0 else num / den
# Perform clustering on items
# Each movie becomes a 19-dimensional binary genre vector; the attribute
# order below defines the genre index used throughout the rest of the script.
movie_genre = []
for movie in item:
    movie_genre.append([movie.unknown, movie.action, movie.adventure, movie.animation, movie.childrens, movie.comedy,
                        movie.crime, movie.documentary, movie.drama, movie.fantasy, movie.film_noir, movie.horror,
                        movie.musical, movie.mystery, movie.romance, movie.sci_fi, movie.thriller, movie.war, movie.western])

movie_genre = np.array(movie_genre)
cluster = KMeans(n_clusters=19)
cluster.fit_predict(movie_genre)

# Ask the user to rate 10 random movies; ratings are accumulated per
# cluster label (averaging when a cluster is hit more than once).
ask = random.sample(item, 10)
new_user = np.zeros(19)
print "Please rate the following movies (1-5):"
for movie in ask:
    print movie.title + ": "
    a = int(input())
    if new_user[cluster.labels_[movie.id - 1]] != 0:
        new_user[cluster.labels_[movie.id - 1]] = (new_user[cluster.labels_[movie.id - 1]] + a) / 2
    else:
        new_user[cluster.labels_[movie.id - 1]] = a

# Append the new user (id 944) and score similarity against all others.
utility_new = np.vstack((utility_matrix, new_user))
user.append(User(944, 21, 'M', 'student', 110018))

pcs_matrix = np.zeros(n_users)

print "Finding users which have similar preferences."
# NOTE(review): the loop runs i up to n_users inclusive; only the
# `i != 943` guard keeps pcs_matrix (length n_users) in bounds, which
# relies on n_users == 943 — confirm for other datasets.
for i in range(0, n_users + 1):
    if i != 943:
        pcs_matrix[i] = pcs(944, i + 1, utility_new)

user_index = []
for i in user:
    user_index.append(i.id - 1)
user_index = user_index[:943]
user_index = np.array(user_index)

# Rank users by similarity and keep the five most similar.
top_5 = [x for (y, x) in sorted(zip(pcs_matrix, user_index), key=lambda pair: pair[0], reverse=True)]
top_5 = top_5[:5]

# For each of the top-5 users, find the index of their largest entry among
# the first 19 columns of the utility matrix.
# NOTE(review): this indexes utility_matrix columns 0-18 as if they were
# genre scores — verify that interpretation against how the matrix is built.
top_5_genre = []
for i in range(0, 5):
    maxi = 0
    maxe = 0
    for j in range(0, 19):
        if maxe < utility_matrix[top_5[i]][j]:
            maxe = utility_matrix[top_5[i]][j]
            maxi = j
    top_5_genre.append(maxi)
print "Movie genres you'd like:"
for i in top_5_genre:
if i == 0:
print "unknown"
elif i == 1:
print "action"
elif i == 2:
print "adventure"
elif i == 3:
print "animation"
elif i == 4:
print "childrens"
elif i == 5:
print "comedy"
elif i == 6:
print "crime"
elif i == 7:
print "documentary"
elif i == 8:
print "drama"
elif i == 9:
print "fantasy"
elif i == 10:
print "film_noir"
elif i == 11:
print "horror"
elif i == 12:
print "musical"
elif i == 13:
print "mystery"
elif i == 14:
print "romance"
elif i == 15:
print "science fiction"
elif i == 16:
print "thriller"
elif i == 17:
print "war"
else:
print "western" | mit |
subutai/htmresearch | projects/sdr_paper/scalar_sdrs.py | 2 | 20118 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This code computes, through simulation, the probability of matching two random
scalar sparse vectors. Xw and Xi both have dimensionality n.
A "match" occurs when Xw dot Xi > theta.
We can test probabilities under different initialization conditions for Xi and
Xw, and for different theta's. We can get nice exponential dropoffs with
dimensionality, similar to binary sparse vectors, under the following
conditions:
|Xw|_0 = k
|Xi|_0 = a
Non-zero entries in Xw are uniform in [-1/k, 1/k]
Non-zero entries in Xi are uniform in S*[0, 2/k]
Here Xw is the putative weight vector and Xi is a positive input vector
(positive because presumably it is after a non-linearity such as ReLU or
K-Winners). Theta is defined as mean(Xw dot Xw) / 2.0. We define it this way to
provide a certain amount of invariance to noise in the inputs. A pretty
corrupted version of Xw will still match Xw.
S controls the scale of Xi relative to Xw. By varying S, we can plot the
effect of scaling on the match probabilities.
"""
from __future__ import print_function
import time
from multiprocessing import Pool
import torch
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.figure import figaspect
def getSparseTensor(numNonzeros, inputSize, outputSize,
                    onlyPositive=False,
                    fixedRange=1.0/24):
    """
    Return a random tensor that is initialized like a sparse weight matrix.

    :param numNonzeros: number of entries left non-zero in each row
    :param inputSize: number of columns
    :param outputSize: number of rows
    :param onlyPositive: if True, draw from [0, fixedRange] instead of
        [-fixedRange, fixedRange]
    :param fixedRange: magnitude of the uniform initialization range
    :return: tensor of shape (outputSize, inputSize) with exactly
        inputSize - numNonzeros entries zeroed per row
    """
    # Initialize weights in the typical fashion.
    w = torch.Tensor(outputSize, inputSize)
    if onlyPositive:
        w.data.uniform_(0, fixedRange)
    else:
        w.data.uniform_(-fixedRange, fixedRange)

    # Zero out a random subset of each row for sparse weight matrices.
    # torch.randperm replaces the original np.long-based advanced indexing:
    # np.long was removed in NumPy >= 1.24, which made the old code crash.
    if numNonzeros < inputSize:
        numZeros = inputSize - numNonzeros
        for row in range(outputSize):
            zeroCols = torch.randperm(inputSize)[:numZeros]
            w.data[row, zeroCols] = 0.0

    return w
def getPermutedTensors(W, kw, n, m2, noisePct):
    """
    Return m2 noisy copies of W. Each copy zeroes out a random subset
    (round(noisePct * kw) entries) of W's kw non-zero components.

    :param W: 1 x n weight tensor with kw non-zero entries
    :param kw: number of non-zero entries in W
    :param n: dimensionality of W (unused in the body; kept for interface)
    :param m2: number of noisy copies to generate
    :param noisePct: fraction of non-zero components to zero, in [0, 1]
    :return: m2 x n tensor of noisy copies
    """
    noisy = W.repeat(m2, 1)
    nonzeroIdx = W[0].nonzero()
    numberToZero = int(round(noisePct * kw))
    for row in range(m2):
        # Pick a fresh random subset of non-zero positions for every copy.
        for col in np.random.permutation(kw)[0:numberToZero]:
            noisy[row, nonzeroIdx[col]] = 0
    return noisy
def plotDot(dot, title="Histogram of dot products",
            path="dot.pdf"):
    """Save a 100-bin histogram of the given dot products to *path*."""
    edges = np.linspace(dot.min(), dot.max(), 100)
    plt.hist(dot, edges, alpha=0.5, label='All cols')
    plt.xlabel("Dot product")
    plt.ylabel("Number")
    plt.title(title)
    plt.savefig(path)
    plt.close()
def getTheta(k, nTrials=100000):
    """
    Estimate a reasonable matching threshold theta for this k.

    Theta is defined as half the mean self dot product w.w over nTrials
    random sparse vectors with k non-zeros in [-1/k, 1/k].
    """
    weights = getSparseTensor(k, k, nTrials, fixedRange=1.0/k)
    theDots = np.zeros(nTrials)
    for trial in range(nTrials):
        theDots[trial] = weights[trial].dot(weights[trial])

    dotMean = theDots.mean()
    print("k=", k, "min/mean/max diag of w dot products",
          theDots.min(), dotMean, theDots.max())

    theta = dotMean / 2.0
    print("Using theta as mean / 2.0 = ", theta)
    return theta, theDots
def returnMatches(kw, kv, n, theta, inputScaling=1.0):
    """
    Generate random weight and input vectors and count how often their dot
    product reaches theta.

    :param kw: k for the weight vectors
    :param kv: k for the input vectors
    :param n: dimensionality of the vectors
    :param theta: dot-product threshold that counts as a match
    :param inputScaling: scale factor applied to the input value range
    :return: (fraction matched, number matched, total comparisons)
    """
    numWeights = 4      # weight vectors generated per call
    numInputs = 1000    # input vectors generated per call
    weights = getSparseTensor(kw, n, numWeights, fixedRange=1.0 / kw)

    # Inputs are positive and scaled relative to the weight range.
    inputVectors = getSparseTensor(kv, n, numInputs,
                                   onlyPositive=True,
                                   fixedRange=2 * inputScaling / kw,
                                   )

    dot = inputVectors.matmul(weights.t())
    numMatches = ((dot >= theta).sum()).item()
    total = numWeights * numInputs
    return numMatches / float(total), numMatches, total
def returnFalseNegatives(kw, noisePct, n, theta):
    """
    Generate one weight vector W with kw non-zero components plus several
    noisy copies of it, and report how many copies still match W.

    :param kw: k for the weight vector
    :param noisePct: fraction of non-zero components zeroed out, 0 to 1
    :param n: dimensionality of the vectors
    :param theta: dot-product threshold that counts as a match
    :return: (fraction matched, number matched, total comparisons)
    """
    W = getSparseTensor(kw, n, 1, fixedRange=1.0 / kw)

    numCopies = 10
    noisyCopies = getPermutedTensors(W, kw, n, numCopies, noisePct)

    dot = noisyCopies.matmul(W.t())
    numMatches = ((dot >= theta).sum()).item()
    return numMatches / float(numCopies), numMatches, numCopies
def computeFalseNegatives(args):
    """
    Estimate the false negative rate for one parameter set.

    *args* is a dict with keys n, kw, noisePct and nTrials; the computed
    rate is stored back into it under "pctFalse" and the dict is returned.
    """
    n, kw = args["n"], args["kw"]
    noisePct = args["noisePct"]
    theta, _ = getTheta(kw)

    numMatches = 0
    totalComparisons = 0
    for _ in range(args["nTrials"]):
        _, num, total = returnFalseNegatives(kw, noisePct, n, theta)
        numMatches += num
        totalComparisons += total

    pctFalseNegatives = 1.0 - float(numMatches) / totalComparisons
    print("kw, n, noise:", kw, n, noisePct,
          ", matches:", numMatches,
          ", comparisons:", totalComparisons,
          ", pct false negatives:", pctFalseNegatives)

    args.update({"pctFalse": pctFalseNegatives})
    return args
def computeFalseNegativesParallel(
        listofNoise=[0.1, 0.2, 0.3, 0.4, 0.45, 0.5, 0.55, 0.6, 0.7, 0.8],
        kw=24,
        numWorkers=8,
        nTrials=1000,
        n=500,
        ):
    """Run computeFalseNegatives for every noise level (in a worker pool if
    numWorkers > 1) and plot the resulting false negative rates."""
    print("Computing match probabilities for kw=", kw)

    # Create arguments for the possibilities we want to test
    args = []
    for ni, noise in enumerate(listofNoise):
        args.append({
            "kw": kw, "n": n,
            "noisePct": noise,
            "nTrials": nTrials,
            "errorIndex": ni,
        })

    numExperiments = len(args)
    if numWorkers > 1:
        pool = Pool(processes=numWorkers)
        rs = pool.map_async(computeFalseNegatives, args, chunksize=1)
        while not rs.ready():
            # NOTE(review): _number_left is a private multiprocessing API,
            # used here only for progress reporting.
            remaining = rs._number_left
            pctDone = 100.0 - (100.0*remaining) / numExperiments
            print("  =>", remaining,
                  "experiments remaining, percent complete=", pctDone)
            time.sleep(5)
        pool.close()  # No more work
        pool.join()
        result = rs.get()
    else:
        # Serial fallback, useful for debugging.
        result = []
        for arg in args:
            result.append(computeFalseNegatives(arg))

    # Read out results and store in numpy array for plotting
    errors = np.zeros(len(listofNoise))
    for r in result:
        errors[r["errorIndex"]] = r["pctFalse"]

    print("Errors for kw=", kw)
    print(errors)
    plotFalseMatches(listofNoise, errors, kw,
                     "images/scalar_false_matches_kw" + str(kw) + ".pdf")
def computeMatchProbability(args):
    """
    Run several trials of returnMatches() and return the overall match
    probability for the given parameters.

    :param args: dict with keys:
        kw: k for the weight vectors
        k: k for the input vectors; if -1, it is set to n/2
        n: dimensionality of the vectors
        theta: dot-product threshold that counts as a match
        nTrials: number of trials to run
        inputScaling: scale factor for the input vectors (1.0 means the
            same scaling as the stored weight vectors)
    :return: args updated with the fraction that matched ("pctMatches")
    """
    kv = args["k"]
    if kv == -1:
        kv = int(round(args["n"] / 2.0))

    matched = 0
    compared = 0
    for _ in range(args["nTrials"]):
        _, num, total = returnMatches(args["kw"], kv, args["n"],
                                      args["theta"], args["inputScaling"])
        matched += num
        compared += total

    pctMatches = float(matched) / compared
    print("kw, kv, n, s:", args["kw"], kv, args["n"], args["inputScaling"],
          ", matches:", matched,
          ", comparisons:", compared,
          ", pct matches:", pctMatches)

    args.update({"pctMatches": pctMatches})
    return args
def computeMatchProbabilityParallel(args, numWorkers=8):
    """Run computeMatchProbability over each dict in *args*, using a worker
    pool when numWorkers > 1, and return the list of updated dicts."""
    numExperiments = len(args)
    if numWorkers > 1:
        pool = Pool(processes=numWorkers)
        rs = pool.map_async(computeMatchProbability, args, chunksize=1)
        while not rs.ready():
            # NOTE(review): _number_left is a private multiprocessing API,
            # used here only for progress reporting.
            remaining = rs._number_left
            pctDone = 100.0 - (100.0*remaining) / numExperiments
            print("  =>", remaining,
                  "experiments remaining, percent complete=", pctDone)
            time.sleep(5)
        pool.close()  # No more work
        pool.join()
        result = rs.get()
    else:
        # Serial fallback, useful for debugging.
        result = []
        for arg in args:
            result.append(computeMatchProbability(arg))
    return result
def computeMatchProbabilities(listofkValues=[64, 128, 256, -1],
                              listofNValues=[250, 500, 1000, 1500, 2000, 2500],
                              inputScale=1.0,
                              kw=24,
                              numWorkers=10,
                              nTrials=1000,
                              ):
    """
    Estimate match probabilities for every (k, n) combination and plot the
    resulting error matrix.
    """
    print("Computing match probabilities for input scale=", inputScale)

    theta, _ = getTheta(kw)
    # One experiment dict per (k, n) combination.
    experiments = [
        {"k": k, "kw": kw, "n": n, "theta": theta,
         "nTrials": nTrials, "inputScaling": inputScale,
         "errorIndex": [ki, ni]}
        for ki, k in enumerate(listofkValues)
        for ni, n in enumerate(listofNValues)
    ]

    result = computeMatchProbabilityParallel(experiments, numWorkers)

    # Collect results into a matrix indexed by (k index, n index).
    errors = np.zeros((len(listofkValues), len(listofNValues)))
    for r in result:
        ki, ni = r["errorIndex"]
        errors[ki, ni] = r["pctMatches"]

    print("Errors for kw=", kw)
    print(repr(errors))
    plotMatches(listofNValues, errors,
                "images/scalar_effect_of_n_kw" + str(kw) + ".pdf")
def computeScaledProbabilities(
        listOfScales=[1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0],
        listofkValues=[64, 128, 256],
        kw=32,
        n=1000,
        numWorkers=10,
        nTrials=1000,
        ):
    """
    Measure how the input scale factor S affects match probabilities for a
    fixed dimensionality n, and plot the result.
    """
    theta, _ = getTheta(kw)
    # One experiment dict per (k, scale) combination.
    experiments = [
        {"k": k, "kw": kw, "n": n, "theta": theta,
         "nTrials": nTrials, "inputScaling": s,
         "errorIndex": [ki, si]}
        for ki, k in enumerate(listofkValues)
        for si, s in enumerate(listOfScales)
    ]

    result = computeMatchProbabilityParallel(experiments, numWorkers)

    # Collect results into a matrix indexed by (k index, scale index).
    errors = np.zeros((len(listofkValues), len(listOfScales)))
    for r in result:
        ki, si = r["errorIndex"]
        errors[ki, si] = r["pctMatches"]

    print("Errors using scaled inputs, for kw=", kw)
    print(repr(errors))
    plotScaledMatches(listofkValues, listOfScales, errors,
                      "images/scalar_effect_of_scale_kw" + str(kw) + ".pdf")
def computeMatchProbabilityOmega(k, bMax, theta, nTrials=100):
    """
    The Omega match probability estimates the probability of matching when
    both vectors have exactly b components in common. This function computes
    this probability for b=1 to bMax.

    For each value of b this function:
    1) Creates nTrials instances of Xw(b) which are vectors with b components
       where each component is uniform in [-1/k, 1/k].
    2) Creates nTrials instances of Xi(b) which are vectors with b components
       where each component is uniform in [0, 2/k].
    3) Does every possible dot product of Xw(b) dot Xi(b), i.e. nTrials * nTrials
       dot products.
    4) Counts the fraction of cases where Xw(b) dot Xi(b) >= theta

    Returns an array with bMax+1 entries: entry b (for b >= 1) contains the
    probability computed in 4); entry 0 is unused and remains 0.
    """
    omegaProb = np.zeros(bMax+1)
    for b in range(1, bMax+1):
        # Weight-side vectors: b components uniform in [-1/k, 1/k].
        xwb = getSparseTensor(b, b, nTrials, fixedRange=1.0/k)
        # Input-side vectors: b non-negative components uniform in [0, 2/k].
        xib = getSparseTensor(b, b, nTrials, onlyPositive=True, fixedRange=2.0/k)
        # All nTrials x nTrials pairwise dot products at once.
        r = xwb.matmul(xib.t())
        numMatches = ((r >= theta).sum()).item()
        omegaProb[b] = numMatches / float(nTrials * nTrials)
    print(omegaProb)
    return omegaProb
def plotMatches(listofNValues, errors,
                fileName="images/scalar_effect_of_n.pdf",
                fig=None, ax=None):
    """Plot match frequency versus dimensionality n, one curve per row of
    ``errors`` (rows correspond to a = 64, 128, 256, n/2).

    When ``fig``/``ax`` are supplied the curves are drawn into them and the
    figure is only saved if ``fileName`` is not None (lets plotMatches2 embed
    this plot as a subplot).
    """
    if fig is None:
        fig, ax = plt.subplots()
    fig.suptitle("Probability of matching sparse scalar vectors")
    ax.set_xlabel("Dimensionality (n)")
    ax.set_ylabel("Frequency of matches")
    ax.set_yscale("log")

    # One dotted black curve per row of the errors matrix.
    curveLabels = ("a=64 (predicted)", "a=128 (predicted)",
                   "a=256 (predicted)", "a=n/2 (predicted)")
    for row, curveLabel in enumerate(curveLabels):
        ax.plot(listofNValues, errors[row, :], 'k:',
                label=curveLabel, marker="o", color='black')

    # Tag each curve near its fourth data point instead of using a legend.
    xRight = listofNValues[3] + 100
    ax.annotate(r"$a = 64$", xy=(xRight, errors[0, 3]),
                xytext=(-5, 2), textcoords="offset points", ha="left",
                color='black')
    ax.annotate(r"$a = 128$", xy=(xRight, errors[1, 3]),
                ha="left", color='black')
    ax.annotate(r"$a = 256$", xy=(xRight, errors[2, 3]),
                ha="left", color='black')
    ax.annotate(r"$a = \frac{n}{2}$", xy=(xRight, errors[3, 3] / 2.0),
                ha="left", color='black')

    ax.minorticks_off()
    ax.grid(True, alpha=0.3)
    if fileName is not None:
        plt.savefig(fileName)
        plt.close()
def plotScaledMatches(listOfScales, errors,
                      fileName="images/scalar_effect_of_scale.pdf",
                      fig=None, ax=None):
    """Plot match frequency versus input scale factor, one curve per row of
    ``errors`` (rows correspond to a = 64, 128, 256).

    When ``fig``/``ax`` are supplied the plot is drawn into them and only
    saved if ``fileName`` is not None.
    """
    if fig is None:
        fig, ax = plt.subplots()
    fig.suptitle("Matching sparse scalar vectors: effect of scale")
    ax.set_xlabel("Scale factor (s)")
    ax.set_ylabel("Frequency of matches")
    ax.set_yscale("log")

    ax.plot(listOfScales, errors[0, :], 'k:',
            label="a=64 (predicted)", marker="o", color='black')
    ax.plot(listOfScales, errors[1, :], 'k:',
            label="a=128 (predicted)", marker="o", color='black')
    # BUG FIX: this third curve plots errors[2, :], which is the a=256 row
    # (see the a=256 annotation below); its label previously duplicated
    # "a=128 (predicted)".
    ax.plot(listOfScales, errors[2, :], 'k:',
            label="a=256 (predicted)", marker="o", color='black')

    ax.annotate(r"$a=64$",
                xy=(listOfScales[1]+0.2, errors[0, 1]),
                xytext=(-5, 2), textcoords="offset points", ha="left",
                color='black')
    ax.annotate(r"$a=128$",
                xy=(listOfScales[1]-0.1, (2*errors[1, 1] + errors[1, 2]) / 3.0),
                ha="left", color='black')
    ax.annotate(r"$a=256$",
                xy=(listOfScales[1]-0.1, (errors[2, 1] + errors[2, 2]) / 2.0),
                ha="left", color='black')

    ax.minorticks_off()
    ax.grid(True, alpha=0.3)
    if fileName is not None:
        plt.savefig(fileName)
        plt.close()
def plotThetaDistribution(kw, fileName="images/theta_distribution.pdf"):
    """Histogram the dot products from which theta is derived for this kw."""
    theta, theDots = getTheta(kw)

    # Histogram of the raw dot products, binned over their observed range.
    lowEdge = float(theDots.min())
    highEdge = float(theDots.max())
    bins = np.linspace(lowEdge, highEdge, 50)
    plt.hist(theDots, bins, alpha=0.5, label='Dot products')

    plt.legend(loc='upper right')
    plt.xlabel("Dot product")
    plt.ylabel("Frequency")
    plt.title("Distribution of dot products, kw=" + str(kw))
    plt.savefig(fileName)
    plt.close()
def plotFalseMatches(listOfNoise, errors, kw,
                     fileName="images/scalar_false_positives.pdf"):
    """Plot false-negative frequency against the fraction of components
    zeroed out.

    NOTE(review): the figure title says "false negatives" while the default
    file name says "false_positives" -- confirm which is intended.
    """
    fig, ax = plt.subplots()
    # ax.set_yscale("log")
    ax.plot(listOfNoise, errors, 'k:', marker="o", color='black')
    fig.suptitle("Probability of false negatives with $k_w$=" + str(kw))
    ax.set_xlabel("Pct of components set to zero")
    ax.set_ylabel("Frequency of false negatives")
    plt.minorticks_off()
    plt.grid(True, alpha=0.3)
    plt.savefig(fileName)
    plt.close()
def plotMatches2(listofNValues, errors,
                 listOfScales, scaleErrors,
                 fileName="images/scalar_matches.pdf"):
    """
    Render the n-dependence and scale-dependence match plots side by side,
    in an aspect ratio appropriate for the paper.
    """
    width, height = figaspect(0.4)
    fig, (leftAx, rightAx) = plt.subplots(1, 2, figsize=(width, height))
    # Each helper draws into its subplot; fileName=None suppresses saving.
    plotMatches(listofNValues, errors, fileName=None, fig=fig, ax=leftAx)
    plotScaledMatches(listOfScales, scaleErrors, fileName=None, fig=fig,
                      ax=rightAx)
    plt.savefig(fileName)
    plt.close()
def createPregeneratedGraphs():
    """
    Creates graphs based on previous runs of the scripts. Useful for editing
    graph format for writeups.
    """
    # Graph for computeMatchProbabilities(kw=32, nTrials=3000)
    listofNValues = [250, 500, 1000, 1500, 2000, 2500]
    kw = 32
    # Rows correspond to k = 64, 128, 256, n/2 (see plotMatches labels);
    # columns correspond to listofNValues.
    errors = np.array([
        [3.65083333e-03, 3.06166667e-04, 1.89166667e-05,
         4.16666667e-06, 1.50000000e-06, 9.16666667e-07],
        [2.44633333e-02, 3.64491667e-03, 3.16083333e-04,
         6.93333333e-05, 2.16666667e-05, 8.66666667e-06],
        [7.61641667e-02, 2.42496667e-02, 3.75608333e-03,
         9.78333333e-04, 3.33250000e-04, 1.42250000e-04],
        [2.31302500e-02, 2.38609167e-02, 2.28072500e-02,
         2.33225000e-02, 2.30650000e-02, 2.33988333e-02]
    ])

    # Graph for computeScaledProbabilities(nTrials=3000)
    # Rows correspond to k = 64, 128, 256; columns to listOfScales.
    listOfScales = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0]
    scaleErrors = np.array([
        [1.94166667e-05, 1.14900000e-03, 7.20725000e-03, 1.92405833e-02,
         3.60794167e-02, 5.70276667e-02, 7.88510833e-02],
        [3.12500000e-04, 7.07616667e-03, 2.71600000e-02, 5.72415833e-02,
         8.95497500e-02, 1.21294333e-01, 1.50582500e-01],
        [3.97708333e-03, 3.31468333e-02, 8.04755833e-02, 1.28687750e-01,
         1.71220000e-01, 2.07019250e-01, 2.34703167e-01]
    ])

    plotMatches2(listofNValues, errors,
                 listOfScales, scaleErrors,
                 "images/scalar_matches_kw" + str(kw) + ".pdf")
if __name__ == '__main__':
    # The main graphs (takes about 12-15 mins each)
    #
    # computeMatchProbabilities(kw=32, nTrials=3000)
    # computeScaledProbabilities(nTrials=3000)

    # These are graphs using pregenerated numbers for the above
    # createPregeneratedGraphs()

    # Only the Omega match-probability computation is active by default;
    # everything else is left commented out for ad-hoc runs.
    theta, _ = getTheta(32)
    computeMatchProbabilityOmega(32.0, 32, theta)

    # computeMatchProbabilities(kw=24, nTrials=1000)
    # computeMatchProbabilities(kw=16, nTrials=3000)
    # computeMatchProbabilities(kw=48, nTrials=3000)
    # computeMatchProbabilities(kw=64, nTrials=3000)
    # computeMatchProbabilities(kw=96, nTrials=3000)

    # plotThetaDistribution(32)

    # computeFalseNegativesParallel(kw=32, nTrials=10000)
    # computeFalseNegativesParallel(kw=64, nTrials=10000)
    # computeFalseNegativesParallel(kw=128, nTrials=10000)
| agpl-3.0 |
cwu2011/scikit-learn | sklearn/semi_supervised/label_propagation.py | 128 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
                                              ClassifierMixin)):
    """Base class for label propagation module.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported..
    gamma : float
        Parameter for rbf kernel
    alpha : float
        Clamping factor
    max_iter : float
        Change maximum number of iterations allowed
    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state
    n_neighbors : integer > 0
        Parameter for knn kernel
    """

    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
                 alpha=1, max_iter=30, tol=1e-3):
        self.max_iter = max_iter
        self.tol = tol

        # kernel parameters
        self.kernel = kernel
        self.gamma = gamma
        self.n_neighbors = n_neighbors

        # clamping factor
        self.alpha = alpha

    def _get_kernel(self, X, y=None):
        # With y=None: affinity of X with itself (training graph).
        # With y given: affinity of the training data X with new points y.
        if self.kernel == "rbf":
            if y is None:
                return rbf_kernel(X, X, gamma=self.gamma)
            else:
                return rbf_kernel(X, y, gamma=self.gamma)
        elif self.kernel == "knn":
            # Lazily fit the nearest-neighbors index the first time a knn
            # kernel is requested (_build_graph resets nn_fit to None).
            if self.nn_fit is None:
                self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
            if y is None:
                # Sparse connectivity graph of the training points.
                return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
                                                    self.n_neighbors,
                                                    mode='connectivity')
            else:
                # Indices of the nearest training neighbors of each y row.
                return self.nn_fit.kneighbors(y, return_distance=False)
        else:
            raise ValueError("%s is not a valid kernel. Only rbf and knn"
                             " are supported at this time" % self.kernel)

    @abstractmethod
    def _build_graph(self):
        raise NotImplementedError("Graph construction must be implemented"
                                  " to fit a label propagation model.")

    def predict(self, X):
        """Performs inductive inference across the model.

        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]

        Returns
        -------
        y : array_like, shape = [n_samples]
            Predictions for input data
        """
        # Most probable class under the categorical distribution per sample.
        probas = self.predict_proba(X)
        return self.classes_[np.argmax(probas, axis=1)].ravel()

    def predict_proba(self, X):
        """Predict probability for each possible outcome.

        Compute the probability estimates for each single sample in X
        and each possible outcome seen during training (categorical
        distribution).

        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]

        Returns
        -------
        probabilities : array, shape = [n_samples, n_classes]
            Normalized probability distributions across
            class labels
        """
        check_is_fitted(self, 'X_')

        if sparse.isspmatrix(X):
            X_2d = X
        else:
            X_2d = np.atleast_2d(X)
        weight_matrices = self._get_kernel(self.X_, X_2d)
        if self.kernel == 'knn':
            # knn kernel returns neighbor indices: sum the label
            # distributions of each sample's nearest training neighbors.
            probabilities = []
            for weight_matrix in weight_matrices:
                ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
                probabilities.append(ine)
            probabilities = np.array(probabilities)
        else:
            # rbf kernel returns dense affinities: weighted vote over all
            # training points.
            weight_matrices = weight_matrices.T
            probabilities = np.dot(weight_matrices, self.label_distributions_)
        # Normalize each row to a proper categorical distribution.
        normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
        probabilities /= normalizer
        return probabilities

    def fit(self, X, y):
        """Fit a semi-supervised label propagation model based

        All the input data is provided matrix X (labeled and unlabeled)
        and corresponding label matrix y with a dedicated marker value for
        unlabeled samples.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            A {n_samples by n_samples} size matrix will be created from this

        y : array_like, shape = [n_samples]
            n_labeled_samples (unlabeled points are marked as -1)
            All unlabeled samples will be transductively assigned labels

        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y)
        self.X_ = X
        # actual graph construction (implementations should override this)
        graph_matrix = self._build_graph()

        # label construction
        # construct a categorical distribution for classification only
        classes = np.unique(y)
        classes = (classes[classes != -1])  # -1 marks unlabeled samples
        self.classes_ = classes

        n_samples, n_classes = len(y), len(classes)

        y = np.asarray(y)
        unlabeled = y == -1
        # Clamp weight per sample: labeled points keep weight 1, unlabeled
        # points are scaled by alpha each iteration.
        clamp_weights = np.ones((n_samples, 1))
        clamp_weights[unlabeled, 0] = self.alpha

        # initialize distributions: one-hot rows for labeled samples,
        # all-zero rows for unlabeled ones.
        self.label_distributions_ = np.zeros((n_samples, n_classes))
        for label in classes:
            self.label_distributions_[y == label, classes == label] = 1

        # Static term re-added after every propagation step (soft clamping).
        y_static = np.copy(self.label_distributions_)
        if self.alpha > 0.:
            y_static *= 1 - self.alpha
        y_static[unlabeled] = 0

        l_previous = np.zeros((self.X_.shape[0], n_classes))

        remaining_iter = self.max_iter
        if sparse.isspmatrix(graph_matrix):
            graph_matrix = graph_matrix.tocsr()
        # Iterate propagate -> clamp until convergence or iteration budget.
        while (_not_converged(self.label_distributions_, l_previous, self.tol)
                and remaining_iter > 1):
            l_previous = self.label_distributions_
            self.label_distributions_ = safe_sparse_dot(
                graph_matrix, self.label_distributions_)
            # clamp
            self.label_distributions_ = np.multiply(
                clamp_weights, self.label_distributions_) + y_static
            remaining_iter -= 1

        # Normalize rows into probability distributions.
        normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
        self.label_distributions_ /= normalizer
        # set the transduction item
        transduction = self.classes_[np.argmax(self.label_distributions_,
                                               axis=1)]
        self.transduction_ = transduction.ravel()
        self.n_iter_ = self.max_iter - remaining_iter
        return self
class LabelPropagation(BaseLabelPropagation):
    """Label Propagation classifier

    Read more in the :ref:`User Guide <label_propagation>`.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported..

    gamma : float
        Parameter for rbf kernel

    n_neighbors : integer > 0
        Parameter for knn kernel

    alpha : float
        Clamping factor

    max_iter : float
        Change maximum number of iterations allowed

    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state

    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.

    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.

    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.

    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.

    n_iter_ : int
        Number of iterations run.

    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelPropagation
    >>> label_prop_model = LabelPropagation()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelPropagation(...)

    References
    ----------
    Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
    with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
    University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf

    See Also
    --------
    LabelSpreading : Alternate label propagation strategy more robust to noise
    """
    def _build_graph(self):
        """Matrix representing a fully connected graph between each sample

        This basic implementation creates a non-stochastic affinity matrix, so
        class distributions will exceed 1 (normalization may be desired).
        """
        if self.kernel == 'knn':
            # Force _get_kernel to (re)fit the nearest-neighbors index.
            self.nn_fit = None
        affinity_matrix = self._get_kernel(self.X_)
        # Column sums used to normalize the affinity matrix.
        normalizer = affinity_matrix.sum(axis=0)
        if sparse.isspmatrix(affinity_matrix):
            # NOTE(review): taking np.diag of the column-sum matrix and
            # dividing the raw .data array by it looks suspect (shapes only
            # line up in special cases) -- confirm against a current sklearn
            # release before relying on the sparse/knn path.
            affinity_matrix.data /= np.diag(np.array(normalizer))
        else:
            affinity_matrix /= normalizer[:, np.newaxis]
        return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
    """LabelSpreading model for semi-supervised learning

    This model is similar to the basic Label Propgation algorithm,
    but uses affinity matrix based on the normalized graph Laplacian
    and soft clamping across the labels.

    Read more in the :ref:`User Guide <label_propagation>`.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.

    gamma : float
      parameter for rbf kernel

    n_neighbors : integer > 0
      parameter for knn kernel

    alpha : float
      clamping factor

    max_iter : float
      maximum number of iterations allowed

    tol : float
      Convergence tolerance: threshold to consider the system at steady
      state

    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.

    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.

    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.

    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.

    n_iter_ : int
        Number of iterations run.

    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelSpreading
    >>> label_prop_model = LabelSpreading()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelSpreading(...)

    References
    ----------
    Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
    Bernhard Schoelkopf. Learning with local and global consistency (2004)
    http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219

    See Also
    --------
    LabelPropagation : Unregularized graph based semi-supervised learning
    """

    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
                 max_iter=30, tol=1e-3):
        # this one has different base parameters (alpha defaults to soft
        # clamping at 0.2 rather than the hard clamp of 1)
        super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
                                             n_neighbors=n_neighbors,
                                             alpha=alpha, max_iter=max_iter,
                                             tol=tol)

    def _build_graph(self):
        """Graph matrix for Label Spreading computes the graph laplacian"""
        # compute affinity matrix (or gram matrix)
        if self.kernel == 'knn':
            # Force _get_kernel to (re)fit the nearest-neighbors index.
            self.nn_fit = None
        n_samples = self.X_.shape[0]
        affinity_matrix = self._get_kernel(self.X_)
        laplacian = graph_laplacian(affinity_matrix, normed=True)
        # Propagation uses the negated Laplacian with a zeroed diagonal.
        laplacian = -laplacian
        if sparse.isspmatrix(laplacian):
            # assumes COO layout (has .row/.col) -- zero the diagonal entries
            diag_mask = (laplacian.row == laplacian.col)
            laplacian.data[diag_mask] = 0.0
        else:
            laplacian.flat[::n_samples + 1] = 0.0  # set diag to 0.0
        return laplacian
| bsd-3-clause |
thunlp/OpenNE | src/openne/tadw.py | 1 | 4372 | from __future__ import print_function
import math
import numpy as np
from numpy import linalg as la
from sklearn.preprocessing import normalize
from .gcn.utils import *
class TADW(object):
    # Text-Associated DeepWalk: learns node embeddings by factorizing the
    # proximity matrix M ~= W^T H T, where T holds (SVD-reduced) text
    # features. The final embedding per node concatenates the normalized
    # W and (T^T H^T) halves, so each half has dim/2 dimensions.

    def __init__(self, graph, dim, lamb=0.2):
        # graph: project graph wrapper exposing .G (networkx graph),
        #        .node_size, .look_up_dict, .look_back_list
        # dim:   total embedding dimensionality (split evenly across the
        #        structure and text halves)
        # lamb:  L2 regularization weight on W and H
        self.g = graph
        self.lamb = lamb
        self.dim = int(dim/2)
        self.train()  # training runs eagerly at construction time

    def getAdj(self):
        graph = self.g.G
        node_size = self.g.node_size
        look_up = self.g.look_up_dict
        adj = np.zeros((node_size, node_size))
        # Symmetric 0/1 adjacency (edges treated as undirected).
        for edge in self.g.G.edges():
            adj[look_up[edge[0]]][look_up[edge[1]]] = 1.0
            adj[look_up[edge[1]]][look_up[edge[0]]] = 1.0
        # ScaleSimMat: row-normalize so each row sums to 1.
        return adj/np.sum(adj, axis=1)

    def save_embeddings(self, filename):
        # Standard word2vec-style text format: header line with counts,
        # then one "<node> <v0> <v1> ..." line per node.
        fout = open(filename, 'w')
        node_num = len(self.vectors.keys())
        fout.write("{} {}\n".format(node_num, self.dim*2))
        for node, vec in self.vectors.items():
            fout.write("{} {}\n".format(node, ' '.join([str(x) for x in vec])))
        fout.close()

    def getT(self):
        # Stack per-node 'feature' attributes into a matrix, reduce with
        # SVD if needed, and return transposed (feature_size x node_num).
        g = self.g.G
        look_back = self.g.look_back_list
        self.features = np.vstack([g.nodes[look_back[i]]['feature']
                                   for i in range(g.number_of_nodes())])
        self.preprocessFeature()
        return self.features.T

    def preprocessFeature(self):
        # Reduce text features to at most 200 dimensions via truncated SVD.
        if self.features.shape[1] > 200:
            U, S, VT = la.svd(self.features)
            Ud = U[:, 0:200]
            Sd = S[0:200]
            self.features = np.array(Ud)*Sd.reshape(200)

    def train(self):
        # Alternating minimization of ||M - W^T H T||^2 (+ L2 terms) over W
        # and H; each subproblem is solved by conjugate gradient on the
        # vectorized variable.
        self.adj = self.getAdj()
        # M=(A+A^2)/2 where A is the row-normalized adjacency matrix
        self.M = (self.adj + np.dot(self.adj, self.adj))/2
        # T is feature_size*node_num, text features
        self.T = self.getT()
        self.node_size = self.adj.shape[0]
        self.feature_size = self.features.shape[1]
        # Random (unseeded) initialization -- results vary run to run.
        self.W = np.random.randn(self.dim, self.node_size)
        self.H = np.random.randn(self.dim, self.feature_size)
        # Update
        for i in range(20):
            print('Iteration ', i)
            # Update W: gradient and Hessian of the objective in W, then
            # conjugate-gradient solve until the residual norm is small.
            B = np.dot(self.H, self.T)
            drv = 2 * np.dot(np.dot(B, B.T), self.W) - \
                2*np.dot(B, self.M.T) + self.lamb*self.W
            Hess = 2*np.dot(B, B.T) + self.lamb*np.eye(self.dim)
            drv = np.reshape(drv, [self.dim*self.node_size, 1])
            rt = -drv
            dt = rt
            vecW = np.reshape(self.W, [self.dim*self.node_size, 1])
            while np.linalg.norm(rt, 2) > 1e-4:
                # standard CG step: step size at, residual update, direction
                # update with Fletcher-Reeves coefficient bt
                dtS = np.reshape(dt, (self.dim, self.node_size))
                Hdt = np.reshape(np.dot(Hess, dtS), [
                                 self.dim*self.node_size, 1])
                at = np.dot(rt.T, rt)/np.dot(dt.T, Hdt)
                vecW = vecW + at*dt
                rtmp = rt
                rt = rt - at*Hdt
                bt = np.dot(rt.T, rt)/np.dot(rtmp.T, rtmp)
                dt = rt + bt * dt
            self.W = np.reshape(vecW, (self.dim, self.node_size))

            # Update H: same CG scheme with the Hessian-vector product
            # computed implicitly (never materializing the full Hessian).
            drv = np.dot((np.dot(np.dot(np.dot(self.W, self.W.T), self.H), self.T)
                          - np.dot(self.W, self.M.T)), self.T.T) + self.lamb*self.H
            drv = np.reshape(drv, (self.dim*self.feature_size, 1))
            rt = -drv
            dt = rt
            vecH = np.reshape(self.H, (self.dim*self.feature_size, 1))
            while np.linalg.norm(rt, 2) > 1e-4:
                dtS = np.reshape(dt, (self.dim, self.feature_size))
                Hdt = np.reshape(np.dot(np.dot(np.dot(self.W, self.W.T), dtS), np.dot(self.T, self.T.T))
                                 + self.lamb*dtS, (self.dim*self.feature_size, 1))
                at = np.dot(rt.T, rt)/np.dot(dt.T, Hdt)
                vecH = vecH + at*dt
                rtmp = rt
                rt = rt - at*Hdt
                bt = np.dot(rt.T, rt)/np.dot(rtmp.T, rtmp)
                dt = rt + bt * dt
            self.H = np.reshape(vecH, (self.dim, self.feature_size))
        # Final embedding: normalized structure half next to normalized
        # text half (each row has dim/2 + dim/2 = dim components).
        self.Vecs = np.hstack(
            (normalize(self.W.T), normalize(np.dot(self.T.T, self.H.T))))
        # get embeddings
        self.vectors = {}
        look_back = self.g.look_back_list
        for i, embedding in enumerate(self.Vecs):
            self.vectors[look_back[i]] = embedding
| mit |
ahoyosid/scikit-learn | sklearn/ensemble/partial_dependence.py | 6 | 14973 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
    """Build an evaluation grid spanning the ``percentiles`` of ``X``.

    For each column of ``X``: if the column has fewer than
    ``grid_resolution`` unique values, those values form the axis;
    otherwise the axis is ``grid_resolution`` evenly spaced points
    between the column's two empirical percentiles.

    Parameters
    ----------
    X : ndarray
        The data
    percentiles : tuple of floats
        The percentiles which are used to construct the extreme
        values of the grid axes.
    grid_resolution : int
        The number of equally spaced points that are placed
        on the grid.

    Returns
    -------
    grid : ndarray
        All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0] == grid_resolution * X.shape[1]``.
    axes : seq of ndarray
        The axes with which the grid has been created.
    """
    if len(percentiles) != 2:
        raise ValueError('percentile must be tuple of len 2')
    if not all(0. <= x <= 1. for x in percentiles):
        raise ValueError('percentile values must be in [0, 1]')

    axes = []
    for col in range(X.shape[1]):
        col_values = np.unique(X[:, col])
        if col_values.shape[0] < grid_resolution:
            # Low-cardinality feature: its unique values are the axis.
            axes.append(col_values)
        else:
            # Evenly spaced axis between the empirical percentile bounds.
            bounds = mquantiles(X, prob=percentiles, axis=0)
            axes.append(np.linspace(bounds[0, col],
                                    bounds[1, col],
                                    num=grid_resolution, endpoint=True))
    return cartesian(axes), axes
def partial_dependence(gbrt, target_variables, grid=None, X=None,
                       percentiles=(0.05, 0.95), grid_resolution=100):
    """Partial dependence of ``target_variables``.

    Partial dependence plots show the dependence between the joint values
    of the ``target_variables`` and the function represented
    by the ``gbrt``.

    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.
    target_variables : array-like, dtype=int
        The target features for which the partial dependecy should be
        computed (size should be smaller than 3 for visual renderings).
    grid : array-like, shape=(n_points, len(target_variables))
        The grid of ``target_variables`` values for which the
        partial dependecy should be evaluated (either ``grid`` or ``X``
        must be specified).
    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained. It is used to generate
        a ``grid`` for the ``target_variables``. The ``grid`` comprises
        ``grid_resolution`` equally spaced points between the two
        ``percentiles``.
    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used create the extreme values
        for the ``grid``. Only if ``X`` is not None.
    grid_resolution : int, default=100
        The number of equally spaced points on the ``grid``.

    Returns
    -------
    pdp : array, shape=(n_classes, n_points)
        The partial dependence function evaluated on the ``grid``.
        For regression and binary classification ``n_classes==1``.
    axes : seq of ndarray or None
        The axes with which the grid has been created or None if
        the grid has been given.

    Examples
    --------
    >>> samples = [[0, 0, 2], [1, 0, 0]]
    >>> labels = [0, 1]
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
    >>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
    >>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
    (array([[-4.52...,  4.52...]]), [array([ 0.,  1.])])
    """
    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    if gbrt.estimators_.shape[0] == 0:
        # estimators_ is empty before fit
        raise ValueError('Call %s.fit before partial_dependence' %
                         gbrt.__class__.__name__)

    # Exactly one of grid / X must be provided.
    if (grid is None and X is None) or (grid is not None and X is not None):
        raise ValueError('Either grid or X must be specified')

    target_variables = np.asarray(target_variables, dtype=np.int32,
                                  order='C').ravel()

    if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
        raise ValueError('target_variables must be in [0, %d]'
                         % (gbrt.n_features - 1))

    if X is not None:
        # Derive the evaluation grid from the training data percentiles.
        X = check_array(X, dtype=DTYPE, order='C')
        grid, axes = _grid_from_X(X[:, target_variables], percentiles,
                                  grid_resolution)
    else:
        assert grid is not None
        # dont return axes if grid is given
        axes = None
        # grid must be 2d
        if grid.ndim == 1:
            grid = grid[:, np.newaxis]
        if grid.ndim != 2:
            raise ValueError('grid must be 2d but is %dd' % grid.ndim)

    grid = np.asarray(grid, dtype=DTYPE, order='C')
    assert grid.shape[1] == target_variables.shape[0]

    n_trees_per_stage = gbrt.estimators_.shape[1]
    n_estimators = gbrt.estimators_.shape[0]
    # Accumulate the contribution of every tree into pdp (scaled by the
    # model's learning rate inside the Cython helper).
    pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
                   order='C')
    for stage in range(n_estimators):
        for k in range(n_trees_per_stage):
            tree = gbrt.estimators_[stage, k].tree_
            _partial_dependence_tree(tree, grid, target_variables,
                                     gbrt.learning_rate, pdp[k])

    return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
taylort7147/udacity-projects | titanic_survival_exploration/titanic_visualizations.py | 24 | 5425 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def filter_data(data, condition):
    """
    Remove elements that do not match the condition provided.

    Takes a pandas DataFrame as input and returns the filtered frame with
    its index reset.

    Parameters
    ----------
    data : pandas.DataFrame
        The data to filter.
    condition : str
        A single condition string of the format '<field> <op> <value>',
        where the following operations are valid: >, <, >=, <=, ==, !=
        Examples: "Sex == 'male'", 'Age < 18'

    Returns
    -------
    pandas.DataFrame
        Rows of `data` matching the condition, index reset.
    """
    field, op, value = condition.split(" ")

    # Convert value into a number, or strip excess quotes if it is a string.
    # Only ValueError (non-numeric text) is expected here; the original bare
    # `except:` would also have hidden unrelated errors.
    try:
        value = float(value)
    except ValueError:
        value = value.strip("\'\"")

    # Build the boolean mask for the requested comparison
    if op == ">":
        matches = data[field] > value
    elif op == "<":
        matches = data[field] < value
    elif op == ">=":
        matches = data[field] >= value
    elif op == "<=":
        matches = data[field] <= value
    elif op == "==":
        matches = data[field] == value
    elif op == "!=":
        matches = data[field] != value
    else:  # catch invalid operation codes
        raise Exception("Invalid comparison operator. Only >, <, >=, <=, ==, != allowed.")

    # Filter data and reset the index so row positions are contiguous again
    data = data[matches].reset_index(drop = True)
    return data
def survival_stats(data, outcomes, key, filters = []):
    """
    Print and plot selected statistics regarding survival, given a feature
    of interest and any number of filters (including no filters).

    Parameters
    ----------
    data : pandas.DataFrame
        Titanic passenger features.
    outcomes : pandas.Series or DataFrame
        Survival outcomes ('Survived' column) aligned with `data`.
    key : str
        Feature (column) to visualize.
    filters : list of str, optional
        Condition strings understood by ``filter_data``. Note: the default
        is a shared list; it is safe only because it is never mutated here.

    Returns
    -------
    False on invalid `key`, otherwise None (displays a plot).
    """
    # Check that the key exists.  All prints use the parenthesized form so
    # the code works under both Python 2 and Python 3 (output unchanged).
    if key not in data.columns.values :
        print("'{}' is not a feature of the Titanic data. Did you spell something wrong?".format(key))
        return False

    # Return the function before visualizing if 'Cabin' or 'Ticket'
    # is selected: too many unique categories to display
    if(key == 'Cabin' or key == 'PassengerId' or key == 'Ticket'):
        print("'{}' has too many unique categories to display! Try a different feature.".format(key))
        return False

    # Merge data and outcomes into single dataframe
    all_data = pd.concat([data, outcomes], axis = 1)

    # Apply filters to data
    for condition in filters:
        all_data = filter_data(all_data, condition)

    # Create outcomes DataFrame restricted to the feature of interest
    all_data = all_data[[key, 'Survived']]

    # Create plotting figure
    plt.figure(figsize=(8,6))

    # 'Numerical' features: histogram survivors vs. non-survivors
    if(key == 'Age' or key == 'Fare'):

        # Remove NaN values from Age data
        all_data = all_data[~np.isnan(all_data[key])]

        # Divide the range of data into bins and count survival rates
        min_value = all_data[key].min()
        max_value = all_data[key].max()
        value_range = max_value - min_value

        # 'Fares' has larger range of values than 'Age' so create more bins
        if(key == 'Fare'):
            bins = np.arange(0, all_data['Fare'].max() + 20, 20)
        if(key == 'Age'):
            bins = np.arange(0, all_data['Age'].max() + 10, 10)

        # Overlay each bin's survival rates
        nonsurv_vals = all_data[all_data['Survived'] == 0][key].reset_index(drop = True)
        surv_vals = all_data[all_data['Survived'] == 1][key].reset_index(drop = True)
        plt.hist(nonsurv_vals, bins = bins, alpha = 0.6,
                 color = 'red', label = 'Did not survive')
        plt.hist(surv_vals, bins = bins, alpha = 0.6,
                 color = 'green', label = 'Survived')

        # Add legend to plot
        plt.xlim(0, bins.max())
        plt.legend(framealpha = 0.8)

    # 'Categorical' features: grouped bar chart per category
    else:

        # Set the various categories
        if(key == 'Pclass'):
            values = np.arange(1,4)
        if(key == 'Parch' or key == 'SibSp'):
            values = np.arange(0,np.max(data[key]) + 1)
        if(key == 'Embarked'):
            values = ['C', 'Q', 'S']
        if(key == 'Sex'):
            values = ['male', 'female']

        # Create DataFrame containing categories and count of each
        frame = pd.DataFrame(index = np.arange(len(values)), columns=(key,'Survived','NSurvived'))
        for i, value in enumerate(values):
            frame.loc[i] = [value, \
                   len(all_data[(all_data['Survived'] == 1) & (all_data[key] == value)]), \
                   len(all_data[(all_data['Survived'] == 0) & (all_data[key] == value)])]

        # Set the width of each bar
        bar_width = 0.4

        # Display each category's survival rates side by side
        for i in np.arange(len(frame)):
            nonsurv_bar = plt.bar(i-bar_width, frame.loc[i]['NSurvived'], width = bar_width, color = 'r')
            surv_bar = plt.bar(i, frame.loc[i]['Survived'], width = bar_width, color = 'g')

            plt.xticks(np.arange(len(frame)), values)
            plt.legend((nonsurv_bar[0], surv_bar[0]),('Did not survive', 'Survived'), framealpha = 0.8)

    # Common attributes for plot formatting
    plt.xlabel(key)
    plt.ylabel('Number of Passengers')
    plt.title('Passenger Survival Statistics With \'%s\' Feature'%(key))
    plt.show()

    # Report number of passengers with missing values
    if sum(pd.isnull(all_data[key])):
        nan_outcomes = all_data[pd.isnull(all_data[key])]['Survived']
        print("Passengers with missing '{}' values: {} ({} survived, {} did not survive)".format(
              key, len(nan_outcomes), sum(nan_outcomes == 1), sum(nan_outcomes == 0)))
| mit |
cbmoore/statsmodels | statsmodels/graphics/functional.py | 31 | 14477 | """Module for functional boxplots."""
from statsmodels.compat.python import combinations, range
import numpy as np
from scipy import stats
from scipy.misc import factorial
from . import utils
__all__ = ['fboxplot', 'rainbowplot', 'banddepth']
def fboxplot(data, xdata=None, labels=None, depth=None, method='MBD',
             wfactor=1.5, ax=None, plot_opts={}):
    """Plot functional boxplot.

    A functional boxplot is the analog of a boxplot for functional data.
    Functional data is any type of data that varies over a continuum, i.e.
    curves, probability distributions, seasonal data, etc.

    The data is first ordered, the order statistic used here is `banddepth`.
    Plotted are then the median curve, the envelope of the 50% central region,
    the maximum non-outlying envelope and the outlier curves.

    Parameters
    ----------
    data : sequence of ndarrays or 2-D ndarray
        The vectors of functions to create a functional boxplot from. If a
        sequence of 1-D arrays, these should all be the same size.
        The first axis is the function index, the second axis the one along
        which the function is defined.  So ``data[0, :]`` is the first
        functional curve.
    xdata : ndarray, optional
        The independent variable for the data.  If not given, it is assumed
        to be an array of integers 0..N with N the length of the vectors in
        `data`.
    labels : sequence of scalar or str, optional
        The labels or identifiers of the curves in `data`. If given,
        outliers are labeled in the plot.
    depth : ndarray, optional
        A 1-D array of band depths for `data`, or equivalent order
        statistic.  If not given, it will be calculated through `banddepth`.
    method : {'MBD', 'BD2'}, optional
        The method to use to calculate the band depth.  Default is 'MBD'.
    wfactor : float, optional
        Factor by which the central 50% region is multiplied to find the
        outer region (analog of "whiskers" of a classical boxplot).
    ax : Matplotlib AxesSubplot instance, optional
        If given, this subplot is used to plot in instead of a new figure
        being created.
    plot_opts : dict, optional
        A dictionary with plotting options.  Any of the following can be
        provided, if not present in `plot_opts` the defaults will be used:

        - 'cmap_outliers', a Matplotlib LinearSegmentedColormap instance.
        - 'c_inner', valid MPL color. Color of the central 50% region
        - 'c_outer', valid MPL color. Color of the non-outlying region
        - 'c_median', valid MPL color. Color of the median.
        - 'lw_outliers', scalar. Linewidth for drawing outlier curves.
        - 'lw_median', scalar. Linewidth for drawing the median curve.
        - 'draw_nonout', bool. If True, also draw non-outlying curves.

        This dict is never modified by this function (the default is a
        shared object).

    Returns
    -------
    fig : Matplotlib figure instance
        If `ax` is None, the created figure.  Otherwise the figure to which
        `ax` is connected.
    depth : ndarray
        1-D array containing the calculated band depths of the curves.
    ix_depth : ndarray
        1-D array of indices needed to order curves (or `depth`) from most
        to least central curve.
    ix_outliers : ndarray
        1-D array of indices of outlying curves in `data`.

    See Also
    --------
    banddepth, rainbowplot

    Notes
    -----
    The median curve is the curve with the highest band depth.  Outliers are
    curves that fall outside the band created by multiplying the central
    region by `wfactor`; a single data point outside the band is enough, so
    noisy data may require smoothing first.  The non-outlying region is the
    band made up of all the non-outlying curves.

    References
    ----------
    [1] Y. Sun and M.G. Genton, "Functional Boxplots", Journal of
        Computational and Graphical Statistics, vol. 20, pp. 1-19, 2011.
    [2] R.J. Hyndman and H.L. Shang, "Rainbow Plots, Bagplots, and Boxplots
        for Functional Data", vol. 19, pp. 29-25, 2010.

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> data = sm.datasets.elnino.load()
    >>> res = sm.graphics.fboxplot(data.raw_data[:, 1:], wfactor=2.58,
    ...                            labels=data.raw_data[:, 0].astype(int))

    .. plot:: plots/graphics_functional_fboxplot.py
    """
    fig, ax = utils.create_mpl_ax(ax)

    # Resolve the outlier colormap to a local.  The previous code wrote the
    # default back into `plot_opts`, which mutated the shared `{}` default
    # argument (leaking state across calls) and surprised callers that
    # passed their own dict.
    cmap_outliers = plot_opts.get('cmap_outliers')
    if cmap_outliers is None:
        from matplotlib.cm import rainbow_r
        cmap_outliers = rainbow_r

    data = np.asarray(data)
    if xdata is None:
        xdata = np.arange(data.shape[1])

    # Calculate band depth if required.
    if depth is None:
        if method not in ['MBD', 'BD2']:
            raise ValueError("Unknown value for parameter `method`.")

        depth = banddepth(data, method=method)
    else:
        if depth.size != data.shape[0]:
            raise ValueError("Provided `depth` array is not of correct size.")

    # Inner area is 25%-75% region of band-depth ordered curves.
    ix_depth = np.argsort(depth)[::-1]
    median_curve = data[ix_depth[0], :]
    ix_IQR = data.shape[0] // 2
    lower = data[ix_depth[0:ix_IQR], :].min(axis=0)
    upper = data[ix_depth[0:ix_IQR], :].max(axis=0)

    # Determine region for outlier detection
    inner_median = np.median(data[ix_depth[0:ix_IQR], :], axis=0)
    lower_fence = inner_median - (inner_median - lower) * wfactor
    upper_fence = inner_median + (upper - inner_median) * wfactor

    # Find outliers: any excursion beyond the fences flags the whole curve.
    ix_outliers = []
    ix_nonout = []
    for ii in range(data.shape[0]):
        if np.any(data[ii, :] > upper_fence) or np.any(data[ii, :] < lower_fence):
            ix_outliers.append(ii)
        else:
            ix_nonout.append(ii)

    ix_outliers = np.asarray(ix_outliers)

    # Plot envelope of all non-outlying data
    lower_nonout = data[ix_nonout, :].min(axis=0)
    upper_nonout = data[ix_nonout, :].max(axis=0)
    ax.fill_between(xdata, lower_nonout, upper_nonout,
                    color=plot_opts.get('c_outer', (0.75, 0.75, 0.75)))

    # Plot central 50% region
    ax.fill_between(xdata, lower, upper,
                    color=plot_opts.get('c_inner', (0.5, 0.5, 0.5)))

    # Plot median curve
    ax.plot(xdata, median_curve, color=plot_opts.get('c_median', 'k'),
            lw=plot_opts.get('lw_median', 2))

    # Plot outliers.  Guard the colormap normalization against a single
    # outlier, which previously raised ZeroDivisionError.
    cmap = cmap_outliers
    denom = max(len(ix_outliers) - 1, 1)
    for ii, ix in enumerate(ix_outliers):
        label = str(labels[ix]) if labels is not None else None
        ax.plot(xdata, data[ix, :],
                color=cmap(float(ii) / denom), label=label,
                lw=plot_opts.get('lw_outliers', 1))

    if plot_opts.get('draw_nonout', False):
        for ix in ix_nonout:
            ax.plot(xdata, data[ix, :], 'k-', lw=0.5)

    if labels is not None:
        ax.legend()

    return fig, depth, ix_depth, ix_outliers
def rainbowplot(data, xdata=None, depth=None, method='MBD', ax=None,
                cmap=None):
    """Create a rainbow plot for a set of curves.

    A rainbow plot contains line plots of all curves in the dataset, colored
    in order of functional depth.  The median curve is shown in black.

    Parameters
    ----------
    data : sequence of ndarrays or 2-D ndarray
        The vectors of functions to create a functional boxplot from. If a
        sequence of 1-D arrays, these should all be the same size.
        The first axis is the function index, the second axis the one along
        which the function is defined.  So ``data[0, :]`` is the first
        functional curve.
    xdata : ndarray, optional
        The independent variable for the data.  If not given, it is assumed
        to be an array of integers 0..N with N the length of the vectors in
        `data`.
    depth : ndarray, optional
        A 1-D array of band depths for `data`, or equivalent order
        statistic.  If not given, it will be calculated through `banddepth`.
    method : {'MBD', 'BD2'}, optional
        The method to use to calculate the band depth.  Default is 'MBD'.
    ax : Matplotlib AxesSubplot instance, optional
        If given, this subplot is used to plot in instead of a new figure
        being created.
    cmap : Matplotlib LinearSegmentedColormap instance, optional
        The colormap used to color curves with.  Default is a rainbow
        colormap, with red used for the most central and purple for the
        least central curves.

    Returns
    -------
    fig : Matplotlib figure instance
        If `ax` is None, the created figure.  Otherwise the figure to which
        `ax` is connected.

    See Also
    --------
    banddepth, fboxplot

    References
    ----------
    [1] R.J. Hyndman and H.L. Shang, "Rainbow Plots, Bagplots, and Boxplots
        for Functional Data", vol. 19, pp. 29-25, 2010.

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> data = sm.datasets.elnino.load()
    >>> res = sm.graphics.rainbowplot(data.raw_data[:, 1:])

    .. plot:: plots/graphics_functional_rainbowplot.py
    """
    fig, ax = utils.create_mpl_ax(ax)

    if cmap is None:
        from matplotlib.cm import rainbow_r
        cmap = rainbow_r

    data = np.asarray(data)
    if xdata is None:
        xdata = np.arange(data.shape[1])

    # Calculate band depth if required.
    if depth is None:
        if method not in ['MBD', 'BD2']:
            raise ValueError("Unknown value for parameter `method`.")

        depth = banddepth(data, method=method)
    else:
        if depth.size != data.shape[0]:
            raise ValueError("Provided `depth` array is not of correct size.")

    ix_depth = np.argsort(depth)[::-1]

    # Plot all curves, colored by depth.  Guard the colormap normalization
    # against a single-curve input, which previously raised
    # ZeroDivisionError.
    num_curves = data.shape[0]
    denom = max(num_curves - 1., 1.)
    for ii in range(num_curves):
        ax.plot(xdata, data[ix_depth[ii], :], c=cmap(ii / denom))

    # Plot the median curve on top in black
    median_curve = data[ix_depth[0], :]
    ax.plot(xdata, median_curve, 'k-', lw=2)

    return fig
def banddepth(data, method='MBD'):
    """Calculate the band depth for a set of functional curves.

    Band depth is an order statistic for functional data (see `fboxplot`),
    with a higher band depth indicating larger "centrality".  In analog to
    scalar data, the functional curve with highest band depth is called the
    median curve, and the band made up from the first N/2 of N curves is the
    50% central region.

    Parameters
    ----------
    data : ndarray
        The vectors of functions to create a functional boxplot from.
        The first axis is the function index, the second axis the one along
        which the function is defined.  So ``data[0, :]`` is the first
        functional curve.
    method : {'MBD', 'BD2'}, optional
        Whether to use the original band depth (with J=2) of [1]_ or the
        modified band depth.  See Notes for details.

    Returns
    -------
    depth : ndarray
        Depth values for functional curves.

    Notes
    -----
    The method 'BD2' checks for each curve whether it lies completely inside
    bands constructed from two curves.  All permutations of two curves in
    the set of curves are used, and the band depth is normalized to one.
    Due to the complete curve having to fall within the band, this method
    yields a lot of ties.

    The method 'MBD' is similar to 'BD2', but checks the fraction of the
    curve falling within the bands.  It therefore generates very few ties.

    References
    ----------
    .. [1] S. Lopez-Pintado and J. Romo, "On the Concept of Depth for
           Functional Data", Journal of the American Statistical
           Association, vol. 104, pp. 718-734, 2009.
    .. [2] Y. Sun and M.G. Genton, "Functional Boxplots", Journal of
           Computational and Graphical Statistics, vol. 20, pp. 1-19, 2011.
    """
    def _band2(x1, x2, curve):
        # 1 if `curve` lies entirely within the band spanned by x1 and x2
        xb = np.vstack([x1, x2])
        if np.any(curve < xb.min(axis=0)) or np.any(curve > xb.max(axis=0)):
            res = 0
        else:
            res = 1

        return res

    def _band_mod(x1, x2, curve):
        # fraction of `curve` lying within the band spanned by x1 and x2
        xb = np.vstack([x1, x2])
        res = np.logical_and(curve >= xb.min(axis=0),
                             curve <= xb.max(axis=0))
        return np.sum(res) / float(res.size)

    if method == 'BD2':
        band = _band2
    elif method == 'MBD':
        band = _band_mod
    else:
        raise ValueError("Unknown input value for parameter `method`.")

    num = data.shape[0]
    ix = np.arange(num)
    # Number of curve pairs, i.e. binomial(num, 2).  The original computed
    # this inside the loop as factorial(num) / 2. / factorial(num - 2),
    # which is loop-invariant and overflows to inf (giving nan depths) for
    # num > 170 with scipy's float factorial; the closed form is exact.
    normfactor = num * (num - 1) / 2.
    depth = []
    for ii in range(num):
        res = 0
        for ix1, ix2 in combinations(ix, 2):
            res += band(data[ix1, :], data[ix2, :], data[ii, :])

        # Normalize by number of combinations to get band depth
        depth.append(float(res) / normfactor)

    return np.asarray(depth)
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/event_handling/figure_axes_enter_leave.py | 1 | 1669 | """
=======================
Figure Axes Enter Leave
=======================
Illustrate the figure and axes enter and leave events by changing the
frame colors on enter and leave
"""
from __future__ import print_function
import matplotlib.pyplot as plt
def enter_axes(event):
    """Highlight the axes under the cursor in yellow and redraw."""
    hovered = event.inaxes
    print('enter_axes', hovered)
    hovered.patch.set_facecolor('yellow')
    event.canvas.draw()
def leave_axes(event):
    """Restore the left axes' background to white and redraw."""
    left = event.inaxes
    print('leave_axes', left)
    left.patch.set_facecolor('white')
    event.canvas.draw()
def enter_figure(event):
    """Tint the entered figure's background red and redraw."""
    canvas = event.canvas
    print('enter_figure', canvas.figure)
    canvas.figure.patch.set_facecolor('red')
    canvas.draw()
def leave_figure(event):
    """Return the left figure's background to grey and redraw."""
    canvas = event.canvas
    print('leave_figure', canvas.figure)
    canvas.figure.patch.set_facecolor('grey')
    canvas.draw()
###############################################################################
# Figure 1: wire both figure-level and axes-level enter/leave events to the
# handlers above, so hovering recolors the figure or the hovered axes.
fig1, (ax, ax2) = plt.subplots(2, 1)
fig1.suptitle('mouse hover over figure or axes to trigger events')
fig1.canvas.mpl_connect('figure_enter_event', enter_figure)
fig1.canvas.mpl_connect('figure_leave_event', leave_figure)
fig1.canvas.mpl_connect('axes_enter_event', enter_axes)
fig1.canvas.mpl_connect('axes_leave_event', leave_axes)
###############################################################################
# Figure 2: a second, independently wired figure demonstrating that the same
# handlers can serve multiple canvases (the event carries its own canvas).
fig2, (ax, ax2) = plt.subplots(2, 1)
fig2.suptitle('mouse hover over figure or axes to trigger events')
fig2.canvas.mpl_connect('figure_enter_event', enter_figure)
fig2.canvas.mpl_connect('figure_leave_event', leave_figure)
fig2.canvas.mpl_connect('axes_enter_event', enter_axes)
fig2.canvas.mpl_connect('axes_leave_event', leave_axes)
# Block until the interactive windows are closed.
plt.show()
| mit |
Achuth17/scikit-learn | examples/cluster/plot_kmeans_digits.py | 53 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
# Fix the RNG seed so repeated runs give reproducible k-means initializations.
np.random.seed(42)
digits = load_digits()
# Standardize features to zero mean / unit variance before clustering.
data = scale(digits.data)
n_samples, n_features = data.shape
# One cluster per distinct target class (the 10 digits).
n_digits = len(np.unique(digits.target))
# Ground-truth labels, used by bench_k_means for the quality metrics.
labels = digits.target
# Subsample size for the (expensive) silhouette computation.
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
      % (n_digits, n_samples, n_features))
print(79 * '_')
# Header row for the metrics table; the two adjacent string literals are
# concatenated at parse time before the %-formatting is applied.
print('% 9s' % 'init'
      ' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
    """Fit `estimator` on `data` and print one row of the benchmark table.

    The row contains the fit time, inertia, and several clustering quality
    scores computed against the module-level ground-truth ``labels``; the
    silhouette score is estimated on ``sample_size`` points.
    """
    start = time()
    estimator.fit(data)
    elapsed = time() - start
    assigned = estimator.labels_
    quality = (
        metrics.homogeneity_score(labels, assigned),
        metrics.completeness_score(labels, assigned),
        metrics.v_measure_score(labels, assigned),
        metrics.adjusted_rand_score(labels, assigned),
        metrics.adjusted_mutual_info_score(labels, assigned),
        metrics.silhouette_score(data, assigned,
                                 metric='euclidean',
                                 sample_size=sample_size),
    )
    print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
          % ((name, elapsed, estimator.inertia_) + quality))
# Benchmark the three initialization strategies on the full-dimensional data.
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
              name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
              name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
              name="PCA-based",
              data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, m_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
# mesh point via the trained model.
# NOTE(review): "+ 1" on min and "- 1" on max shrink the window *inside*
# the data range; padding outward (min - 1, max + 1) is the usual intent --
# confirm before changing.
x_min, x_max = reduced_data[:, 0].min() + 1, reduced_data[:, 0].max() - 1
y_min, y_max = reduced_data[:, 1].min() + 1, reduced_data[:, 1].max() - 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()),
           cmap=plt.cm.Paired,
           aspect='auto', origin='lower')
# Overlay the (PCA-reduced) data points on the decision regions.
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
            marker='x', s=169, linewidths=3,
            color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
          'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
adybbroe/atrain_match | atrain_match/reshaped_files_scr/pps_vrreport_ctth_stats.py | 2 | 34243 | """Read all matched data and make some plotting
"""
import os
import re
from glob import glob
import numpy as np
from utils.get_flag_info import get_calipso_clouds_of_type_i_feature_classification_flags_one_layer
import matplotlib.pyplot as plt
import matplotlib
from utils.get_flag_info import (get_calipso_low_medium_high_classification,
get_inversion_info_pps2014,
get_calipso_clouds_of_type_i,
get_calipso_high_clouds,
get_calipso_medium_clouds,
get_calipso_low_clouds)
from scipy.stats import kurtosis, skewtest, skew, mode, kurtosis
from utils.stat_util import (my_hist,
my_iqr,
my_rms,
my_mae,
half_sample_mode,
half_sample_mode,
my_pe250m,
my_pe500m,
my_pe1000m,
my_pe2000m,
my_pe2500m,
my_pe5000m)
# Reset matplotlib to library defaults, then bump the global font size so
# all figures produced by this script share the same large text.
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
matplotlib.rcParams.update({'font.size': 20})
#plt.rc('text', usetex=True)
#plt.rc('font', family='serif')
#matplotlib.use('ps')
from matplotlib import rc
from my_dir import ADIR
#rc('text',usetex=True)
#rc('text.latex', preamble='\usepackage{color}')
# Results file for the CTTH validation statistics.
out_filename = ADIR + "/Documents/A_PPS_v2017/Validation_2018/results_ctth_polarn.txt"
# NOTE(review): opened in append mode at import time and never explicitly
# closed/flushed anywhere visible here -- relies on interpreter exit.
out_file_h = open(out_filename,'a')
def my_make_plot3(y, x, x2, mhl, use):
    """Plot imager heights (blue) against CALIOP top-layer heights (green)
    for pixels flagged as single-layer thin clouds, ordered by the CALIOP
    height so the two series can be compared visually.

    ``x2`` is accepted for signature parity with the sibling plot helpers
    but is not used here.
    """
    figure = plt.figure(figsize=(15, 11))
    axis = figure.add_subplot(111)
    selected = np.logical_and(mhl["all_clouds_tp_thin_1layers"], use)
    # absolute error kept for parity with the original code (not plotted)
    abs_err = np.abs(y[selected] - x[selected])
    order = np.argsort(x[selected])
    plt.plot(x[selected][order], 'g.')
    plt.plot(y[selected][order], 'b.', alpha=0.2)
    #plt.show()
def my_make_plot2(y, x, x2, mhl,use):
    """Analyse where the retrieved height y falls relative to the CALIOP
    top-layer height x and second-layer height x2 (all in metres --
    presumably; TODO confirm units against the caller), print category
    percentages and plot the pixels closest to each layer.

    ``mhl`` is accepted but not used in this function.
    """
    fig = plt.figure(figsize=(15, 11))
    ax = fig.add_subplot(321)
    # keep only pixels where a second layer exists (x2 > 0)
    use_k = np.logical_and(use,x2>0)
    #use_k = np.logical_and(use_k,x-x2>3000)
    print(min(y[use_k]), len(use[use_k]))
    # absolute error against the top layer and against the second layer
    abias1 = np.abs(y[use_k]-x[use_k])
    abias2 = np.abs(y[use_k]-x2[use_k])
    # half / quarter of the layer separation (dist25 is currently unused)
    dist = 0.5*np.abs(x2[use_k]-x[use_k])
    dist25 = 0.25*np.abs(x2[use_k]-x[use_k])
    # pixels between the layers, split by which layer they are closer to
    closer_to_top = np.logical_and(abias1<=abias2, np.logical_and(y[use_k]<=x[use_k], y[use_k]>=x2[use_k]))
    closer_to_2 = np.logical_and(abias1>abias2, np.logical_and(y[use_k]<=x[use_k], y[use_k]>=x2[use_k]))
    in_between = np.logical_and(y[use_k]<(x[use_k]), y[use_k]>(x2[use_k]))
    # strictly between and at least 1500 m from both layers
    in_between_2 = np.logical_and(in_between, np.logical_and(abias1>=1500,abias2>=1500))
    # within 1500 m of the second layer but >2000 m from the top layer
    close_to_level_2 = np.logical_and(abias1>2000,abias2<1500)
    close_to_level_1 = abias1<1500
    # below the second layer / above the top layer by more than 1500 m
    lower = np.logical_and(abias2>1500,y[use_k]<x2[use_k])
    above = np.logical_and(abias1>1500,y[use_k]>x[use_k])
    # report each category as a percentage of the selected pixels
    print("between", np.sum(in_between)*100.0/len(in_between),np.sum(in_between_2)*100.0/len(in_between_2))
    print("det layer two", np.sum(close_to_level_2)*100.0/len(close_to_level_2))
    print("det layer one", np.sum(close_to_level_1)*100.0/len(close_to_level_1))
    print("lower", np.sum(lower)*100.0/len(lower))
    print("above", np.sum(above)*100.0/len(above))
    # order pixels by the smaller of the two errors for the sorted plots
    sort_ind = np.argsort(np.where(abias1<abias2, abias1, abias2))
    print( np.mean(np.where(abias1<abias2, abias1, abias2)), np.mean(abias1), np.mean(abias2))
    print( np.sum(np.where(abias1<abias2, abias1, abias2)<1000)*100.0/len(abias1))
    print( np.sum(np.where(abias1<abias2, abias1, abias2)<1500)*100.0/len(abias1)) #(70%)
    sort_ind_top = np.argsort(abias1[closer_to_top])
    sort_ind_2 = np.argsort(abias2[closer_to_2])
    # panels 1-2: heights for pixels closer to the top / second layer
    plt.plot(x[use_k][closer_to_top][sort_ind_top],'g.')
    plt.plot(x2[use_k][closer_to_top][sort_ind_top],'r.')
    plt.plot(y[use_k][closer_to_top][sort_ind_top],'b.')
    ax = fig.add_subplot(322)
    plt.plot(x[use_k][closer_to_2][sort_ind_2],'g.')
    plt.plot(x2[use_k][closer_to_2][sort_ind_2],'r.')
    plt.plot(y[use_k][closer_to_2][sort_ind_2],'b.')
    # panels 3-4: half layer-separation (cyan) vs the actual error (black)
    ax = fig.add_subplot(323)
    plt.plot(dist[closer_to_top][sort_ind_top], '.c')
    plt.plot(abias1[closer_to_top][sort_ind_top], 'k')
    ax = fig.add_subplot(324)
    plt.plot(dist[closer_to_2][sort_ind_2], '.c')
    plt.plot(abias2[closer_to_2][sort_ind_2], 'k')
    #plt.plot(abias1[sort_ind],'g.')
    #plt.plot(abias2[sort_ind],'r.')
    #plt.plot(np.where(abias1<abias2, abias1, abias2)[sort_ind],'k')
    #plt.show()
def my_adjust_axis(ax):
    """Apply the shared cosmetic setup used by the CTTH error-distribution
    panels: hide the top/right spines, draw a grid, place a white-framed
    legend outside the axes, and fix the axis limits and y-ticks.
    """
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.grid(True)
    leg = plt.legend(loc="upper right", markerscale=1., numpoints=1, scatterpoints=1,
                     bbox_to_anchor=(1.2, 1.05), framealpha=1.0, frameon=True)
    leg.get_frame().set_edgecolor('w')
    leg.get_frame().set_facecolor('w')
    #leg.get_frame().set_linewidth(0.0)
    ax.set_ylim(0, 10)
    ax.set_xlim(-4, 6)
    # The original called plt.yticks(np.arange(0, 9, 2.0)) twice in a row;
    # the second identical call was a no-op and has been removed.
    # NOTE(review): it may have been intended as plt.xticks(...) -- confirm
    # with the plot author before changing behavior.
    plt.yticks(np.arange(0, 9, 2.0))
def my_legend_text_6(data, text = "text"):
    """Format the six-statistic legend label (bias, STD, MAE, IQR, median,
    PE0.5) for a vector of height errors.

    Parameters
    ----------
    data : array-like
        Height differences (presumably in metres, matching the 'm' suffix
        in the label -- confirm against callers).
    text : str, optional
        Unused; kept for call-site compatibility.

    Returns
    -------
    str
        Multi-line label string ending with '%' (for the PE0.5 percentage).
    """
    #label1 = text
    # np.int was only an alias for the builtin int and was removed in
    # NumPy 1.24; calling int() directly is behavior-identical.
    label2 = "bias={:d}m\n".format(int(np.mean(data)))
    label3 = "STD={:d}m\n".format(int(np.std(data)))
    label4 = "MAE={:d}m\nIQR={:d}m\nQ2={:d}m\nPE0.5={:d}".format(
        #int(my_rms(data)),
        int(my_mae(data)),
        int(my_iqr(data)),
        int(np.median(data)),
        int(my_pe500m(data))
    )
    label = label2 + label3 + label4 +'%'
    return label
def my_make_plot_example(bias, use, label_str):
    """Draw the four-panel CTTH error-distribution figure comparing two
    synthetic reference distributions (Gaussian and bi-modal) with the
    observed PPS height-error distribution, and save it as PNG and PDF
    under ``ADIR``.

    Parameters
    ----------
    bias : array-like
        Height errors (metres -- presumably; TODO confirm with caller).
    use : array-like of bool
        Selection mask applied to `bias` via my_hist.
    label_str : str
        Suffix for the output file names.
    """
    plt.style.use('seaborn-white')
    fig = plt.figure(figsize=(11, 9))
    #plt.suptitle("CTTH error distributions not well described by RMS and bias")
    n_pix = 1000000
    ax = fig.add_subplot(221)
    # synthetic reference distributions: one Gaussian, one bi-modal mixture
    temp_data = np.random.normal(900,1600, n_pix)
    temp_data2 = np.concatenate([np.random.normal(-900,200, int(0.5*n_pix)), np.random.normal(+900,200, int(0.5*n_pix))])
    # histogram all three datasets on the same +-20 km / 100 m binning;
    # x_m is reused because the bin edges are identical for all three
    hist_heights, x_m, dummy = my_hist(temp_data, None, bmin=-20*1000, bmax=20*1000, delta_h=100.0)
    hist_heights2, x_m, dummy = my_hist(temp_data2, None, bmin=-20*1000, bmax=20*1000, delta_h=100.0)
    hist_heights3, x_m, dummy = my_hist(bias, use, bmin=-20*1000, bmax=20*1000, delta_h=100.0)
    # convert bin centres from metres to kilometres for the x-axis
    x_ = x_m*0.001
    ax = fig.add_subplot(221)
    #S1: panel (a), the Gaussian reference
    ax.set_title('a) Within threshold accuracy', loc='left')
    ax.fill(x_, hist_heights, color='silver',
            label = my_legend_text_6(temp_data, "Gaussian"))
    # plt.plot([0.001*np.mean(temp_data),0.001*np.mean(temp_data)], [0,2.4], 'k:')
    ax.set_ylabel('Percent')
    my_adjust_axis(ax)
    ax.set_xticklabels([])
    #S2: panel (b), the bi-modal reference
    ax = fig.add_subplot(222)
    ax.set_title('b) Within target accuracy', loc='left')
    ax.fill(x_, hist_heights2, color='grey',
            label = my_legend_text_6(temp_data2, "Bi-modal"))
    my_adjust_axis(ax)
    ax.set_yticklabels([])
    ax.set_xticklabels([])
    #S3: panel (c), the observed PPS error distribution
    ax = fig.add_subplot(223)
    ax.set_title('c) Outside threshold accuracy', loc='left')
    bias_i = bias[use]
    plt.plot(x_, hist_heights3, "r-",
             label = my_legend_text_6(bias_i, "PPS S-NPP"))
    #plt.plot([0.001*np.mean(bias_i),0.001*np.mean(bias_i)], [0,2], 'r:')
    my_adjust_axis(ax)
    ax.set_ylabel('Percent')
    ax.set_xlabel('error (km)')
    #S4: panel (d), all three distributions overlaid
    ax = fig.add_subplot(224)
    ax.set_title('d) The worst (in STD) is the best', loc='left')
    ax.fill(x_, hist_heights, color='silver', label='Gaussian')
    ax.fill(x_, hist_heights2, color='grey', label='Bimodal')
    plt.plot(x_, hist_heights3, "r-", label='NN-CTTH')
    ax.set_xlabel('error (km)')
    my_adjust_axis(ax)
    ax.set_yticklabels([])
    # save both raster and vector versions of the figure
    plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/VAL_2018_PLOTS/val_report_ctth_error_dist_%s.png"%(label_str),bbox_inches='tight')
    plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/VAL_2018_PLOTS/val_report_ctth_error_dist_%s.pdf"%(label_str),bbox_inches='tight')
    #plt.show()
def my_make_plot_example_aprox(bias, use, label_str, caObj):
    """Compare the PPS CTTH error distribution with synthetic approximations.

    Builds Gaussian approximations matched on bias/STD and on median/IQR
    (both overall and summed per CALIPSO cloud type), plus a Laplace fit,
    and saves several report figures (PNG+PDF) under
    ``ADIR/PICTURES_FROM_PYTHON/VAL_2018_PLOTS/``.

    NOTE(review): the inner ``gaussian_fit`` calls ``plt.show()``, which
    blocks until the window is closed -- confirm this is wanted in batch runs.
    """
    def my_legend_text(data, text="text"):
        # short legend variant: bias/STD/IQR/Q2 only (no MAE/PE)
        #label1 = text
        label2 = "bias: {:d}\n".format(np.int(np.mean(data)))
        label3 = "STD: {:d}\n".format(np.int(np.std(data)))
        label4 = "IQR: {:d}\nQ2: {:d}".format(
            #np.int(my_rms(data)),
            #np.int(my_mae(data)),
            np.int(my_iqr(data)),
            np.int(np.median(data)),
            #np.int(my_pe500m(data))
        )
        label = label2 + label3 + label4
        return label
    bias_i = bias[use]
    n_pix = 1000000
    N_all = np.sum(use)
    temp_data_gs = []
    temp_data_iqrs = []
    from utils.get_flag_info import get_calipso_clouds_of_type_i
    # build synthetic distributions per CALIPSO cloud type, each weighted
    # by that type's share of the matched pixels
    for type_i in range(0, 8):
        #if type_i ==1:
        #    continue
        is_type_i = np.logical_and(use, get_calipso_clouds_of_type_i(caObj, calipso_cloudtype=type_i))
        n_pix_i = np.int(np.sum(is_type_i)*1.0/N_all*n_pix)
        temp_data_gi = np.random.normal(np.mean(bias[is_type_i]), np.std(bias[is_type_i]), n_pix_i)
        # 20/27 scales the IQR to an STD-like width for the normal draw
        temp_data_iqri = np.random.normal(np.median(bias[is_type_i]), my_iqr(bias[is_type_i])*20.0/27, n_pix_i)
        if len(temp_data_gs) == 0:
            temp_data_gs = temp_data_gi
            temp_data_iqrs = temp_data_iqri
        else:
            temp_data_gs = np.concatenate([temp_data_gi, temp_data_gs])
            temp_data_iqrs = np.concatenate([temp_data_iqri, temp_data_iqrs])
    # overall (not per-type) approximations
    temp_data_g = np.random.normal(np.mean(bias_i), np.std(bias_i), n_pix)
    temp_data_iqr = np.random.normal(np.median(bias_i), my_iqr(bias_i)*20.0/27, n_pix)
    #temp_data2 = np.concatenate([np.random.normal(-900,200, int(0.5*n_pix)), np.random.normal(+900,200, int(0.5*n_pix))])
    hist_heights_pps, x_m, dummy = my_hist(bias_i, None, bmin=-20*1000, bmax=20*1000, delta_h=100.0)
    hist_heights_g, x_m, dummy = my_hist(temp_data_g, None, bmin=-20*1000, bmax=20*1000, delta_h=100.0)
    hist_heights_iqr, x_m, dummy = my_hist(temp_data_iqr, None, bmin=-20*1000, bmax=20*1000, delta_h=100.0)
    hist_heights_gs, x_m, dummy = my_hist(temp_data_gs, None, bmin=-20*1000, bmax=20*1000, delta_h=100.0)
    hist_heights_iqrs, x_m, dummy = my_hist(temp_data_iqrs, None, bmin=-20*1000, bmax=20*1000, delta_h=100.0)
    #hist_heights3, x_m, dummy = my_hist(bias, use, bmin=-20*1000, bmax=20*1000, delta_h=100.0)
    x_ = x_m*0.001  # x-axis rescaled to km
    def gaussian_fit(x, y, bias):
        # NOTE(review): despite the name, nothing is fitted -- the
        # curve_fit call is commented out and median/IQR-derived values
        # are returned directly (and the plotted curve is Laplace-shaped).
        n = len(x)  #the number of data
        mean = np.median(bias)  #note this correction
        sigma = my_iqr(bias)*21.0/27  #note this correction
        from scipy.optimize import curve_fit
        # NOTE(review): `asarray`/`exp` were removed from the top-level
        # scipy namespace in newer SciPy releases -- confirm the pinned
        # SciPy version still provides them.
        from scipy import asarray as ar, exp
        def gaus(x, a, x0, sigma):
            return 8*exp(-np.abs(x-x0)*np.sqrt(2)/(sigma))
        #popt,pcov = curve_fit(gaus,x,y,p0=[1,mean,sigma])
        plt.plot(x, y, 'b+:', label='data')
        plt.plot(x, gaus(x, 1, mean, sigma), 'ro:', label='fit')
        plt.show()
        return 1, mean, sigma
    dummy, mean_g, sigma_g = gaussian_fit(x_m, hist_heights_pps, bias_i)
    # Laplace distribution matched to the PPS median and IQR
    temp_data_fitted = np.random.laplace(np.median(bias), my_iqr(bias)*32/26.0, n_pix)
    hist_heights_fitted, x_m, dummy = my_hist(temp_data_fitted, None, bmin=-20*1000, bmax=20*1000, delta_h=100.0)
    # figure 1: fitted (Laplace) distribution vs PPS
    fig = plt.figure(figsize=(7, 5))
    ax = fig.add_subplot(111)
    ax.fill(x_, hist_heights_fitted, color='silver',
            label="\nGaussian\n" + my_legend_text_6(temp_data_fitted, ))
    plt.plot(x_, hist_heights_pps, "r-",
             label="NN-CTTH\n"+my_legend_text_6(bias_i, "PPS" ))
    ax.set_xlim(-4, 6)
    leg = plt.legend(loc="upper right", markerscale=2., numpoints=1, scatterpoints=1, bbox_to_anchor=(1.13, 1.2), framealpha=1.0, frameon=True)
    leg.get_frame().set_facecolor('w')
    plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/VAL_2018_PLOTS/val_report_ctth_error_and_gaussian_fitted_%s.png"%(label_str), bbox_inches='tight')
    plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/VAL_2018_PLOTS/val_report_ctth_error_and_gaussian_fitted_%s.pdf"%(label_str), bbox_inches='tight')
    # figure 2: bias/STD-matched Gaussian vs PPS
    plt.style.use('seaborn-white')
    fig = plt.figure(figsize=(7, 5))
    #plt.suptitle("CTTH error distributions not well described by RMS and bias")
    ax = fig.add_subplot(111)
    #S1
    #ax.set_title('PPS-v2018', loc='left', zorder=30)
    ax.fill(x_, hist_heights_g, color='silver',
            label="\nGaussian\n" + my_legend_text(temp_data_g, ))
    plt.plot(x_, hist_heights_pps, "r-",
             label="NN-CTTH\n"+my_legend_text(bias_i, "PPS" ))
    plt.plot([0.001*np.mean(bias_i), 0.001*np.mean(bias_i)], [0, 2], 'r:')
    ax.set_xlim(-4, 6)
    ax.set_ylabel('Percent')
    ax.set_xlabel('Error (km)')
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.grid(True)
    leg = plt.legend(loc="upper right", markerscale=2., numpoints=1, scatterpoints=1, bbox_to_anchor=(1.13, 1.2), framealpha=1.0, frameon=True)
    leg.get_frame().set_facecolor('w')
    leg.get_frame().set_linewidth(0.0)
    #import pdb
    #pdb.set_trace()
    # fraction of the distribution inside the plotted [-4, 6] km window
    print("inside {:3.1f}".format(np.sum(hist_heights_pps[np.logical_and(x_>=-4, x_<=6)])))
    print("gaussian inside {:3.1f}".format(np.sum(hist_heights_g[np.logical_and(x_>=-4, x_<=6)])))
    #plt.legend(frameon=False)
    ax.set_ylim(0, 10)
    ax.set_xlim(-4, 6)
    plt.yticks(np.arange(0, 9, 2.0))
    plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/VAL_2018_PLOTS/val_report_ctth_error_and_gaussian_%s.png"%(label_str), bbox_inches='tight')
    plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/VAL_2018_PLOTS/val_report_ctth_error_and_gaussian_%s.pdf"%(label_str), bbox_inches='tight')
    # figure 3: PPS vs its mean-shifted ("bias corrected") version, 2 panels
    plt.style.use('seaborn-white')
    fig = plt.figure(figsize=(12, 4.5))
    #plt.suptitle("CTTH error distributions not well described by RMS and bias")
    ax = fig.add_subplot(121)
    #S1
    ax.grid(True)
    ax.set_title('PPS-v2018', loc='left', zorder=30)
    #ax.fill(x_, hist_heights_g, color='silver',
    #        label = my_legend_text(temp_data_g, "Gaussian (STD)"))
    plt.fill(x_, hist_heights_pps, "r-",  #alpha = 0.5,
             label=my_legend_text_6(bias_i, "PPS" ))
    plt.plot(x_-0.001*np.mean(bias_i), hist_heights_pps, "b-")
    ax.set_xlim(-4, 6)
    ax.set_ylabel('Percent')
    ax.set_xlabel('Error (km)')
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    leg = plt.legend(loc="upper right", markerscale=2., numpoints=1, scatterpoints=1, bbox_to_anchor=(1.13, 1.2), framealpha=1.0, frameon=True)
    leg.get_frame().set_facecolor('w')
    leg.get_frame().set_linewidth(0.0)
    ax.set_ylim(0, 10)
    ax.set_xlim(-4, 6)
    plt.yticks(np.arange(0, 9, 2.0))
    ax = fig.add_subplot(122)
    ax.grid(True)
    #S1
    ax.set_title('Bias "corrected"', loc='left', zorder=30)
    #ax.fill(x_-0.001*np.mean(temp_data_g), hist_heights_g, color='silver',
    #        label = my_legend_text(temp_data_g-0.001*np.mean(temp_data_g), "Gaussian (STD)"))
    plt.fill(x_-0.001*np.mean(bias_i), hist_heights_pps, "b-",  #alpha = 0.5,
             label=my_legend_text_6(bias_i-np.mean(bias_i), "PPS" ))
    plt.plot(x_, hist_heights_pps, "r-")
    ax.set_xlim(-4, 6)
    #ax.set_ylabel('Percent')
    ax.set_xlabel('Error (km)')
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    leg = plt.legend(loc="upper right", markerscale=2., numpoints=1, scatterpoints=1, bbox_to_anchor=(1.13, 1.2), framealpha=1.0, frameon=True)
    leg.get_frame().set_facecolor('w')
    leg.get_frame().set_linewidth(0.0)
    ax.set_ylim(0, 10)
    ax.set_xlim(-4, 6)
    plt.yticks(np.arange(0, 9, 2.0))
    plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/VAL_2018_PLOTS/val_report_ctth_error_and_gaussian_and_biascorr_%s.png"%(label_str), bbox_inches='tight')
    plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/VAL_2018_PLOTS/val_report_ctth_error_and_gaussian_and_biascorr_%s.pdf"%(label_str), bbox_inches='tight')
    plt.close()
    # figure 4: all four approximations, one per panel
    plt.style.use('seaborn-white')
    fig = plt.figure(figsize=(15, 11))
    #plt.suptitle("CTTH error distributions not well described by RMS and bias")
    ax = fig.add_subplot(221)
    #S1
    ax.set_title('a) Equal bias/std')
    ax.fill(x_, hist_heights_g, color='silver',
            label=my_legend_text(temp_data_g, "Gaussian (STD)"))
    plt.plot(x_, hist_heights_pps, "r-",
             label="\n" + my_legend_text(bias_i, "PPS"))
    ax.set_ylabel('Percent')
    my_adjust_axis(ax)
    ax.set_xticklabels([])
    #S2
    ax = fig.add_subplot(222)
    ax.set_title('b) Equal IQR/median')
    ax.fill(x_, hist_heights_iqr, color='grey',
            label="\n" + my_legend_text(temp_data_iqr, "Gaussian (IQR)"))
    plt.plot(x_, hist_heights_pps, "r-")  #, label='PPS')
    my_adjust_axis(ax)
    ax.set_yticklabels([])
    ax.set_xticklabels([])
    #S3
    ax = fig.add_subplot(223)
    ax.set_title('c) Equal bias/std, sum of types')
    plt.fill(x_, hist_heights_gs, color='silver',
             label="\n" + my_legend_text(temp_data_gs, "Gaussian \Sum (STD)"))
    plt.plot(x_, hist_heights_pps, "r-")  #, label='PPS')
    my_adjust_axis(ax)
    ax.set_ylabel('Percent')
    ax.set_xlabel('error (km)')
    #S4
    ax = fig.add_subplot(224)
    ax.set_title('d) Equal IQR/median, sum over cloud types')
    ax.fill(x_, hist_heights_iqrs, color='grey',
            label="\n" + my_legend_text(temp_data_iqrs, "Gaussian \Sum (IQR)"))
    plt.plot(x_, hist_heights_pps, "r-")  #, label='PPS')
    ax.set_xlabel('error (km)')
    my_adjust_axis(ax)
    ax.set_yticklabels([])
    plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/VAL_2018_PLOTS/val_report_ctth_error_aprox_dist_%s.png"%(label_str), bbox_inches='tight')
    plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/VAL_2018_PLOTS/val_report_ctth_error_aprox_dist_%s.pdf"%(label_str), bbox_inches='tight')
    #plt.show()
def my_print_one_line(out_file_h, bias, x, y, use_ind, compare_name, flag_key):
    """Format one line of height-error statistics and write it to *out_file_h*.

    The line fields are, in order: label ({compare_name}_{flag_key}), bias
    (mean), median, IQR, PE500, PE1000, MAE, STD, N, RMS, mean truth
    height, mean imager height, PE250, PE2000, PE2500, PE5000, half-sample
    mode and skewness.  The line is also echoed to stdout.
    """
    # select once instead of re-indexing for every statistic
    sel = bias[use_ind]
    fields = (
        compare_name, flag_key,
        np.mean(sel),
        np.median(sel),
        my_iqr(sel),
        my_pe500m(sel),
        my_pe1000m(sel),
        my_mae(sel),
        np.std(sel),
        len(sel),
        my_rms(sel),
        np.mean(x[use_ind]),
        np.mean(y[use_ind]),
        my_pe250m(sel),
        my_pe2000m(sel),
        my_pe2500m(sel),
        my_pe5000m(sel),
        half_sample_mode(sel),
        skew(sel),
        #kurtosis(sel)
    )
    out_line = "%s_%s %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %3.1f " % fields
    out_file_h.write(out_line)
    out_file_h.write("\n")
    print(out_line)
def print_all_cloudsat(cObj, compare, compare_name="unknown"):
    """Write one statistics line per cloud class for CPR (CloudSat) matches.

    Compares imager height (``imager_ctth_m_above_seasurface``) against the
    CloudSat validation height and writes stats for "all" plus the
    low/medium/high classes via my_print_one_line().  Output goes to the
    module-level ``out_file_h`` handle.

    *compare* is accepted for signature compatibility but unused.
    """
    from utils.get_flag_info import get_cloudsat_low_medium_high_classification
    x = cObj.cloudsat.all_arrays['validation_height']
    y = cObj.imager.all_arrays['imager_ctth_m_above_seasurface']
    mhl = get_cloudsat_low_medium_high_classification(cObj)
    # valid matches: non-negative truth height and an in-range imager height
    use = np.logical_and(x >= 0, np.logical_and(y > -9, y < 65000))
    if mhl is None:
        mhl = {"all": use.copy()}
    bias = y - x
    out_file_h.write(compare_name + " CPR (CloudSat) :\n")
    for flag_key in ["all", "low_clouds", "medium_clouds", "high_clouds"]:
        # `key in mhl` replaces `key in mhl.keys()`; the original also
        # AND-ed `use` in a second, redundant time.
        if flag_key in mhl:
            use_i = np.logical_and(use, mhl[flag_key])
            my_print_one_line(out_file_h, bias, x, y, use_i, "all", flag_key)
def print_all(cObj, compare, compare_name="unknown"):
    """Write CALIOP-vs-imager height statistics for many pixel selections.

    Builds a dictionary of boolean selections (cloud class, thin/opaque
    split by optical depth, illumination, geo-style smoothness filters,
    inversion cases, ...) and writes one stats line per selection via
    my_print_one_line().  Also triggers the approximation plots.  Output
    goes to the module-level ``out_file_h`` handle.

    *compare* is accepted for signature compatibility but unused.
    """
    #x = getattr(plt_obj, truth)
    #y = getattr(plt_obj, compare)
    x = cObj.calipso.all_arrays['validation_height']
    # second cloud-layer top; *1000 suggests a km->m conversion (the first
    # array is used as metres elsewhere) -- TODO confirm units
    x2 = cObj.calipso.all_arrays['layer_top_altitude'][:,1]*1000
    y = cObj.imager.all_arrays['imager_ctth_m_above_seasurface']
    print(np.max(y), np.max(y[y<65000]))
    #pressure_c = cObj.calipso.all_arrays['layer_top_pressure'][:,0]
    low_clouds = get_calipso_low_clouds(cObj)
    high_clouds = get_calipso_high_clouds(cObj)
    medium_clouds = get_calipso_medium_clouds(cObj)
    mhl = get_calipso_low_medium_high_classification(cObj)
    # valid matches: non-negative truth height and an in-range imager height
    use = np.logical_and(x>=0, np.logical_and(y>-9, y<65000))
    # drop pixels whose first feature-classification flag equals 1
    use = np.logical_and(use, np.not_equal(cObj.calipso.all_arrays['feature_classification_flags'][:,0], 1))
    mhl["all"] = use
    use_inversion = get_inversion_info_pps2014(cObj.imager.all_arrays["cloudtype_status"])
    # "thin" = top-layer 532nm optical depth in [0, 0.225]
    mhl["high_clouds_tp_thin"] = np.logical_and(
        mhl["high_clouds_tp"],
        np.logical_and(cObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>=0,
                       cObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<=0.225))
    mhl["high_clouds_tp_not_thin"] = np.logical_and(
        mhl["high_clouds_tp"], ~mhl["high_clouds_tp_thin"])
    mhl["medium_clouds_tp_thin"] = np.logical_and(
        mhl["medium_clouds_tp"],
        np.logical_and(cObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>=0,
                       cObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<=0.225))
    mhl["medium_clouds_tp_not_thin"] = np.logical_and(
        mhl["medium_clouds_tp"], ~mhl["medium_clouds_tp_thin"])
    mhl["low_clouds_tp_thin"] = np.logical_and(
        mhl["low_clouds_tp"],
        np.logical_and(cObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>=0,
                       cObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<=0.225))
    mhl["low_clouds_tp_not_thin"] = np.logical_and(
        mhl["low_clouds_tp"], ~mhl["low_clouds_tp_thin"])
    mhl["all_clouds_tp_thin"] = np.logical_and(
        mhl["clouds_tp"],
        np.logical_and(cObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>=0,
                       cObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<=0.225))
    mhl["all_clouds_tp_not_thin"] = np.logical_and(
        mhl["clouds_tp"], ~mhl["all_clouds_tp_thin"])
    # "bad" cases: thin high clouds split by number of detected layers
    mhl["all_clouds_tp_bad2"] = np.logical_and(
        mhl["high_clouds_tp_thin"],
        cObj.calipso.all_arrays['number_layers_found']>=2)
    mhl["all_clouds_tp_bad3"] = np.logical_and(
        mhl["high_clouds_tp_thin"],
        cObj.calipso.all_arrays['number_layers_found']==1)
    use_low = np.logical_and(use, low_clouds)
    use_medium = np.logical_and(use, medium_clouds)
    use_high = np.logical_and(use, high_clouds)
    bias = y-x  #+1465
    bias_second_layer = y-x2
    abias = np.abs(bias)
    #my_make_plot_example(bias, use, compare_name)
    #my_make_plot2(y, x, x2, mhl, mhl["all"])
    my_make_plot_example_aprox(bias, use, compare_name, cObj)
    from scipy import ndimage
    # geostationary-style filter: cloudtype constant over a 9-pixel window
    maxct = ndimage.filters.maximum_filter1d(cObj.imager.cloudtype, size=9)
    minct = ndimage.filters.minimum_filter1d(cObj.imager.cloudtype, size=9)
    val_geo = np.equal(maxct, minct)
    #cObj.calipso.layer_top_pressure[:,0][cObj.calipso.layer_top_pressure[:,0]<0] =1200
    #cObj.calipso.layer_top_altitude[:,0][cObj.calipso.layer_top_altitude[:,0]<0] =0
    if hasattr(cObj, 'calipso'):
        var_pressure = (ndimage.filters.maximum_filter1d(cObj.calipso.layer_top_pressure[:,0], size=9) -
                        ndimage.filters.minimum_filter1d(cObj.calipso.layer_top_pressure[:,0], size=9))
        val_geo = np.logical_and(
            val_geo,
            var_pressure<200)  #Pressure variation less than 200hPa
        var_pressure = (ndimage.filters.maximum_filter1d(cObj.calipso.layer_top_pressure[:,0], size=9) -
                        ndimage.filters.minimum_filter1d(cObj.calipso.layer_top_pressure[:,0], size=9))
        var_height = (ndimage.filters.maximum_filter1d(cObj.calipso.layer_top_altitude[:,0]*1000, size=9) -
                      ndimage.filters.minimum_filter1d(cObj.calipso.layer_top_altitude[:,0]*1000, size=9))
        val_geo2 = var_pressure<100
    sunz = np.array(cObj.imager.all_arrays['sunz'])
    # the triple-quoted block below is disabled exploratory code kept
    # verbatim from the original (note: Python-2 print syntax inside)
    """
    fig = plt.figure(figsize=(15, 11))
    print cObj.calipso.all_arrays['feature_classification_flags'][use][:,0]
    cflag_full = cObj.calipso.all_arrays['feature_classification_flags'][:,0]
    cflag = cObj.calipso.all_arrays['feature_classification_flags'][:,0][use]
    feature_array = (4*np.bitwise_and(np.right_shift(cflag,11),1) +
    2*np.bitwise_and(np.right_shift(cflag,10),1) +
    1*np.bitwise_and(np.right_shift(cflag,9),1))
    feature_array = (4*np.bitwise_and(np.right_shift(cflag,15),1) +
    2*np.bitwise_and(np.right_shift(cflag,14),1) +
    1*np.bitwise_and(np.right_shift(cflag,13),1))
    feature_array_full = (2*np.bitwise_and(np.right_shift(cflag_full,4),1)+
    1*np.bitwise_and(np.right_shift(cflag_full,3),1))
    feature_array = (2*np.bitwise_and(np.right_shift(cflag,4),1)+
    1*np.bitwise_and(np.right_shift(cflag,3),1))
    print np.mean(abias[use][feature_array==0]), len(abias[use][feature_array==0])
    print np.mean(abias[use][feature_array==1]), len(abias[use][feature_array==1])
    print np.mean(abias[use][feature_array==2]), len(abias[use][feature_array==2])
    print np.mean(abias[use][feature_array==3]), len(abias[use][feature_array==3])
    print my_rms(abias[use][feature_array==0]), len(abias[use][feature_array==0])
    print my_rms(abias[use][feature_array==1]), len(abias[use][feature_array==1])
    print my_rms(abias[use][feature_array==2]), len(abias[use][feature_array==2])
    print my_rms(abias[use][feature_array==3]), len(abias[use][feature_array==3])
    fig = plt.figure(figsize=(15, 11))
    ax = fig.add_subplot(111)
    #plt.plot(var_pressure[use], abias[use],'b.', alpha=0.02)
    #plt.plot(var_height[use], abias[use],'b.', alpha=0.02)
    #plt.plot(cObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km'][use], abias[use],'b.', alpha=0.02)
    #plt.plot(cObj.calipso.all_arrays['cfc_single_shots_1km_from_5km_file'][use]+0.01), abias[use],'b.', alpha=0.02)
    #ax.set_xlim([-1.0,1.0])
    plt.plot(feature_array, abias[use],'b.', alpha=0.02)
    #plt.plot(cObj.calipso.layer_top_altitude[:,0][use],abias[use],'r.', alpha=0.05)
    plt.show()
    """
    #logger.info("Getting day/night info from sunz")
    # some files store sunz scaled down by 100 -- rescale if so
    if np.max(sunz) < 20:
        sunz = sunz*100.0
    day_flag = np.where(np.less_equal(sunz, 80), 1, 0)
    night_flag = np.where(np.greater_equal(sunz, 95), 1, 0)
    twilight_flag = np.where(
        np.logical_and(np.greater(sunz, 80),
                       np.less(sunz, 95)),
        1, 0)
    out_file_h.write(compare_name + " CALIOP:\n")
    #use = np.logical_and(use,feature_array_full==3)
    my_list = sorted(mhl.keys())
    # overwrite with the fixed, report-ordered list of selections
    my_list = ["clouds_tp", "low_clouds_tp", "medium_clouds_tp", "high_clouds_tp",
               #"all_clouds_tp_not_thin", "low_clouds_tp_not_thin", "medium_clouds_tp_not_thin", "high_clouds_tp_not_thin",
               #"all_clouds_tp_thin", "low_clouds_tp_thin", "medium_clouds_tp_thin", "high_clouds_tp_thin",
               "clouds_op", "low_clouds_op", "medium_clouds_op", "high_clouds_op",
               "all_clouds_tp_bad2", "all_clouds_tp_bad3"]
    # illumination / smoothness selections
    for flag, compare_name in zip([use, day_flag, night_flag, twilight_flag, val_geo, val_geo2],
                                  ["all", "day", "night", "twilight", "geo-style", "no-edges"]):
        use_i = np.logical_and(use, flag)
        my_print_one_line(out_file_h, bias, x, y, use_i, compare_name, "")
    # per-class selections
    for flag_key in my_list:
        use_i = np.logical_and(use, np.logical_and(mhl[flag_key], use))
        my_print_one_line(out_file_h, bias, x, y, use_i, "all", flag_key)
    # per-class selections restricted to inversion cases
    for flag_key in my_list:
        use_i = np.logical_and(use, np.logical_and(mhl[flag_key], use_inversion))
        my_print_one_line(out_file_h, bias, x, y, use_i, "all", flag_key+'inversion')
    # the multi-layer "bad" case is also scored against the second layer top
    use_i = np.logical_and(use, np.logical_and(mhl["all_clouds_tp_bad2"], use))
    my_print_one_line(out_file_h, bias_second_layer, x2, y, use_i, "all", "all_clouds_tp_bad2")
    plt.close('all')
if __name__ == "__main__":
    # Driver: read reshaped match-up files for several satellites /
    # algorithm versions and write a statistics block for each.  Requires
    # the module-level names ADIR, glob and out_file_h to be set earlier
    # in this file.
    from matchobject_io import read_files
    BASE_DIR = ADIR + "/DATA_MISC/reshaped_files_validation_2018/"
    # glob patterns per dataset; %s is a date (yyyymmdd) or year
    ROOT_DIR_v2014 = (BASE_DIR + "global_modis_v2014_created20180920/Reshaped_Files_merged_caliop/eos2/1km/2010/*/*%s*h5")
    ROOT_DIR_v2014_clsat = (BASE_DIR + "global_modis_v2014_created20180920/Reshaped_Files_merged_cloudsat/eos2/1km/2010/*/*%s*h5")
    ROOT_DIR_v2018 = (BASE_DIR + "global_modis_v2018_created20180920/Reshaped_Files_merged_caliop/eos2/1km/2010/*/*%s*h5")
    ROOT_DIR_v2018_clsat = (BASE_DIR + "global_modis_v2018_created20180920/Reshaped_Files_merged_cloudsat/eos2/1km/2010/*/*%s*h5")
    ROOT_DIR_v2014_NPP = (BASE_DIR + "global_viirs_v2014_created20180914/Reshaped_Files_merged_caliop/npp/1km/2015/*/*h5")
    ROOT_DIR_v2018_NPP = (BASE_DIR + "global_viirs_v2018_created20180907/Reshaped_Files_merged_caliop/npp/1km/2015/*/*h5")
    ROOT_DIR_v2014_NPP_clsat = (BASE_DIR + "global_viirs_v2014_created20180914/Reshaped_Files_merged_cloudsat/npp/1km/2015/*/*h5")
    ROOT_DIR_v2018_NPP_clsat = (BASE_DIR + "global_viirs_v2018_created20181002_new_cmaprobv5/Reshaped_Files_merged_cloudsat/npp/1km/2015/*/*h5")
    ROOT_DIR_v2014_GAC = (BASE_DIR + "global_gac_v2014_created20180927/Reshaped_Files/noaa18/5km/%s/*cali*h5")
    ROOT_DIR_v2018_GAC = (BASE_DIR + "global_gac_v2018_created20180927/Reshaped_Files/noaa18/5km/%s/*cali*h5")
    ROOT_DIR_v2014_GAC_clsat = (BASE_DIR + "global_gac_v2014_created20180927/Reshaped_Files/noaa18/5km/2009/*clouds*h5")
    ROOT_DIR_v2018_GAC_clsat = (BASE_DIR + "global_gac_v2018_created20180927/Reshaped_Files/noaa18/5km/2009/*clouds*h5")
    ROOT_DIR = ROOT_DIR_v2018_clsat
    #between 27.16770203522559 12.67069745562212
    #between 38.70328344488811 22.066300881905146
    # S-NPP / VIIRS, CALIOP truth
    files = glob(ROOT_DIR_v2014_NPP)
    out_file_h.write("NPP-v2014\n")
    cObj = read_files(files)
    print_all(cObj, None, "NPPv2014")
    files = glob(ROOT_DIR_v2018_NPP)
    out_file_h.write("NPP-v2018\n")
    cObj = read_files(files)
    print_all(cObj, None, "NPPv2018")
    # NOTE(review): `b=a` raises NameError here, aborting everything below.
    # It looks like a deliberate early stop left in during debugging --
    # confirm whether the remaining sections should run.
    b=a
    # MODIS Collection-6 heights against CloudSat
    files = glob(ROOT_DIR%("20100201"))
    files = files + glob(ROOT_DIR%("20100401"))
    files = files + glob(ROOT_DIR%("20100601"))
    files = files + glob(ROOT_DIR%("20100801"))
    files = files + glob(ROOT_DIR%("20101001"))
    files = files + glob(ROOT_DIR%("20101201"))
    out_file_h.write("MODIS-C6\n")
    cObj = read_files(files, truth='cloudsat')
    # score the MODIS-C6 height product instead of the PPS height
    cObj.imager.all_arrays['imager_ctth_m_above_seasurface'] = cObj.modis.all_arrays["height"]
    print_all_cloudsat(cObj, None, "MODIS-C6")
    # MODIS Collection-6 heights against CALIOP
    ROOT_DIR = ROOT_DIR_v2018
    files = glob(ROOT_DIR%("20100201"))
    files = files + glob(ROOT_DIR%("20100401"))
    files = files + glob(ROOT_DIR%("20100601"))
    files = files + glob(ROOT_DIR%("20100801"))
    files = files + glob(ROOT_DIR%("20101001"))
    files = files + glob(ROOT_DIR%("20101201"))
    out_file_h.write("MODIS-C6\n")
    cObj = read_files(files)
    cObj.imager.all_arrays['imager_ctth_m_above_seasurface'] = cObj.modis.all_arrays["height"]
    print_all(cObj, None, "MODIS-C6")
    # AVHRR GAC, CALIOP truth
    files = glob(ROOT_DIR_v2014_GAC%("2006"))
    files = files + glob(ROOT_DIR_v2014_GAC%("2009"))
    out_file_h.write("GAc-v2014\n")
    cObj = read_files(files)
    print_all(cObj, None, "GACv2014")
    files = glob(ROOT_DIR_v2018_GAC%("2006"))
    files = files + glob(ROOT_DIR_v2018_GAC%("2009"))
    out_file_h.write("GAC-v2018\n")
    cObj = read_files(files)
    print_all(cObj, None, "GACv2018")
    # AVHRR GAC, CloudSat truth
    files = glob(ROOT_DIR_v2014_GAC_clsat)  #only 2009
    out_file_h.write("GAc-v2014\n")
    cObj = read_files(files, truth='cloudsat')
    print_all_cloudsat(cObj, None, "GACv2014")
    files = glob(ROOT_DIR_v2018_GAC_clsat)  #only 2009
    out_file_h.write("GAC-v2018\n")
    cObj = read_files(files, truth='cloudsat')
    print_all_cloudsat(cObj, None, "GACv2018")
    # S-NPP / VIIRS, CloudSat truth
    files = glob(ROOT_DIR_v2018_NPP_clsat)
    out_file_h.write("NPP-v2018\n")
    cObj = read_files(files, truth='cloudsat')
    print_all_cloudsat(cObj, None, "NPPv2018")
    files = glob(ROOT_DIR_v2014_NPP_clsat)
    out_file_h.write("NPP-v2014\n")
    cObj = read_files(files, truth='cloudsat')
    print_all_cloudsat(cObj, None, "NPPv2014")
    # PPS on MODIS, CloudSat truth
    ROOT_DIR = ROOT_DIR_v2014_clsat
    files = glob(ROOT_DIR%("20100201"))
    files = files + glob(ROOT_DIR%("20100401"))
    files = files + glob(ROOT_DIR%("20100601"))
    files = files + glob(ROOT_DIR%("20100801"))
    files = files + glob(ROOT_DIR%("20101001"))
    files = files + glob(ROOT_DIR%("20101201"))
    out_file_h.write("MODIS-v2014\n")
    cObj = read_files(files, truth='cloudsat')
    print_all_cloudsat(cObj, None, "MODISv2014")
    ROOT_DIR = ROOT_DIR_v2018_clsat
    files = glob(ROOT_DIR%("20100201"))
    files = files + glob(ROOT_DIR%("20100401"))
    files = files + glob(ROOT_DIR%("20100601"))
    files = files + glob(ROOT_DIR%("20100801"))
    files = files + glob(ROOT_DIR%("20101001"))
    files = files + glob(ROOT_DIR%("20101201"))
    out_file_h.write("MODIS-v2018\n")
    cObj = read_files(files, truth='cloudsat')
    print_all_cloudsat(cObj, None, "MODISv2018")
    # PPS on MODIS, CALIOP truth
    ROOT_DIR = ROOT_DIR_v2014
    files = glob(ROOT_DIR%("20100201"))
    files = files + glob(ROOT_DIR%("20100401"))
    files = files + glob(ROOT_DIR%("20100601"))
    files = files + glob(ROOT_DIR%("20100801"))
    files = files + glob(ROOT_DIR%("20101001"))
    files = files + glob(ROOT_DIR%("20101201"))
    out_file_h.write("MODIS-v2014\n")
    cObj = read_files(files)
    print_all(cObj, None, "eos2v2014")
    ROOT_DIR = ROOT_DIR_v2018
    files = glob(ROOT_DIR%("20100201"))
    files = files + glob(ROOT_DIR%("20100401"))
    files = files + glob(ROOT_DIR%("20100601"))
    files = files + glob(ROOT_DIR%("20100801"))
    files = files + glob(ROOT_DIR%("20101001"))
    files = files + glob(ROOT_DIR%("20101201"))
    out_file_h.write("MODIS-v2018\n")
    cObj = read_files(files)
    print_all(cObj, None, "eos2v2018")
| gpl-3.0 |
pgoeser/gnuradio | gr-utils/src/python/plot_data.py | 10 | 5841 | #
# Copyright 2007,2008,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
class plot_data:
    """Interactive viewer for one or more raw binary sample files.

    Reads fixed-size blocks of samples (numpy dtype given by *datatype*)
    from each file and plots amplitude against time, with keyboard and
    on-figure button controls for stepping forwards/backwards through the
    files.  Uses the pylab state-machine API (figure/plot/draw/show are
    expected to be in scope via ``from pylab import *``).  Python-2 code.
    """
    def __init__(self, datatype, filenames, options):
        # one open handle and one legend entry per input file
        self.hfile = list()
        self.legend_text = list()
        for f in filenames:
            self.hfile.append(open(f, "r"))
            self.legend_text.append(f)

        # options comes from an optparse OptionParser (see module imports):
        # block = samples per displayed block, start = initial file offset
        # in samples, sample_rate = Hz used for the time axis
        self.block_length = options.block
        self.start = options.start
        self.sample_rate = options.sample_rate

        self.datatype = datatype
        self.sizeof_data = datatype().nbytes # number of bytes per sample in file

        self.axis_font_size = 16
        self.label_font_size = 18
        self.title_font_size = 20
        self.text_size = 22

        # Setup PLOT
        self.fig = figure(1, figsize=(16, 9), facecolor='w')
        rcParams['xtick.labelsize'] = self.axis_font_size
        rcParams['ytick.labelsize'] = self.axis_font_size

        # header texts: file position is updated on every read
        self.text_file_pos = figtext(0.10, 0.88, "File Position: ", weight="heavy", size=self.text_size)
        self.text_block = figtext(0.40, 0.88, ("Block Size: %d" % self.block_length),
                                  weight="heavy", size=self.text_size)
        self.text_sr = figtext(0.60, 0.88, ("Sample Rate: %.2f" % self.sample_rate),
                               weight="heavy", size=self.text_size)
        self.make_plots()

        # "<" and ">" buttons for stepping through the files
        self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
        self.button_left = Button(self.button_left_axes, "<")
        self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
        self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
        self.button_right = Button(self.button_right_axes, ">")
        self.button_right_callback = self.button_right.on_clicked(self.button_right_click)

        self.xlim = self.sp_f.get_xlim()

        self.manager = get_current_fig_manager()
        connect('key_press_event', self.click)
        show()  # blocks until the window is closed

    def get_data(self, hfile):
        """Read the next block from *hfile* into self.f / self.time."""
        # show the current file position in samples
        self.text_file_pos.set_text("File Position: %d" % (hfile.tell()//self.sizeof_data))
        try:
            f = scipy.fromfile(hfile, dtype=self.datatype, count=self.block_length)
        except MemoryError:
            print "End of File"
        else:
            self.f = scipy.array(f)
            # NOTE(review): 1/self.sample_rate is Python-2 division -- an
            # integer sample_rate would truncate to 0; presumably the
            # option is parsed as float, confirm in the option definition.
            self.time = scipy.array([i*(1/self.sample_rate) for i in range(len(self.f))])

    def make_plots(self):
        """Create the amplitude-vs-time axes and the initial line per file."""
        self.sp_f = self.fig.add_subplot(2,1,1, position=[0.075, 0.2, 0.875, 0.6])
        self.sp_f.set_title(("Amplitude"), fontsize=self.title_font_size, fontweight="bold")
        self.sp_f.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
        self.sp_f.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
        self.plot_f = list()
        maxval = -1e12
        minval = 1e12
        for hf in self.hfile:
            # if specified on the command-line, set file pointer
            hf.seek(self.sizeof_data*self.start, 1)
            self.get_data(hf)
            # Subplot for real and imaginary parts of signal
            self.plot_f += plot(self.time, self.f, 'o-')
            maxval = max(maxval, self.f.max())
            minval = min(minval, self.f.min())
        # 50% head-room above/below the data range
        self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
        self.leg = self.sp_f.legend(self.plot_f, self.legend_text)
        draw()

    def update_plots(self):
        """Re-read the current block from every file and refresh the lines."""
        maxval = -1e12
        minval = 1e12
        for hf, p in zip(self.hfile, self.plot_f):
            self.get_data(hf)
            p.set_data([self.time, self.f])
            maxval = max(maxval, self.f.max())
            minval = min(minval, self.f.min())
        self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
        draw()

    def click(self, event):
        """Keyboard handler: space/down/right step forward, up/left back."""
        forward_valid_keys = [" ", "down", "right"]
        backward_valid_keys = ["up", "left"]
        if(find(event.key, forward_valid_keys)):
            self.step_forward()
        elif(find(event.key, backward_valid_keys)):
            self.step_backward()

    def button_left_click(self, event):
        self.step_backward()

    def button_right_click(self, event):
        self.step_forward()

    def step_forward(self):
        # the file pointers already sit after the displayed block, so a
        # plain re-read advances one block
        self.update_plots()

    def step_backward(self):
        for hf in self.hfile:
            # Step back in file position: two blocks back, then the
            # update's read advances one block again (clamped at start)
            if(hf.tell() >= 2*self.sizeof_data*self.block_length):
                hf.seek(-2*self.sizeof_data*self.block_length, 1)
            else:
                hf.seek(-hf.tell(), 1)
        self.update_plots()
def find(item_in, list_search):
    """Return True if *item_in* is an element of *list_search*, else False.

    The original called list.index() and compared the result to None
    (always true when found, since index() returns an int) and relied on
    catching ValueError for the missing case; the membership operator
    expresses the same contract directly.
    """
    return item_in in list_search
| gpl-3.0 |
areeda/gwpy | gwpy/segments/flag.py | 2 | 61756 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""This module defines the `DataQualityFlag` and `DataQualityDict`.
The `DataQualityFlag` represents an annotated set of data-quality segments
indicating something about the state of a laser-interferometer
gravitational-wave detector in a given time interval.
The `DataQualityDict` is just a `dict` of flags, provided as a convenience
for handling multiple flags over the same global time interval.
"""
import datetime
import json
import operator
import os
import re
import warnings
from io import BytesIO
from collections import OrderedDict
from copy import (copy as shallowcopy, deepcopy)
from functools import reduce
from math import (floor, ceil)
from queue import Queue
from threading import Thread
from urllib.error import (URLError, HTTPError)
from urllib.parse import urlparse
from numpy import inf
from astropy.io import registry as io_registry
from astropy.utils.data import get_readable_fileobj
from gwosc import timeline
from dqsegdb2.query import query_segments
from ..io.mp import read_multi as io_read_multi
from ..time import to_gps, LIGOTimeGPS
from ..utils.misc import if_not_none
from .segments import Segment, SegmentList
__author__ = "Duncan Macleod <duncan.macleod@ligo.org>"
__all__ = ['DataQualityFlag', 'DataQualityDict']

# Patterns for parsing flag names of the forms IFO:TAG:VERSION, IFO:TAG
# and TAG:VERSION (e.g. 'H1:DMT-SCIENCE:1'); named groups extract the
# components.
re_IFO_TAG_VERSION = re.compile(
    r"\A(?P<ifo>[A-Z]\d):(?P<tag>[^/]+):(?P<version>\d+)\Z")
re_IFO_TAG = re.compile(r"\A(?P<ifo>[A-Z]\d):(?P<tag>[^/]+)\Z")
re_TAG_VERSION = re.compile(r"\A(?P<tag>[^/]+):(?P<version>\d+)\Z")

# default segment-database host, overridable via the environment
DEFAULT_SEGMENT_SERVER = os.getenv('DEFAULT_SEGMENT_SERVER',
                                   'https://segments.ligo.org')
# -- utilities ----------------------------------------------------------------
def _select_query_method(cls, url):
"""Select the correct query method based on the URL
Works for `DataQualityFlag` and `DataQualityDict`
"""
if urlparse(url).netloc.startswith('geosegdb.'): # only DB2 server
return cls.query_segdb
return cls.query_dqsegdb
def _parse_query_segments(args, func):
    """Parse *args for query_dqsegdb() or query_segdb()

    Returns a SegmentList in all cases
    """
    if len(args) == 1:
        (arg,) = args
        # a single SegmentList argument is passed through untouched
        if isinstance(arg, SegmentList):
            return arg
        # otherwise the single argument must itself be a (start, end) pair
        args = arg
    # anything other than exactly two values is an error
    try:
        start, end = args
    except ValueError as exc:
        exc.args = ('{0}() takes 2 arguments for start and end GPS time, '
                    'or 1 argument containing a Segment or SegmentList'.format(
                        func.__name__),)
        raise
    # wrap the pair as a single-segment SegmentList
    return SegmentList([Segment(to_gps(start), to_gps(end))])
# -- DataQualityFlag ----------------------------------------------------------
class DataQualityFlag(object):
    """A representation of a named set of segments.

    Parameters
    ----------
    name : str, optional
        The name of this flag.
        This should be of the form {ifo}:{tag}:{version}, e.g.
        'H1:DMT-SCIENCE:1'. Use `label` for human-readable names.

    active : `SegmentList`, optional
        A list of active segments for this flag

    known : `SegmentList`, optional
        A list of known segments for this flag

    label : `str`, optional
        Human-readable name for this flag, e.g. ``'Science-mode'``

    category : `int`, optional
        Veto category for this flag.

    description : `str`, optional
        Human-readable description of why this flag was created.

    isgood : `bool`, optional
        Do active segments mean the IFO was in a good state?
    """
    _EntryClass = Segment
    _ListClass = SegmentList

    def __init__(self, name=None, active=None, known=None, label=None,
                 category=None, description=None, isgood=True, padding=None):
        """Define a new DataQualityFlag.
        """
        self.name = name
        self.known = known
        self.active = active
        self.label = label
        self.category = category
        self.description = description
        self.isgood = isgood
        self.padding = padding

    # -- properties -----------------------------

    @property
    def name(self):
        """The name associated with this flag.

        This normally takes the form {ifo}:{tag}:{version}. If found,
        each component is stored separately in the associated attributes.

        :type: `str`
        """
        return self._name

    @name.setter
    def name(self, name):
        self._name = name
        try:
            self._parse_name(name)
        except ValueError:
            # unparseable names are allowed, but then ifo/tag/version
            # are all reset to None
            self._parse_name(None)

    @property
    def ifo(self):
        """The interferometer associated with this flag.

        This should be a single uppercase letter and a single number,
        e.g. ``'H1'``.

        :type: `str`
        """
        return self._ifo

    @ifo.setter
    def ifo(self, ifoname):
        self._ifo = ifoname

    @property
    def tag(self):
        """The tag (name) associated with this flag.

        This should take the form ``'AAA-BBB_CCC_DDD'``, i.e. where
        each component is an uppercase acronym of alphanumeric
        characters only, e.g. ``'DCH-IMC_BAD_CALIBRATION'`` or
        ``'DMT-SCIENCE'``.

        :type: `str`
        """
        return self._tag

    @tag.setter
    def tag(self, n):
        self._tag = n

    @property
    def version(self):
        """The version number of this flag.

        Each flag in the segment database is stored with a version
        integer, with each successive version representing a more
        accurate dataset for its known segments than any previous.

        :type: `int`
        """
        return self._version

    @version.setter
    def version(self, v):
        self._version = int(v) if v is not None else None

    @property
    def label(self):
        """A human-readable label for this flag.

        For example: ``'Science-mode'``.

        :type: `str`
        """
        return self._label

    @label.setter
    def label(self, lab):
        self._label = lab

    @property
    def active(self):
        """The set of segments during which this flag was
        active.
        """
        return self._active

    @active.setter
    def active(self, segmentlist):
        if segmentlist is None:
            del self.active
        else:
            # coerce each entry to the flag's segment type
            self._active = self._ListClass(map(self._EntryClass, segmentlist))

    @active.deleter
    def active(self):
        self._active = self._ListClass()

    @property
    def known(self):
        """The set of segments during which this flag was
        known, and its state was well defined.
        """
        return self._known

    @known.setter
    def known(self, segmentlist):
        if segmentlist is None:
            del self.known
        else:
            self._known = self._ListClass(map(self._EntryClass, segmentlist))

    @known.deleter
    def known(self):
        self._known = self._ListClass()

    @property
    def category(self):
        """Veto category for this flag.

        :type: `int`
        """
        return self._category

    @category.setter
    def category(self, cat):
        self._category = if_not_none(int, cat)

    @property
    def description(self):
        """Description of why/how this flag was generated.

        :type: `str`
        """
        return self._description

    @description.setter
    def description(self, desc):
        self._description = desc

    @property
    def isgood(self):
        """Whether `active` segments mean the instrument was in a good state.

        :type: `bool`
        """
        return self._isgood

    @isgood.setter
    def isgood(self, good):
        self._isgood = bool(good)

    @property
    def padding(self):
        """[start, end) padding for this flag's active segments.
        """
        return self._padding

    @padding.setter
    def padding(self, pad):
        if pad is None:
            pad = (None, None)
        # None entries become 0., everything else is cast to float
        self._padding = tuple(float(p or 0.) for p in pad)

    @padding.deleter
    def padding(self):
        self._padding = (0., 0.)

    # -- read-only properties -------------------

    @property
    def texname(self):
        """Name of this flag in LaTeX printable format.
        """
        try:
            return self.name.replace('_', r'\_')
        except AttributeError:
            # name is None
            return None

    @property
    def extent(self):
        """The single GPS ``[start, stop)`` enclosing segment of this
        `DataQualityFlag`.

        :type: `Segment`
        """
        return self.known.extent()

    @property
    def livetime(self):
        """Amount of time this flag was `active`.

        :type: `float`
        """
        return abs(self.active)

    @property
    def regular(self):
        """`True` if the `active` segments are a proper subset of the `known`.

        :type: `bool`
        """
        return abs(self.active - self.known) == 0

    # -- classmethods ---------------------------

    @classmethod
    def query(cls, flag, *args, **kwargs):
        """Query for segments of a given flag

        This method intelligently selects the `~DataQualityFlag.query_segdb`
        or the `~DataQualityFlag.query_dqsegdb` methods based on the
        ``url`` kwarg given.

        Parameters
        ----------
        flag : `str`
            The name of the flag for which to query

        *args
            Either, two `float`-like numbers indicating the
            GPS [start, stop) interval, or a `SegmentList`
            defining a number of summary segments

        url : `str`, optional
            URL of the segment database, defaults to
            ``$DEFAULT_SEGMENT_SERVER`` environment variable, or
            ``'https://segments.ligo.org'``

        See also
        --------
        DataQualityFlag.query_segdb
        DataQualityFlag.query_dqsegdb
            for details on the actual query engine, and documentation of
            other keyword arguments appropriate for each query

        Returns
        -------
        flag : `DataQualityFlag`
            A new `DataQualityFlag`, with the `known` and `active` lists
            filled appropriately.
        """
        query_ = _select_query_method(
            cls, kwargs.get('url', DEFAULT_SEGMENT_SERVER))
        return query_(flag, *args, **kwargs)

    @classmethod
    def query_segdb(cls, flag, *args, **kwargs):
        """Query the initial LIGO segment database for the given flag

        Parameters
        ----------
        flag : `str`
            The name of the flag for which to query

        *args
            Either, two `float`-like numbers indicating the
            GPS [start, stop) interval, or a `SegmentList`
            defining a number of summary segments

        url : `str`, optional
            URL of the segment database, defaults to
            ``$DEFAULT_SEGMENT_SERVER`` environment variable, or
            ``'https://segments.ligo.org'``

        Returns
        -------
        flag : `DataQualityFlag`
            A new `DataQualityFlag`, with the `known` and `active` lists
            filled appropriately.
        """
        warnings.warn("query_segdb is deprecated and will be removed in a "
                      "future release", DeprecationWarning)
        # parse arguments
        qsegs = _parse_query_segments(args, cls.query_segdb)
        # process query through the dict implementation
        try:
            flags = DataQualityDict.query_segdb([flag], qsegs, **kwargs)
        except TypeError as exc:
            # rebrand dict-level errors so they refer to this class
            if 'DataQualityDict' in str(exc):
                raise TypeError(str(exc).replace('DataQualityDict',
                                                 cls.__name__))
            else:
                raise
        if len(flags) > 1:
            raise RuntimeError("Multiple flags returned for single query, "
                               "something went wrong:\n    %s"
                               % '\n    '.join(flags.keys()))
        elif len(flags) == 0:
            raise RuntimeError("No flags returned for single query, "
                               "something went wrong.")
        return flags[flag]

    @classmethod
    def query_dqsegdb(cls, flag, *args, **kwargs):
        """Query the advanced LIGO DQSegDB for the given flag

        Parameters
        ----------
        flag : `str`
            The name of the flag for which to query

        *args
            Either, two `float`-like numbers indicating the
            GPS [start, stop) interval, or a `SegmentList`
            defining a number of summary segments

        url : `str`, optional
            URL of the segment database, defaults to
            ``$DEFAULT_SEGMENT_SERVER`` environment variable, or
            ``'https://segments.ligo.org'``

        Returns
        -------
        flag : `DataQualityFlag`
            A new `DataQualityFlag`, with the `known` and `active` lists
            filled appropriately.
        """
        # parse arguments
        qsegs = _parse_query_segments(args, cls.query_dqsegdb)

        # get server
        url = kwargs.pop('url', DEFAULT_SEGMENT_SERVER)

        # parse flag
        out = cls(name=flag)
        if out.ifo is None or out.tag is None:
            raise ValueError("Cannot parse ifo or tag (name) for flag %r"
                             % flag)

        # process query
        # NOTE(review): assumes at least one query segment; an empty
        # SegmentList would leave 'new' undefined below — confirm callers
        # never pass an empty list
        for start, end in qsegs:
            # handle infinities
            if float(end) == +inf:
                end = int(to_gps('now'))

            # query
            try:
                data = query_segments(flag, int(start), int(end), host=url)
            except HTTPError as exc:
                if exc.code == 404:  # if not found, annotate flag name
                    exc.msg += ' [{0}]'.format(flag)
                raise

            # read from json buffer
            new = cls.read(
                BytesIO(json.dumps(data).encode('utf-8')),
                format='json',
            )

            # restrict to query segments
            segl = SegmentList([Segment(start, end)])
            new.known &= segl
            new.active &= segl
            out += new
            # replace metadata
            out.description = new.description
            out.isgood = new.isgood

        return out

    @classmethod
    def fetch_open_data(cls, flag, start, end, **kwargs):
        """Fetch Open Data timeline segments into a flag.

        Parameters
        ----------
        flag : `str`
            the name of the flag to query

        start : `int`, `str`
            the GPS start time (or parseable date string) to query

        end : `int`, `str`
            the GPS end time (or parseable date string) to query

        verbose : `bool`, optional
            show verbose download progress, default: `False`

        timeout : `int`, optional
            timeout for download (seconds)

        host : `str`, optional
            URL of LOSC host, default: ``'losc.ligo.org'``

        Returns
        -------
        flag : `DataQualityFlag`
            a new flag with `active` segments filled from Open Data

        Examples
        --------
        >>> from gwpy.segments import DataQualityFlag
        >>> print(DataQualityFlag.fetch_open_data('H1_DATA', 'Jan 1 2010',
        ...                                       'Jan 2 2010'))
        <DataQualityFlag('H1:DATA',
                         known=[[946339215 ... 946425615)],
                         active=[[946340946 ... 946351800)
                                 [946356479 ... 946360620)
                                 [946362652 ... 946369150)
                                 [946372854 ... 946382630)
                                 [946395595 ... 946396751)
                                 [946400173 ... 946404977)
                                 [946412312 ... 946413577)
                                 [946415770 ... 946422986)],
                         description=None)>
        """
        start = to_gps(start).gpsSeconds
        end = to_gps(end).gpsSeconds
        # the full query interval is 'known' by construction
        known = [(start, end)]
        active = timeline.get_segments(flag, start, end, **kwargs)
        # 'H1_DATA' -> 'H1:DATA' for the canonical flag name
        return cls(flag.replace('_', ':', 1), known=known, active=active,
                   label=flag)

    @classmethod
    def read(cls, source, *args, **kwargs):
        """Read segments from file into a `DataQualityFlag`.

        Parameters
        ----------
        filename : `str`
            path of file to read

        name : `str`, optional
            name of flag to read from file, otherwise read all segments.

        format : `str`, optional
            source format identifier. If not given, the format will be
            detected if possible. See below for list of acceptable
            formats.

        coltype : `type`, optional, default: `float`
            datatype to force for segment times, only valid for
            ``format='segwizard'``.

        strict : `bool`, optional, default: `True`
            require segment start and stop times match printed duration,
            only valid for ``format='segwizard'``.

        coalesce : `bool`, optional
            if `True` coalesce the all segment lists before returning,
            otherwise return exactly as contained in file(s).

        nproc : `int`, optional, default: 1
            number of CPUs to use for parallel reading of multiple files

        verbose : `bool`, optional, default: `False`
            print a progress bar showing read status

        Returns
        -------
        dqflag : `DataQualityFlag`
            formatted `DataQualityFlag` containing the active and known
            segments read from file.

        Raises
        ------
        IndexError
            if ``source`` is an empty list

        Notes
        -----"""
        if 'flag' in kwargs:  # pragma: no cover
            warnings.warn('\'flag\' keyword was renamed \'name\', this '
                          'warning will result in an error in the future')
            # remap the deprecated 'flag' keyword (popping 'flags' here
            # would raise KeyError)
            kwargs.setdefault('name', kwargs.pop('flag'))
        coalesce = kwargs.pop('coalesce', False)

        def combiner(flags):
            """Combine `DataQualityFlag` from each file into a single object
            """
            out = flags[0]
            for flag in flags[1:]:
                out.known += flag.known
                out.active += flag.active
            if coalesce:
                return out.coalesce()
            return out

        return io_read_multi(combiner, cls, source, *args, **kwargs)

    @classmethod
    def from_veto_def(cls, veto):
        """Define a `DataQualityFlag` from a `VetoDef`

        Parameters
        ----------
        veto : :class:`~ligo.lw.lsctables.VetoDef`
            veto definition to convert from
        """
        name = '%s:%s' % (veto.ifo, veto.name)
        try:
            name += ':%d' % int(veto.version)
        except TypeError:
            # versionless veto definition
            pass
        # an open-ended veto is known until +infinity
        known = Segment(veto.start_time, veto.end_time or +inf)
        pad = (veto.start_pad, veto.end_pad)
        return cls(name=name, known=[known], category=veto.category,
                   description=veto.comment, padding=pad)

    # -- methods --------------------------------

    def write(self, target, *args, **kwargs):
        """Write this `DataQualityFlag` to file

        Notes
        -----"""
        return io_registry.write(self, target, *args, **kwargs)

    def populate(self, source=DEFAULT_SEGMENT_SERVER, segments=None,
                 pad=True, **kwargs):
        """Query the segment database for this flag's active segments.

        This method assumes all of the metadata for each flag have been
        filled. Minimally, the following attributes must be filled

        .. autosummary::

           ~DataQualityFlag.name
           ~DataQualityFlag.known

        Segments will be fetched from the database, with any
        :attr:`~DataQualityFlag.padding` added on-the-fly.

        This `DataQualityFlag` will be modified in-place.

        Parameters
        ----------
        source : `str`
            source of segments for this flag. This must be
            either a URL for a segment database or a path to a file on disk.

        segments : `SegmentList`, optional
            a list of segments during which to query, if not given,
            existing known segments for this flag will be used.

        pad : `bool`, optional, default: `True`
            apply the `~DataQualityFlag.padding` associated with this
            flag, default: `True`.

        **kwargs
            any other keyword arguments to be passed to
            :meth:`DataQualityFlag.query` or :meth:`DataQualityFlag.read`.

        Returns
        -------
        self : `DataQualityFlag`
            a reference to this flag
        """
        # delegate to the dict implementation with a single-entry dict
        tmp = DataQualityDict()
        tmp[self.name] = self
        tmp.populate(source=source, segments=segments, pad=pad, **kwargs)
        return tmp[self.name]

    def contract(self, x):
        """Contract each of the `active` `Segments` by ``x`` seconds.

        This method adds ``x`` to each segment's lower bound, and subtracts
        ``x`` from the upper bound.

        The :attr:`~DataQualityFlag.active` `SegmentList` is modified
        in place.

        Parameters
        ----------
        x : `float`
            number of seconds by which to contract each `Segment`.
        """
        self.active = self.active.contract(x)
        return self.active

    def protract(self, x):
        """Protract each of the `active` `Segments` by ``x`` seconds.

        This method subtracts ``x`` from each segment's lower bound,
        and adds ``x`` to the upper bound, while maintaining that each
        `Segment` stays within the `known` bounds.

        The :attr:`~DataQualityFlag.active` `SegmentList` is modified
        in place.

        Parameters
        ----------
        x : `float`
            number of seconds by which to protact each `Segment`.
        """
        self.active = self.active.protract(x)
        return self.active

    def pad(self, *args, **kwargs):
        """Apply a padding to each segment in this `DataQualityFlag`

        This method either takes no arguments, in which case the value of
        the :attr:`~DataQualityFlag.padding` attribute will be used,
        or two values representing the padding for the start and end of
        each segment.

        For both the `start` and `end` paddings, a positive value means
        pad forward in time, so that a positive `start` pad or negative
        `end` padding will contract a segment at one or both ends,
        and vice-versa.

        This method will apply the same padding to both the
        `~DataQualityFlag.known` and `~DataQualityFlag.active` lists,
        but will not :meth:`~DataQualityFlag.coalesce` the result.

        Parameters
        ----------
        start : `float`
            padding to apply to the start of the each segment

        end : `float`
            padding to apply to the end of each segment

        inplace : `bool`, optional, default: `False`
            modify this object in-place, default is `False`, i.e. return
            a copy of the original object with padded segments

        Returns
        -------
        paddedflag : `DataQualityFlag`
            a view of the modified flag
        """
        if not args:
            start, end = self.padding
        else:
            start, end = args
        if kwargs.pop('inplace', False):
            new = self
        else:
            new = self.copy()
        if kwargs:
            raise TypeError("unexpected keyword argument %r"
                            % list(kwargs.keys())[0])
        new.known = [(s[0]+start, s[1]+end) for s in self.known]
        new.active = [(s[0]+start, s[1]+end) for s in self.active]
        return new

    def round(self, contract=False):
        """Round this flag to integer segments.

        Parameters
        ----------
        contract : `bool`, optional
            if `False` (default) expand each segment to the containing
            integer boundaries, otherwise contract each segment to the
            contained boundaries

        Returns
        -------
        roundedflag : `DataQualityFlag`
            A copy of the original flag with the `active` and `known` segments
            padded out to integer boundaries.
        """
        def _round(seg):
            if contract:  # round inwards
                a = type(seg[0])(ceil(seg[0]))
                b = type(seg[1])(floor(seg[1]))
            else:  # round outwards
                a = type(seg[0])(floor(seg[0]))
                b = type(seg[1])(ceil(seg[1]))
            if a >= b:  # if segment is too short, return 'null' segment
                return type(seg)(0, 0)  # will get coalesced away
            return type(seg)(a, b)

        new = self.copy()
        new.active = type(new.active)(map(_round, new.active))
        new.known = type(new.known)(map(_round, new.known))
        return new.coalesce()

    def coalesce(self):
        """Coalesce the segments for this flag.

        This method does two things:

        - `coalesces <SegmentList.coalesce>` the `~DataQualityFlag.known` and
          `~DataQualityFlag.active` segment lists
        - forces the `active` segments to be a proper subset of the `known`
          segments

        .. note::

            this operations is performed in-place.

        Returns
        -------
        self
            a view of this flag, not a copy.
        """
        self.known = self.known.coalesce()
        self.active = self.active.coalesce()
        # clip active to the known extent
        self.active = (self.known & self.active).coalesce()
        return self

    def __repr__(self):
        prefix = "<{}(".format(type(self).__name__)
        suffix = ")>"
        indent = " " * len(prefix)

        # format segment lists, truncating long ones with an ellipsis
        known = str(self.known).replace(
            "\n",
            "\n{}      ".format(indent),
        ).split("\n")
        if len(known) > 10:  # use ellipsis
            known = known[:3] + ['{}      ...'.format(indent)] + known[-3:]
        active = str(self.active).replace(
            "\n",
            "\n{}       ".format(indent),
        ).split('\n')
        if len(active) > 10:  # use ellipsis
            active = active[:3] + ['{}       ...'.format(indent)] + active[-3:]

        # print the thing
        return "".join((
            prefix,
            "\n{}".format(indent).join([
                "{},".format(repr(self.name)),
                "known={}".format("\n".join(known)),
                "active={}".format("\n".join(active)),
                "description={}".format(repr(self.description)),
            ]),
            suffix,
        ))

    def copy(self):
        """Build an exact copy of this flag.

        Returns
        -------
        flag2 : `DataQualityFlag`
            a copy of the original flag, but with a fresh memory address.
        """
        return deepcopy(self)

    def plot(self, figsize=(12, 4), xscale='auto-gps', **kwargs):
        """Plot this flag on a segments projection.

        Parameters
        ----------
        **kwargs
            all keyword arguments are passed to the
            :class:`~gwpy.plot.Plot` constructor.

        Returns
        -------
        figure : `~matplotlib.figure.Figure`
            the newly created figure, with populated Axes.

        See also
        --------
        matplotlib.pyplot.figure
            for documentation of keyword arguments used to create the
            figure
        matplotlib.figure.Figure.add_subplot
            for documentation of keyword arguments used to create the
            axes
        gwpy.plot.SegmentAxes.plot_segmentlist
            for documentation of keyword arguments used in rendering the data
        """
        from matplotlib import rcParams
        from ..plot import Plot
        # prefer label, falling back to texname only when usetex is on
        if self.label:
            kwargs.setdefault('label', self.label)
        elif rcParams['text.usetex']:
            kwargs.setdefault('label', self.texname)
        else:
            kwargs.setdefault('label', self.name)
        kwargs.update(figsize=figsize, xscale=xscale)
        return Plot(self, projection='segments', **kwargs)

    def _parse_name(self, name):
        """Internal method to parse a `string` name into constituent
        `ifo, `name` and `version` components.

        Parameters
        ----------
        name : `str`, `None`
            the full name of a `DataQualityFlag` to parse, e.g.
            ``'H1:DMT-SCIENCE:1'``, or `None` to set all components
            to `None`

        Returns
        -------
        (ifo, name, version)
            A tuple of component string parts

        Raises
        ------
        `ValueError`
            If the input ``name`` cannot be parsed into
            {ifo}:{tag}:{version} format.
        """
        if isinstance(name, bytes):
            name = name.decode('utf-8')
        if name is None:
            self.ifo = None
            self.tag = None
            self.version = None
        elif re_IFO_TAG_VERSION.match(name):
            match = re_IFO_TAG_VERSION.match(name).groupdict()
            self.ifo = match['ifo']
            self.tag = match['tag']
            self.version = int(match['version'])
        elif re_IFO_TAG.match(name):
            match = re_IFO_TAG.match(name).groupdict()
            self.ifo = match['ifo']
            self.tag = match['tag']
            self.version = None
        elif re_TAG_VERSION.match(name):
            match = re_TAG_VERSION.match(name).groupdict()
            self.ifo = None
            self.tag = match['tag']
            self.version = int(match['version'])
        else:
            raise ValueError("No flag name structure detected in '%s', flags "
                             "should be named as '{ifo}:{tag}:{version}'. "
                             "For arbitrary strings, use the "
                             "`DataQualityFlag.label` attribute" % name)
        return self.ifo, self.tag, self.version

    def __and__(self, other):
        """Find the intersection of this one and ``other``.
        """
        return self.copy().__iand__(other)

    def __iand__(self, other):
        """Intersect this flag with ``other`` in-place.
        """
        self.known &= other.known
        self.active &= other.active
        return self

    def __sub__(self, other):
        """Find the difference between this flag and another.
        """
        return self.copy().__isub__(other)

    def __isub__(self, other):
        """Subtract the ``other`` `DataQualityFlag` from this one in-place.
        """
        self.known &= other.known
        self.active -= other.active
        self.active &= self.known
        return self

    def __or__(self, other):
        """Find the union of this flag and ``other``.
        """
        return self.copy().__ior__(other)

    def __ior__(self, other):
        """Add the ``other`` `DataQualityFlag` to this one in-place.
        """
        self.known |= other.known
        self.active |= other.active
        return self

    __add__ = __or__
    __iadd__ = __ior__

    def __xor__(self, other):
        """Find the exclusive OR of this one and ``other``.
        """
        return self.copy().__ixor__(other)

    def __ixor__(self, other):
        """Exclusive OR this flag with ``other`` in-place.
        """
        self.known &= other.known
        self.active ^= other.active
        return self

    def __invert__(self):
        new = self.copy()
        new.active = ~self.active
        new.active &= new.known
        return new
class _QueryDQSegDBThread(Thread):
    """Threaded DQSegDB query
    """
    def __init__(self, inqueue, outqueue, *args, **kwargs):
        super().__init__()
        self.in_ = inqueue
        self.out = outqueue
        self.args = args
        self.kwargs = kwargs

    def run(self):
        # take one (index, flag) job from the input queue
        idx, flag = self.in_.get()
        self.in_.task_done()
        try:
            result = DataQualityFlag.query_dqsegdb(
                flag, *self.args, **self.kwargs)
        except Exception as exc:
            # hand the exception back to the consumer instead of dying
            self.out.put((idx, exc))
        else:
            self.out.put((idx, result))
        self.out.task_done()
class DataQualityDict(OrderedDict):
"""An `~collections.OrderedDict` of (key, `DataQualityFlag`) pairs.
Since the `DataQualityDict` is an `OrderedDict`, all iterations over
its elements retain the order in which they were inserted.
"""
_EntryClass = DataQualityFlag
# -- classmethods ---------------------------
@classmethod
def query(cls, flags, *args, **kwargs):
"""Query for segments of a set of flags.
This method intelligently selects the `~DataQualityDict.query_segdb`
or the `~DataQualityDict.query_dqsegdb` methods based on the
``url`` kwarg given.
Parameters
----------
flags : `iterable`
A list of flag names for which to query.
*args
Either, two `float`-like numbers indicating the
GPS [start, stop) interval, or a `SegmentList`
defining a number of summary segments
url : `str`, optional
URL of the segment database, defaults to
``$DEFAULT_SEGMENT_SERVER`` environment variable, or
``'https://segments.ligo.org'``
See also
--------
DataQualityDict.query_segdb
DataQualityDict.query_dqsegdb
for details on the actual query engine, and documentation of
other keyword arguments appropriate for each query
Returns
-------
flagdict : `DataQualityDict`
A `dict` of `(name, DataQualityFlag)` pairs
"""
query_ = _select_query_method(
cls, kwargs.get('url', DEFAULT_SEGMENT_SERVER))
return query_(flags, *args, **kwargs)
@classmethod
def query_segdb(cls, flags, *args, **kwargs):
"""Query the inital LIGO segment database for a list of flags.
Parameters
----------
flags : `iterable`
A list of flag names for which to query.
*args
Either, two `float`-like numbers indicating the
GPS [start, stop) interval, or a `SegmentList`
defining a number of summary segments.
url : `str`, optional
URL of the segment database, defaults to
``$DEFAULT_SEGMENT_SERVER`` environment variable, or
``'https://segments.ligo.org'``
Returns
-------
flagdict : `DataQualityDict`
An ordered `DataQualityDict` of (name, `DataQualityFlag`)
pairs.
"""
warnings.warn("query_segdb is deprecated and will be removed in a "
"future release", DeprecationWarning)
# parse segments
qsegs = _parse_query_segments(args, cls.query_segdb)
url = kwargs.pop('url', DEFAULT_SEGMENT_SERVER)
if kwargs.pop('on_error', None) is not None:
warnings.warn("DataQualityDict.query_segdb doesn't accept the "
"on_error keyword argument")
if kwargs.keys():
raise TypeError("DataQualityDict.query_segdb has no keyword "
"argument '%s'" % list(kwargs.keys()[0]))
# process query
from glue.segmentdb import (segmentdb_utils as segdb_utils,
query_engine as segdb_engine)
connection = segdb_utils.setup_database(url)
engine = segdb_engine.LdbdQueryEngine(connection)
segdefs = []
for flag in flags:
dqflag = DataQualityFlag(name=flag)
ifo = dqflag.ifo
name = dqflag.tag
if dqflag.version is None:
vers = '*'
else:
vers = dqflag.version
for gpsstart, gpsend in qsegs:
if float(gpsend) == +inf:
gpsend = int(to_gps('now'))
gpsstart = float(gpsstart)
if not gpsstart.is_integer():
raise ValueError("Segment database queries can only"
"operate on integer GPS times")
gpsend = float(gpsend)
if not gpsend.is_integer():
raise ValueError("Segment database queries can only"
"operate on integer GPS times")
segdefs += segdb_utils.expand_version_number(
engine, (ifo, name, vers, gpsstart, gpsend, 0, 0))
segs = segdb_utils.query_segments(engine, 'segment', segdefs)
segsum = segdb_utils.query_segments(engine, 'segment_summary', segdefs)
# build output
out = cls()
for definition, segments, summary in zip(segdefs, segs, segsum):
# parse flag name
flag = ':'.join(map(str, definition[:3]))
name = flag.rsplit(':', 1)[0]
# if versionless
if flag.endswith('*'):
flag = name
key = name
# if asked for versionless, but returned a version
elif flag not in flags and name in flags:
key = name
# other
else:
key = flag
# define flag
if key not in out:
out[key] = DataQualityFlag(name=flag)
# add segments
out[key].known.extend(summary)
out[key].active.extend(segments)
return out
@classmethod
def query_dqsegdb(cls, flags, *args, **kwargs):
"""Query the advanced LIGO DQSegDB for a list of flags.
Parameters
----------
flags : `iterable`
A list of flag names for which to query.
*args
Either, two `float`-like numbers indicating the
GPS [start, stop) interval, or a `SegmentList`
defining a number of summary segments.
on_error : `str`
how to handle an error querying for one flag, one of
- `'raise'` (default): raise the Exception
- `'warn'`: print a warning
- `'ignore'`: move onto the next flag as if nothing happened
url : `str`, optional
URL of the segment database, defaults to
``$DEFAULT_SEGMENT_SERVER`` environment variable, or
``'https://segments.ligo.org'``
Returns
-------
flagdict : `DataQualityDict`
An ordered `DataQualityDict` of (name, `DataQualityFlag`)
pairs.
"""
# check on_error flag
on_error = kwargs.pop('on_error', 'raise').lower()
if on_error not in ['raise', 'warn', 'ignore']:
raise ValueError("on_error must be one of 'raise', 'warn', "
"or 'ignore'")
# parse segments
qsegs = _parse_query_segments(args, cls.query_dqsegdb)
# set up threading
inq = Queue()
outq = Queue()
for i in range(len(flags)):
t = _QueryDQSegDBThread(inq, outq, qsegs, **kwargs)
t.setDaemon(True)
t.start()
for i, flag in enumerate(flags):
inq.put((i, flag))
# capture output
inq.join()
outq.join()
new = cls()
results = list(zip(*sorted([outq.get() for i in range(len(flags))],
key=lambda x: x[0])))[1]
for result, flag in zip(results, flags):
if isinstance(result, Exception):
result.args = ('%s [%s]' % (str(result), str(flag)),)
if on_error == 'ignore':
pass
elif on_error == 'warn':
warnings.warn(str(result))
else:
raise result
else:
new[flag] = result
return new
@classmethod
def read(cls, source, names=None, format=None, **kwargs):
"""Read segments from file into a `DataQualityDict`
Parameters
----------
source : `str`
path of file to read
format : `str`, optional
source format identifier. If not given, the format will be
detected if possible. See below for list of acceptable
formats.
names : `list`, optional, default: read all names found
list of names to read, by default all names are read separately.
coalesce : `bool`, optional
if `True` coalesce the all segment lists before returning,
otherwise return exactly as contained in file(s).
nproc : `int`, optional, default: 1
number of CPUs to use for parallel reading of multiple files
verbose : `bool`, optional, default: `False`
print a progress bar showing read status
Returns
-------
flagdict : `DataQualityDict`
a new `DataQualityDict` of `DataQualityFlag` entries with
``active`` and ``known`` segments seeded from the XML tables
in the given file.
Notes
-----"""
on_missing = kwargs.pop('on_missing', 'error')
coalesce = kwargs.pop('coalesce', False)
if 'flags' in kwargs: # pragma: no cover
warnings.warn('\'flags\' keyword was renamed \'names\', this '
'warning will result in an error in the future')
names = kwargs.pop('flags')
def combiner(inputs):
out = cls()
# check all names are contained
required = set(names or [])
found = set(name for dqdict in inputs for name in dqdict)
for name in required - found: # validate all names are found once
msg = '{!r} not found in any input file'.format(name)
if on_missing == 'ignore':
continue
if on_missing == 'warn':
warnings.warn(msg)
else:
raise ValueError(msg)
# combine flags
for dqdict in inputs:
for flag in dqdict:
try: # repeated occurence
out[flag].known.extend(dqdict[flag].known)
out[flag].active.extend(dqdict[flag].active)
except KeyError: # first occurence
out[flag] = dqdict[flag]
if coalesce:
return out.coalesce()
return out
return io_read_multi(combiner, cls, source, names=names,
format=format, on_missing='ignore', **kwargs)
@classmethod
def from_veto_definer_file(cls, fp, start=None, end=None, ifo=None,
format='ligolw'):
"""Read a `DataQualityDict` from a LIGO_LW XML VetoDefinerTable.
Parameters
----------
fp : `str`
path of veto definer file to read
start : `~gwpy.time.LIGOTimeGPS`, `int`, optional
GPS start time at which to restrict returned flags
end : `~gwpy.time.LIGOTimeGPS`, `int`, optional
GPS end time at which to restrict returned flags
ifo : `str`, optional
interferometer prefix whose flags you want to read
format : `str`, optional
format of file to read, currently only 'ligolw' is supported
Returns
-------
flags : `DataQualityDict`
a `DataQualityDict` of flags parsed from the `veto_def_table`
of the input file.
Notes
-----
This method does not automatically `~DataQualityDict.populate`
the `active` segment list of any flags, a separate call should
be made for that as follows
>>> flags = DataQualityDict.from_veto_definer_file('/path/to/file.xml')
>>> flags.populate()
"""
if format != 'ligolw':
raise NotImplementedError("Reading veto definer from non-ligolw "
"format file is not currently "
"supported")
# read veto definer file
with get_readable_fileobj(fp, show_progress=False) as fobj:
from ..io.ligolw import read_table as read_ligolw_table
veto_def_table = read_ligolw_table(fobj, 'veto_definer')
if start is not None:
start = to_gps(start)
if end is not None:
end = to_gps(end)
# parse flag definitions
out = cls()
for row in veto_def_table:
if ifo and row.ifo != ifo:
continue
if start and 0 < row.end_time <= start:
continue
elif start:
row.start_time = max(row.start_time, start)
if end and row.start_time >= end:
continue
elif end and not row.end_time:
row.end_time = end
elif end:
row.end_time = min(row.end_time, end)
flag = DataQualityFlag.from_veto_def(row)
if flag.name in out:
out[flag.name].known.extend(flag.known)
out[flag.name].known.coalesce()
else:
out[flag.name] = flag
return out
    @classmethod
    def from_ligolw_tables(cls, segmentdeftable, segmentsumtable,
                           segmenttable, names=None, gpstype=LIGOTimeGPS,
                           on_missing='error'):
        """Build a `DataQualityDict` from a set of LIGO_LW segment tables

        Parameters
        ----------
        segmentdeftable : :class:`~ligo.lw.lsctables.SegmentDefTable`
            the ``segment_definer`` table to read

        segmentsumtable : :class:`~ligo.lw.lsctables.SegmentSumTable`
            the ``segment_summary`` table to read

        segmenttable : :class:`~ligo.lw.lsctables.SegmentTable`
            the ``segment`` table to read

        names : `list` of `str`, optional
            a list of flag names to read, defaults to returning all

        gpstype : `type`, `callable`, optional
            class to use for GPS times in returned objects, can be a function
            to convert GPS time to something else, default is
            `~gwpy.time.LIGOTimeGPS`

        on_missing : `str`, optional
            action to take when a one or more ``names`` are not found in
            the ``segment_definer`` table, one of

            - ``'ignore'`` : do nothing
            - ``'warn'`` : print a warning
            - ``error'`` : raise a `ValueError`

        Returns
        -------
        dqdict : `DataQualityDict`
            a dict of `DataQualityFlag` objects populated from the LIGO_LW
            tables
        """
        out = cls()
        id_ = dict()  # need to record relative IDs from LIGO_LW
        # read segment definers and generate DataQualityFlag object
        for row in segmentdeftable:
            ifos = sorted(row.instruments)
            ifo = ''.join(ifos) if ifos else None
            tag = row.name
            version = row.version
            # flag name is '<ifo>:<tag>:<version>', omitting unset parts
            name = ':'.join([str(k) for k in (ifo, tag, version) if
                             k is not None])
            if names is None or name in names:
                out[name] = DataQualityFlag(name)
                thisid = int(row.segment_def_id)
                # one flag may map to several definer rows, so keep a
                # list of IDs per name (EAFP: create on first KeyError)
                try:
                    id_[name].append(thisid)
                except (AttributeError, KeyError):
                    id_[name] = [thisid]
        # verify all requested flags were found
        for flag in names or []:
            if flag not in out and on_missing != 'ignore':
                msg = ("no segment definition found for flag={0!r} in "
                       "file".format(flag))
                if on_missing == 'warn':
                    warnings.warn(msg)
                else:
                    raise ValueError(msg)
        # parse a table into the target DataQualityDict

        def _parse_segments(table, listattr):
            # append each row's segment to the matching flag's
            # ``known`` or ``active`` list (``listattr``)
            for row in table:
                for flag in out:
                    # match row ID to list of IDs found for this flag
                    if int(row.segment_def_id) in id_[flag]:
                        getattr(out[flag], listattr).append(
                            Segment(*map(gpstype, row.segment)),
                        )
                        break
        # read segment summary table as 'known'
        _parse_segments(segmentsumtable, "known")
        # read segment table as 'active'
        _parse_segments(segmenttable, "active")
        return out
    def to_ligolw_tables(self, ilwdchar_compat=None, **attrs):
        """Convert this `DataQualityDict` into a trio of LIGO_LW segment tables

        Parameters
        ----------
        ilwdchar_compat : `bool`, optional
            whether to write in the old format, compatible with
            ILWD characters (`True`), or to use the new format (`False`);
            the current default is `True` to maintain backwards
            compatibility, but this will change for gwpy-1.0.0.

        **attrs
            other attributes to add to all rows in all tables
            (e.g. ``'process_id'``)

        Returns
        -------
        segmentdeftable : :class:`~ligo.lw.lsctables.SegmentDefTable`
            the ``segment_definer`` table

        segmentsumtable : :class:`~ligo.lw.lsctables.SegmentSumTable`
            the ``segment_summary`` table

        segmenttable : :class:`~ligo.lw.lsctables.SegmentTable`
            the ``segment`` table
        """
        if ilwdchar_compat is None:
            warnings.warn("ilwdchar_compat currently defaults to `True`, "
                          "but this will change to `False` in the future, to "
                          "maintain compatibility in future releases, "
                          "manually specify `ilwdchar_compat=True`",
                          PendingDeprecationWarning)
            ilwdchar_compat = True
        # glue implements the old ilwd:char row IDs, ligo.lw the new ints
        if ilwdchar_compat:
            from glue.ligolw import lsctables
        else:
            from ligo.lw import lsctables
        from ..io.ligolw import to_table_type as to_ligolw_table_type
        SegmentDefTable = lsctables.SegmentDefTable
        SegmentSumTable = lsctables.SegmentSumTable
        SegmentTable = lsctables.SegmentTable
        segdeftab = lsctables.New(SegmentDefTable)
        segsumtab = lsctables.New(SegmentSumTable)
        segtab = lsctables.New(SegmentTable)

        def _write_attrs(table, row):
            # apply user-supplied ``**attrs`` to one row, converting each
            # value to the type the target table column expects
            for key, val in attrs.items():
                setattr(row, key, to_ligolw_table_type(val, table, key))
        # write flags to tables
        for flag in self.values():
            # segment definer
            segdef = segdeftab.RowType()
            for col in segdeftab.columnnames:  # default all columns to None
                setattr(segdef, col, None)
            segdef.instruments = {flag.ifo}
            segdef.name = flag.tag
            segdef.version = flag.version
            segdef.comment = flag.description
            segdef.insertion_time = to_gps(datetime.datetime.now()).gpsSeconds
            segdef.segment_def_id = SegmentDefTable.get_next_id()
            _write_attrs(segdeftab, segdef)
            segdeftab.append(segdef)
            # write segment summary (known segments)
            for vseg in flag.known:
                segsum = segsumtab.RowType()
                for col in segsumtab.columnnames:  # default columns to None
                    setattr(segsum, col, None)
                segsum.segment_def_id = segdef.segment_def_id
                # NOTE(review): assigns a lazy ``map`` object; presumably
                # the row's ``segment`` property setter unpacks the
                # 2-item iterable -- confirm against lsctables
                segsum.segment = map(LIGOTimeGPS, vseg)
                segsum.comment = None
                segsum.segment_sum_id = SegmentSumTable.get_next_id()
                _write_attrs(segsumtab, segsum)
                segsumtab.append(segsum)
            # write segment table (active segments)
            for aseg in flag.active:
                seg = segtab.RowType()
                for col in segtab.columnnames:  # default all columns to None
                    setattr(seg, col, None)
                seg.segment_def_id = segdef.segment_def_id
                seg.segment = map(LIGOTimeGPS, aseg)
                seg.segment_id = SegmentTable.get_next_id()
                _write_attrs(segtab, seg)
                segtab.append(seg)
        return segdeftab, segsumtab, segtab
# -- methods --------------------------------
def write(self, target, *args, **kwargs):
"""Write this `DataQualityDict` to file
Notes
-----"""
return io_registry.write(self, target, *args, **kwargs)
def coalesce(self):
"""Coalesce all segments lists in this `DataQualityDict`.
**This method modifies this object in-place.**
Returns
-------
self
a view of this flag, not a copy.
"""
for flag in self:
self[flag].coalesce()
return self
    def populate(self, source=DEFAULT_SEGMENT_SERVER,
                 segments=None, pad=True, on_error='raise', **kwargs):
        """Query the segment database for each flag's active segments.

        This method assumes all of the metadata for each flag have been
        filled. Minimally, the following attributes must be filled

        .. autosummary::

           ~DataQualityFlag.name
           ~DataQualityFlag.known

        Segments will be fetched from the database, with any
        :attr:`~DataQualityFlag.padding` added on-the-fly.

        Entries in this dict will be modified in-place.

        Parameters
        ----------
        source : `str`
            source of segments for this flag. This must be
            either a URL for a segment database or a path to a file on disk.

        segments : `SegmentList`, optional
            a list of known segments during which to query, if not given,
            existing known segments for flags will be used.

        pad : `bool`, optional, default: `True`
            apply the `~DataQualityFlag.padding` associated with each
            flag, default: `True`.

        on_error : `str`
            how to handle an error querying for one flag, one of

            - `'raise'` (default): raise the Exception
            - `'warn'`: print a warning
            - `'ignore'`: move onto the next flag as if nothing happened

        **kwargs
            any other keyword arguments to be passed to
            :meth:`DataQualityFlag.query` or :meth:`DataQualityFlag.read`.

        Returns
        -------
        self : `DataQualityDict`
            a reference to the modified DataQualityDict
        """
        # check on_error flag
        if on_error not in ['raise', 'warn', 'ignore']:
            raise ValueError("on_error must be one of 'raise', 'warn', "
                             "or 'ignore'")
        # format source
        source = urlparse(source)
        # perform query for all segments
        if source.netloc and segments is not None:
            # remote source with explicit segments: query the database
            # once for all flags
            segments = SegmentList(map(Segment, segments))
            tmp = type(self).query(self.keys(), segments, url=source.geturl(),
                                   on_error=on_error, **kwargs)
        elif not source.netloc:
            # local source: read all flags from the file on disk
            tmp = type(self).read(source.geturl(), **kwargs)
        # apply padding and wrap to given known segments
        for key in self:
            if segments is None and source.netloc:
                # remote source without explicit segments: query each
                # flag separately over its own known segments
                try:
                    tmp = {key: self[key].query(
                        self[key].name, self[key].known, **kwargs)}
                except URLError as exc:
                    if on_error == 'ignore':
                        pass
                    elif on_error == 'warn':
                        warnings.warn('Error querying for %s: %s' % (key, exc))
                    else:
                        raise
                    continue
            self[key].known &= tmp[key].known
            self[key].active = tmp[key].active
            if pad:
                self[key] = self[key].pad(inplace=True)
            # restrict results to the user-supplied segments, if given
            if segments is not None:
                self[key].known &= segments
                self[key].active &= segments
        return self
def copy(self, deep=False):
"""Build a copy of this dictionary.
Parameters
----------
deep : `bool`, optional, default: `False`
perform a deep copy of the original dictionary with a fresh
memory address
Returns
-------
flag2 : `DataQualityFlag`
a copy of the original dictionary
"""
if deep:
return deepcopy(self)
return super().copy()
def __iand__(self, other):
for key, value in other.items():
if key in self:
self[key] &= value
else:
self[key] = self._EntryClass()
return self
def __and__(self, other):
if (
sum(len(s.active) for s in self.values())
<= sum(len(s.active) for s in other.values())
):
return self.copy(deep=True).__iand__(other)
return other.copy(deep=True).__iand__(self)
def __ior__(self, other):
for key, value in other.items():
if key in self:
self[key] |= value
else:
self[key] = shallowcopy(value)
return self
def __or__(self, other):
if (
sum(len(s.active) for s in self.values())
>= sum(len(s.active) for s in other.values())
):
return self.copy(deep=True).__ior__(other)
return other.copy(deep=True).__ior__(self)
__iadd__ = __ior__
__add__ = __or__
def __isub__(self, other):
for key, value in other.items():
if key in self:
self[key] -= value
return self
def __sub__(self, other):
return self.copy(deep=True).__isub__(other)
def __ixor__(self, other):
for key, value in other.items():
if key in self:
self[key] ^= value
return self
def __xor__(self, other):
return self.copy(deep=True).__ixor__(other)
def __invert__(self):
new = self.copy(deep=True)
for key, value in new.items():
new[key] = ~value
return new
def union(self):
"""Return the union of all flags in this dict
Returns
-------
union : `DataQualityFlag`
a new `DataQualityFlag` who's active and known segments
are the union of those of the values of this dict
"""
usegs = reduce(operator.or_, self.values())
usegs.name = ' | '.join(self.keys())
return usegs
def intersection(self):
"""Return the intersection of all flags in this dict
Returns
-------
intersection : `DataQualityFlag`
a new `DataQualityFlag` who's active and known segments
are the intersection of those of the values of this dict
"""
isegs = reduce(operator.and_, self.values())
isegs.name = ' & '.join(self.keys())
return isegs
def plot(self, label='key', **kwargs):
"""Plot this flag on a segments projection.
Parameters
----------
label : `str`, optional
Labelling system to use, or fixed label for all flags,
special values include
- ``'key'``: use the key of the `DataQualityDict`,
- ``'name'``: use the :attr:`~DataQualityFlag.name` of the flag
If anything else, that fixed label will be used for all lines.
**kwargs
all keyword arguments are passed to the
:class:`~gwpy.plot.Plot` constructor.
Returns
-------
figure : `~matplotlib.figure.Figure`
the newly created figure, with populated Axes.
See also
--------
matplotlib.pyplot.figure
for documentation of keyword arguments used to create the
figure
matplotlib.figure.Figure.add_subplot
for documentation of keyword arguments used to create the
axes
gwpy.plot.SegmentAxes.plot_segmentlist
for documentation of keyword arguments used in rendering the data
"""
# make plot
from ..plot import Plot
plot = Plot(self, projection='segments', **kwargs)
# update labels
artists = [x for ax in plot.axes for x in ax.collections]
for key, artist in zip(self, artists):
if label.lower() == 'name':
lab = self[key].name
elif label.lower() != 'key':
lab = key
else:
lab = label
artist.set_label(lab)
return plot
| gpl-3.0 |
kashif/scikit-learn | examples/classification/plot_lda_qda.py | 29 | 4952 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
###############################################################################
# colormap
# two-colour map blending red (class 0) into blue (class 1); registered
# by name so pcolormesh can look it up as 'red_blue_classes'
cmap = colors.LinearSegmentedColormap(
    'red_blue_classes',
    {'red': [(0, 1, 1), (1, 0.7, 0.7)],
     'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
     'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
    """Generate two Gaussian blobs sharing one covariance matrix.

    Returns ``(X, y)`` with 300 samples per class; class 1 is the same
    distribution as class 0 shifted by ``(1, 1)``.
    """
    n_samples, n_features = 300, 2
    np.random.seed(0)  # deterministic data for a reproducible figure
    mixing = np.array([[0., -0.23], [0.83, .23]])
    class0 = np.random.randn(n_samples, n_features).dot(mixing)
    class1 = np.random.randn(n_samples, n_features).dot(mixing) + np.array([1, 1])
    X = np.vstack((class0, class1))
    y = np.hstack((np.zeros(n_samples), np.ones(n_samples)))
    return X, y
def dataset_cov():
    """Generate two Gaussian blobs with different covariance matrices.

    Returns ``(X, y)`` with 300 samples per class; class 1 uses the
    transposed mixing matrix and is shifted by ``(1, 4)``.
    """
    n_samples, n_features = 300, 2
    np.random.seed(0)  # deterministic data for a reproducible figure
    mixing = np.array([[0., -1.], [2.5, .7]]) * 2.
    class0 = np.random.randn(n_samples, n_features).dot(mixing)
    class1 = np.random.randn(n_samples, n_features).dot(mixing.T) + np.array([1, 4])
    X = np.vstack((class0, class1))
    y = np.hstack((np.zeros(n_samples), np.ones(n_samples)))
    return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
    """Scatter one dataset in a 2x2 subplot grid, darkening misclassified
    points, and shade the classifier's class-1 probability field.

    Returns the subplot axes so covariance ellipses can be added later.
    """
    splot = plt.subplot(2, 2, fig_index)
    # titles/labels only on the outer edges of the 2x2 grid
    if fig_index == 1:
        plt.title('Linear Discriminant Analysis')
        plt.ylabel('Data with fixed covariance')
    elif fig_index == 2:
        plt.title('Quadratic Discriminant Analysis')
    elif fig_index == 3:
        plt.ylabel('Data with varying covariances')
    tp = (y == y_pred)  # correctly-classified mask
    tp0, tp1 = tp[y == 0], tp[y == 1]
    X0, X1 = X[y == 0], X[y == 1]
    X0_tp, X0_fp = X0[tp0], X0[~tp0]
    X1_tp, X1_fp = X1[tp1], X1[~tp1]
    # class 0: dots (misclassified points drawn smaller and darker)
    plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
    plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000')  # dark red
    # class 1: dots
    plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
    plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099')  # dark blue
    # class 0 and 1 : areas
    nx, ny = 200, 100
    x_min, x_max = plt.xlim()
    y_min, y_max = plt.ylim()
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
                         np.linspace(y_min, y_max, ny))
    # P(class 1) over the current view, drawn with the custom colormap
    Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
    Z = Z[:, 1].reshape(xx.shape)
    plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
                   norm=colors.Normalize(0., 1.))
    # decision boundary at P = 0.5
    plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
    # means
    plt.plot(lda.means_[0][0], lda.means_[0][1],
             'o', color='black', markersize=10)
    plt.plot(lda.means_[1][0], lda.means_[1][1],
             'o', color='black', markersize=10)
    return splot
def plot_ellipse(splot, mean, cov, color):
    """Draw a filled 2-sigma covariance ellipse on *splot*."""
    eigvals, eigvecs = linalg.eigh(cov)
    # orientation of the principal axis, in degrees
    direction = eigvecs[0] / linalg.norm(eigvecs[0])
    angle = np.arctan(direction[1] / direction[0])
    angle = 180 * angle / np.pi  # convert to degrees
    # filled Gaussian at 2 standard deviation
    ellipse = mpl.patches.Ellipse(mean, 2 * eigvals[0] ** 0.5,
                                  2 * eigvals[1] ** 0.5,
                                  180 + angle, color=color)
    ellipse.set_clip_box(splot.bbox)
    ellipse.set_alpha(0.5)
    splot.add_artist(ellipse)
    splot.set_xticks(())
    splot.set_yticks(())
def plot_lda_cov(lda, splot):
    """Overlay the shared LDA covariance ellipse at each class mean."""
    for mean, color in zip(lda.means_, ('red', 'blue')):
        plot_ellipse(splot, mean, lda.covariance_, color)
def plot_qda_cov(qda, splot):
    """Overlay each class's own QDA covariance ellipse at its mean."""
    for mean, cov, color in zip(qda.means_, qda.covariances_, ('red', 'blue')):
        plot_ellipse(splot, mean, cov, color)
###############################################################################
# fit LDA and QDA on both datasets and fill the 2x2 figure:
# rows = datasets (fixed vs varying covariance), columns = classifiers
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
    # Linear Discriminant Analysis
    lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
    y_pred = lda.fit(X, y).predict(X)
    splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
    plot_lda_cov(lda, splot)
    plt.axis('tight')
    # Quadratic Discriminant Analysis
    # NOTE(review): `store_covariances` was renamed `store_covariance`
    # in later scikit-learn releases -- this example targets the old API
    qda = QuadraticDiscriminantAnalysis(store_covariances=True)
    y_pred = qda.fit(X, y).predict(X)
    splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
    plot_qda_cov(qda, splot)
    plt.axis('tight')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis')
plt.show()
| bsd-3-clause |
evanbiederstedt/RRBSfun | epiphen/cll_tests/annotation_scripts/total_cells_annotate_columns.py | 1 | 34503 | import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)  # show up to 50 columns when printing
import os
# all per-cell binary-position CSVs live in this cluster directory
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
# collect input files by cell population
normalB = glob.glob("binary_position_RRBS_normal_B_cell*")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*")
pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*")
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
# sanity-check the file counts per group
print(len(normalB))
print(len(mcell))
print(len(pcell))
print(len(cd19cell))
print(len(cw154))
print(len(trito))
totalfiles = normalB + mcell + pcell + cd19cell + cw154 + trito
print(len(totalfiles))
# load every file, dropping the stray index column written by to_csv
df_list = []
for file in totalfiles:
    df = pd.read_csv(file)
    df = df.drop("Unnamed: 0", axis=1)
    df_list.append(df)
print(len(df_list))
# outer-join all cells on genomic position (one column per cell)
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
indexed_matrix = total_matrix  # alias to the frame that still has the 'index' (position) column; the next line rebinds total_matrix to a new frame
total_matrix = total_matrix.drop("index", axis=1)
drop_columns = total_matrix  # alias kept in order to create the 0/1/? matrix such that each character is a column
len(drop_columns.columns)  # NOTE(review): bare expression, result discarded -- presumably interactive leftovers
len(total_matrix.columns)
cell_samples = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG",
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GACACG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.ACAACC',
'RRBS_trito_pool_1_TAAGGCGA.ACGTGG',
'RRBS_trito_pool_1_TAAGGCGA.ACTCAC',
'RRBS_trito_pool_1_TAAGGCGA.ATAGCG',
'RRBS_trito_pool_1_TAAGGCGA.ATCGAC',
'RRBS_trito_pool_1_TAAGGCGA.CAAGAG',
'RRBS_trito_pool_1_TAAGGCGA.CATGAC',
'RRBS_trito_pool_1_TAAGGCGA.CCTTCG',
'RRBS_trito_pool_1_TAAGGCGA.CGGTAG',
'RRBS_trito_pool_1_TAAGGCGA.CTATTG',
'RRBS_trito_pool_1_TAAGGCGA.GACACG',
'RRBS_trito_pool_1_TAAGGCGA.GCATTC',
'RRBS_trito_pool_1_TAAGGCGA.GCTGCC',
'RRBS_trito_pool_1_TAAGGCGA.GGCATC',
'RRBS_trito_pool_1_TAAGGCGA.GTGAGG',
'RRBS_trito_pool_1_TAAGGCGA.GTTGAG',
'RRBS_trito_pool_1_TAAGGCGA.TAGCGG',
'RRBS_trito_pool_1_TAAGGCGA.TATCTC',
'RRBS_trito_pool_1_TAAGGCGA.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.TGACAG',
'RRBS_trito_pool_1_TAAGGCGA.TGCTGC',
'RRBS_trito_pool_2_CGTACTAG.ACAACC',
'RRBS_trito_pool_2_CGTACTAG.ACGTGG',
'RRBS_trito_pool_2_CGTACTAG.ACTCAC',
'RRBS_trito_pool_2_CGTACTAG.AGGATG',
'RRBS_trito_pool_2_CGTACTAG.ATAGCG',
'RRBS_trito_pool_2_CGTACTAG.ATCGAC',
'RRBS_trito_pool_2_CGTACTAG.CAAGAG',
'RRBS_trito_pool_2_CGTACTAG.CATGAC',
'RRBS_trito_pool_2_CGTACTAG.CCTTCG',
'RRBS_trito_pool_2_CGTACTAG.CGGTAG',
'RRBS_trito_pool_2_CGTACTAG.CTATTG',
'RRBS_trito_pool_2_CGTACTAG.GACACG',
'RRBS_trito_pool_2_CGTACTAG.GCATTC',
'RRBS_trito_pool_2_CGTACTAG.GCTGCC',
'RRBS_trito_pool_2_CGTACTAG.GGCATC',
'RRBS_trito_pool_2_CGTACTAG.GTGAGG',
'RRBS_trito_pool_2_CGTACTAG.GTTGAG',
'RRBS_trito_pool_2_CGTACTAG.TAGCGG',
'RRBS_trito_pool_2_CGTACTAG.TATCTC',
'RRBS_trito_pool_2_CGTACTAG.TCTCTG',
'RRBS_trito_pool_2_CGTACTAG.TGACAG']
# Label the methylation-state matrix columns with the per-cell sample names
# collected in `cell_samples` above.
total_matrix.columns = cell_samples
print(total_matrix.shape)
# Encode each methylation call as an int, with "?" standing for missing data,
# then collapse every column into one long string per cell.
drop_columns = drop_columns.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
drop_columns = drop_columns.astype(str).apply(''.join)
drop_columns = drop_columns.reset_index()
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
total_matrix = total_matrix.astype(str).apply(''.join)
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/CLL_tests")
# Prefix each encoded string with its cell-sample index, space-separated.
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
# NOTE(review): this concatenates `total_matrix` values against the
# `drop_columns` index — presumably `drop_columns.astype(str)` was intended
# as the second operand; confirm before relying on this series.
tott_drop_columns = pd.Series(drop_columns.index.astype(str).str.cat(total_matrix.astype(str),' '))
print(tott.shape)
print(tott_drop_columns.shape)
# Explode each encoded string into one column per character (per CpG site).
df_tott_column_position = tott_drop_columns.apply(lambda x: pd.Series(list(x)))
df_tott_column_position.drop( df_tott_column_position.columns[[i for i in range(6)]], axis=1, inplace=True) ## drop first 6 columns (indices 0-5: the index prefix and separator)
### rename columns to the genomic coordinates recorded in `indexed_matrix`
df_tott_column_position = df_tott_column_position.reindex(columns=indexed_matrix["index"].tolist())
# Zero-pad chromosome and position so lexicographic sort == genomic order.
# NOTE(review): the character class "[a-z-A-Z]" also matches '-' — verify the
# pattern against the actual coordinate labels.
integers_to_sort = df_tott_column_position.columns.to_series().str.extract("([a-z-A-Z]+)(\d*)_(\d+)", expand=True) # use str.extract to get integers to sort
integers_to_sort[1] = integers_to_sort[1].str.zfill(2)
integers_to_sort[2] = integers_to_sort[2].str.zfill(10)
integers_to_sort["new_coordinates"] = integers_to_sort.apply(lambda x: "{}{}_{}".format(x[0],x[1],x[2]), axis=1)
df_tott_column_position.columns = integers_to_sort["new_coordinates"]
df_tott_column_position.columns.name = None
df_tott_column_position = df_tott_column_position.sort_index(axis=1)
df_tott_column_position.insert(0, "cell_sample", value = cell_samples)
df_tott_column_position.to_csv("total_cells_genomic_coordinates_sorted.csv", header=None, index=None)
| mit |
thilbern/scikit-learn | sklearn/datasets/base.py | 12 | 17971 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
    """Container object for datasets.

    A plain ``dict`` whose keys are additionally reachable as
    attributes (``bunch.key`` is ``bunch['key']``).
    """

    def __init__(self, **kwargs):
        super(Bunch, self).__init__(**kwargs)
        # Aliasing the instance dict to the mapping itself makes
        # attribute access and item access share the same storage.
        self.__dict__ = self
def get_data_home(data_home=None):
    """Return the path of the scikit-learn data dir, creating it if needed.

    The directory caches large downloaded datasets. When `data_home` is
    None, the 'SCIKIT_LEARN_DATA' environment variable is consulted,
    falling back to '~/scikit_learn_data'. A leading '~' is expanded to
    the user's home folder.
    """
    path = data_home
    if path is None:
        default = join('~', 'scikit_learn_data')
        path = environ.get('SCIKIT_LEARN_DATA', default)
    path = expanduser(path)
    if not exists(path):
        makedirs(path)
    return path
def clear_data_home(data_home=None):
    """Delete all the content of the data home cache.

    Resolves `data_home` exactly like :func:`get_data_home` (which also
    creates the directory if absent) and then removes the whole tree.
    """
    shutil.rmtree(get_data_home(data_home))
def load_files(container_path, description=None, categories=None,
               load_content=True, shuffle=True, encoding=None,
               decode_error='strict', random_state=0):
    """Load text files whose category is given by their subfolder name.

    Samples are expected in a two-level layout::

        container_folder/
            category_1_folder/ file_1.txt ... file_42.txt
            category_2_folder/ file_43.txt ...

    Folder names provide the supervised labels; individual file names
    are irrelevant. No feature extraction is performed — pair this with
    `sklearn.feature_extraction.text` for learning on the content.

    Parameters
    ----------
    container_path : string or unicode
        Path to the main folder holding one subfolder per category.
    description : string or unicode, optional (default=None)
        Free-text description stored in the returned bunch as 'DESCR'.
    categories : collection of strings or None, optional (default=None)
        If not None, only load these category folders.
    load_content : boolean, optional (default=True)
        If True the returned bunch carries a 'data' attribute with the
        raw file contents; otherwise only 'filenames' is provided.
    shuffle : bool, optional (default=True)
        Whether to shuffle samples, which matters for estimators that
        assume i.i.d. input (e.g. stochastic gradient descent).
    encoding : string or None (default is None)
        If not None, decode file bytes to Unicode with this codec;
        leave None for binary content such as images.
    decode_error : {'strict', 'ignore', 'replace'}, optional
        Passed as the 'errors' argument of ``bytes.decode``.
    random_state : int, RandomState instance or None, optional (default=0)
        Seed / generator controlling the shuffle.

    Returns
    -------
    data : Bunch
        With attributes 'data' (if load_content) or 'filenames',
        plus 'target', 'target_names' and 'DESCR'.
    """
    # Category folders, in deterministic (sorted) order.
    subentries = sorted(listdir(container_path))
    folders = [name for name in subentries
               if isdir(join(container_path, name))]
    if categories is not None:
        folders = [name for name in folders if name in categories]

    target = []
    target_names = []
    filenames = []
    for label, folder in enumerate(folders):
        target_names.append(folder)
        folder_path = join(container_path, folder)
        documents = [join(folder_path, entry)
                     for entry in sorted(listdir(folder_path))]
        filenames.extend(documents)
        target.extend(len(documents) * [label])

    # Arrays allow the fancy indexing used by the shuffle below.
    filenames = np.array(filenames)
    target = np.array(target)

    if shuffle:
        rng = check_random_state(random_state)
        order = np.arange(filenames.shape[0])
        rng.shuffle(order)
        filenames = filenames[order]
        target = target[order]

    if not load_content:
        return Bunch(filenames=filenames,
                     target_names=target_names,
                     target=target,
                     DESCR=description)

    data = []
    for path in filenames:
        with open(path, 'rb') as handle:
            data.append(handle.read())
    if encoding is not None:
        data = [raw.decode(encoding, decode_error) for raw in data]
    return Bunch(data=data,
                 filenames=filenames,
                 target_names=target_names,
                 target=target,
                 DESCR=description)
def load_iris():
    """Load and return the iris dataset (classification).

    The iris dataset is a classic and very easy multi-class classification
    dataset.

    =================   ==============
    Classes                          3
    Samples per class               50
    Samples total                  150
    Dimensionality                   4
    Features            real, positive
    =================   ==============

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn, 'target', the classification labels,
        'target_names', the meaning of the labels, 'feature_names', the
        meaning of the features, and 'DESCR', the
        full description of the dataset.

    Examples
    --------
    Let's say you are interested in the samples 10, 25, and 50, and want to
    know their class name.

    >>> from sklearn.datasets import load_iris
    >>> data = load_iris()
    >>> data.target[[10, 25, 50]]
    array([0, 0, 1])
    >>> list(data.target_names)
    ['setosa', 'versicolor', 'virginica']
    """
    module_path = dirname(__file__)
    with open(join(module_path, 'data', 'iris.csv')) as csv_file:
        data_file = csv.reader(csv_file)
        # First row holds the sample/feature counts and the class names.
        temp = next(data_file)
        n_samples = int(temp[0])
        n_features = int(temp[1])
        target_names = np.array(temp[2:])
        data = np.empty((n_samples, n_features))
        # Use the builtin int/float dtypes: the np.int / np.float aliases
        # were deprecated in NumPy 1.20 and removed in 1.24.
        target = np.empty((n_samples,), dtype=int)

        for i, ir in enumerate(data_file):
            data[i] = np.asarray(ir[:-1], dtype=float)
            target[i] = np.asarray(ir[-1], dtype=int)

    with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
        fdescr = rst_file.read()

    return Bunch(data=data, target=target,
                 target_names=target_names,
                 DESCR=fdescr,
                 feature_names=['sepal length (cm)', 'sepal width (cm)',
                                'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
    """Load and return the digits dataset (classification).

    Each datapoint is a 8x8 image of a digit.

    =================   ==============
    Classes                         10
    Samples per class             ~180
    Samples total                 1797
    Dimensionality                  64
    Features             integers 0-16
    =================   ==============

    Parameters
    ----------
    n_class : integer, between 0 and 10, optional (default=10)
        The number of classes to return.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn, 'images', the images corresponding
        to each sample, 'target', the classification labels for each
        sample, 'target_names', the meaning of the labels, and 'DESCR',
        the full description of the dataset.

    Examples
    --------
    To load the data and visualize the images::

        >>> from sklearn.datasets import load_digits
        >>> digits = load_digits()
        >>> print(digits.data.shape)
        (1797, 64)
        >>> import pylab as pl #doctest: +SKIP
        >>> pl.gray() #doctest: +SKIP
        >>> pl.matshow(digits.images[0]) #doctest: +SKIP
        >>> pl.show() #doctest: +SKIP
    """
    module_path = dirname(__file__)
    data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
                      delimiter=',')
    with open(join(module_path, 'descr', 'digits.rst')) as f:
        descr = f.read()
    # Last column is the label; the rest are the 64 flattened pixels.
    target = data[:, -1]
    flat_data = data[:, :-1]
    # A reshaped *view* of the flat data — no pixel copy is made.
    images = flat_data.view()
    images.shape = (-1, 8, 8)

    if n_class < 10:
        idx = target < n_class
        flat_data, target = flat_data[idx], target[idx]
        images = images[idx]

    # Builtin int dtype: np.int was deprecated in NumPy 1.20, removed in 1.24.
    return Bunch(data=flat_data,
                 target=target.astype(int),
                 target_names=np.arange(10),
                 images=images,
                 DESCR=descr)
def load_diabetes():
    """Load and return the diabetes dataset (regression).

    ==============   ==================
    Samples total                   442
    Dimensionality                   10
    Features         real, -.2 < x < .2
    Targets          integer 25 - 346
    ==============   ==================

    Returns
    -------
    data : Bunch
        Dictionary-like object with 'data' (the features) and 'target'
        (the regression target for each sample).
    """
    data_dir = join(dirname(__file__), 'data')
    features = np.loadtxt(join(data_dir, 'diabetes_data.csv.gz'))
    labels = np.loadtxt(join(data_dir, 'diabetes_target.csv.gz'))
    return Bunch(data=features, target=labels)
def load_linnerud():
    """Load and return the linnerud dataset (multivariate regression).

    Samples total: 20; dimensionality: 3 for both data and targets;
    features and targets are integers.

    Returns
    -------
    data : Bunch
        Dictionary-like object with 'data' (the exercise measurements),
        'target' (the physiological measurements), 'feature_names',
        'target_names' and 'DESCR'.
    """
    base_dir = join(dirname(__file__), 'data/')
    exercise_csv = base_dir + 'linnerud_exercise.csv'
    physiological_csv = base_dir + 'linnerud_physiological.csv'

    # Numeric body of each file (first line is the header).
    data_exercise = np.loadtxt(exercise_csv, skiprows=1)
    data_physiological = np.loadtxt(physiological_csv, skiprows=1)

    # Column names come from the first line of each file.
    with open(exercise_csv) as f:
        header_exercise = f.readline().split()
    with open(physiological_csv) as f:
        header_physiological = f.readline().split()

    with open(dirname(__file__) + '/descr/linnerud.rst') as f:
        descr = f.read()

    return Bunch(data=data_exercise, feature_names=header_exercise,
                 target=data_physiological,
                 target_names=header_physiological,
                 DESCR=descr)
def load_boston():
    """Load and return the boston house-prices dataset (regression).

    ==============   ==============
    Samples total               506
    Dimensionality               13
    Features         real, positive
    Targets           real 5. - 50.
    ==============   ==============

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn, 'target', the regression targets,
        and 'DESCR', the full description of the dataset.

    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> boston = load_boston()
    >>> print(boston.data.shape)
    (506, 13)
    """
    module_path = dirname(__file__)

    fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
    with open(fdescr_name) as f:
        descr_text = f.read()

    data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
    with open(data_file_name) as f:
        data_file = csv.reader(f)
        # First row: sample and feature counts.
        temp = next(data_file)
        n_samples = int(temp[0])
        n_features = int(temp[1])
        data = np.empty((n_samples, n_features))
        target = np.empty((n_samples,))
        temp = next(data_file)  # names of features
        feature_names = np.array(temp)

        # Builtin float dtype: the np.float alias was deprecated in
        # NumPy 1.20 and removed in 1.24.
        for i, d in enumerate(data_file):
            data[i] = np.asarray(d[:-1], dtype=float)
            target[i] = np.asarray(d[-1], dtype=float)

    return Bunch(data=data,
                 target=target,
                 # last column is target value
                 feature_names=feature_names[:-1],
                 DESCR=descr_text)
def load_sample_images():
    """Load the sample images for image manipulation.

    Loads both ``china`` and ``flower``.

    Returns
    -------
    data : Bunch
        Dictionary-like object with attributes 'images' (the two sample
        images), 'filenames' (the corresponding file names) and 'DESCR'
        (the full description of the dataset).

    Examples
    --------
    To load the data and visualize the images:

    >>> from sklearn.datasets import load_sample_images
    >>> dataset = load_sample_images()     #doctest: +SKIP
    >>> len(dataset.images)                #doctest: +SKIP
    2
    >>> dataset.images[0].shape            #doctest: +SKIP
    (427, 640, 3)
    """
    # Import imread lazily so this module does not hard-depend on PIL;
    # its location inside scipy has moved between versions.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
    except ImportError:
        raise ImportError("The Python Imaging Library (PIL) "
                          "is required to load data from jpeg files")
    image_dir = join(dirname(__file__), "images")
    with open(join(image_dir, 'README.txt')) as f:
        descr = f.read()
    jpeg_paths = [join(image_dir, name)
                  for name in os.listdir(image_dir)
                  if name.endswith(".jpg")]
    # Decode every jpeg found in the images folder.
    decoded = [imread(path) for path in jpeg_paths]
    return Bunch(images=decoded,
                 filenames=jpeg_paths,
                 DESCR=descr)
def load_sample_image(image_name):
    """Load the numpy array of a single sample image.

    Parameters
    ----------
    image_name : {`china.jpg`, `flower.jpg`}
        The name of the sample image loaded.

    Returns
    -------
    img : 3D array
        The image as a numpy array: height x width x color.

    Raises
    ------
    AttributeError
        If no bundled sample image matches `image_name`.

    Examples
    --------
    >>> from sklearn.datasets import load_sample_image
    >>> china = load_sample_image('china.jpg')   # doctest: +SKIP
    >>> china.shape                              # doctest: +SKIP
    (427, 640, 3)
    """
    dataset = load_sample_images()
    # Return the first bundled image whose path ends with the requested name.
    for position, filename in enumerate(dataset.filenames):
        if filename.endswith(image_name):
            return dataset.images[position]
    raise AttributeError("Cannot find sample image: %s" % image_name)
| bsd-3-clause |
frank-tancf/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
    """Return an (N, D) benchmark array.

    'dense' yields reproducible uniform noise (seed 0); 'digits' takes
    the first N samples of the digits data with its D highest-valued
    feature columns (ranked by the first sample) first.
    """
    if dataset == 'dense':
        np.random.seed(0)
        return np.random.random((N, D))
    if dataset == 'digits':
        X = datasets.load_digits().data
        order = np.argsort(X[0])[::-1]
        return X[:, order][:N, :D]
    raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
                      Drange=2 ** np.arange(7),
                      krange=2 ** np.arange(10),
                      N=1000,
                      D=64,
                      k=5,
                      leaf_size=30,
                      dataset='digits'):
    """Benchmark the nearest-neighbors algorithms and draw stacked bar plots.

    Construction and query times of 'kd_tree', 'brute' and 'ball_tree'
    are measured while varying, in turn, the number of samples (Nrange),
    the dimensionality (Drange) and the number of neighbors (krange);
    the other two quantities are held at their fiducial values N, D, k.

    Parameters
    ----------
    Nrange, Drange, krange : array-like of int
        Values over which N, D and k are varied.
    N, D, k : int
        Fiducial values used while the other quantities vary.
    leaf_size : int
        Leaf size passed to the tree-based algorithms.
    dataset : {'digits', 'dense'}
        Data source; see :func:`get_data`.
    """
    algorithms = ('kd_tree', 'brute', 'ball_tree')
    fiducial_values = {'N': N,
                       'D': D,
                       'k': k}

    #------------------------------------------------------------
    # varying N
    N_results_build = dict((alg, np.zeros(len(Nrange))) for alg in algorithms)
    N_results_query = dict((alg, np.zeros(len(Nrange))) for alg in algorithms)

    for i, NN in enumerate(Nrange):
        print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
        X = get_data(NN, D, dataset)
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            N_results_build[algorithm][i] = (t1 - t0)
            N_results_query[algorithm][i] = (t2 - t1)

    #------------------------------------------------------------
    # varying D
    D_results_build = dict((alg, np.zeros(len(Drange))) for alg in algorithms)
    D_results_query = dict((alg, np.zeros(len(Drange))) for alg in algorithms)

    for i, DD in enumerate(Drange):
        print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
        X = get_data(N, DD, dataset)
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(n_neighbors=k,
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            D_results_build[algorithm][i] = (t1 - t0)
            D_results_query[algorithm][i] = (t2 - t1)

    #------------------------------------------------------------
    # varying k
    k_results_build = dict((alg, np.zeros(len(krange))) for alg in algorithms)
    k_results_query = dict((alg, np.zeros(len(krange))) for alg in algorithms)

    # BUGFIX: this previously read ``get_data(N, DD, dataset)``, reusing the
    # leftover loop variable from the D sweep instead of the fiducial D
    # (correct only by accident when Drange's last value equals D).
    X = get_data(N, D, dataset)

    for i, kk in enumerate(krange):
        print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            k_results_build[algorithm][i] = (t1 - t0)
            k_results_query[algorithm][i] = (t2 - t1)

    pl.figure(figsize=(8, 11))

    for (sbplt, vals, quantity,
         build_time, query_time) in [(311, Nrange, 'N',
                                      N_results_build,
                                      N_results_query),
                                     (312, Drange, 'D',
                                      D_results_build,
                                      D_results_query),
                                     (313, krange, 'k',
                                      k_results_build,
                                      k_results_query)]:
        ax = pl.subplot(sbplt, yscale='log')
        pl.grid(True)

        tick_vals = []
        tick_labels = []

        # Bar baseline: the smallest decade reached by any build time.
        bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
                               for alg in algorithms])

        for i, alg in enumerate(algorithms):
            xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
            width = 0.8

            # Query time (blue) is stacked on top of build time (red).
            c_bar = pl.bar(xvals, build_time[alg] - bottom,
                           width, bottom, color='r')
            q_bar = pl.bar(xvals, query_time[alg],
                           width, build_time[alg], color='b')

            tick_vals += list(xvals + 0.5 * width)
            tick_labels += ['%i' % val for val in vals]

            pl.text((i + 0.02) / len(algorithms), 0.98, alg,
                    transform=ax.transAxes,
                    ha='left',
                    va='top',
                    bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))

        pl.ylabel('Time (s)')

        ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
        ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
        for label in ax.get_xticklabels():
            label.set_rotation(-90)
            label.set_fontsize(10)

        title_string = 'Varying %s' % quantity

        # Annotate which two quantities were held fixed for this panel.
        descr_string = ''
        for s in 'NDk':
            if s != quantity:
                descr_string += '%s = %i, ' % (s, fiducial_values[s])
        descr_string = descr_string[:-2]

        pl.text(1.01, 0.5, title_string,
                transform=ax.transAxes, rotation=-90,
                ha='left', va='center', fontsize=20)
        pl.text(0.99, 0.5, descr_string,
                transform=ax.transAxes, rotation=-90,
                ha='right', va='center')

    pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)

    pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
                 'upper right')
if __name__ == '__main__':
    # Benchmark both the structured digits data and a dense uniform-random
    # dataset, then display the resulting figures.
    barplot_neighbors(dataset='digits')
    barplot_neighbors(dataset='dense')
    pl.show()
| bsd-3-clause |
pandas-ml/pandas-ml | pandas_ml/confusion_matrix/cm.py | 3 | 1079 | #!/usr/bin/python
# -*- coding: utf8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from pandas_ml.confusion_matrix.abstract import ConfusionMatrixAbstract
"""
A Python Pandas Confusion matrix implementation
"""
class ConfusionMatrix(ConfusionMatrixAbstract):
    """Factory-style confusion matrix.

    Instantiating this class returns a BinaryConfusionMatrix when both
    label vectors contain at most two classes and every true label also
    appears among the predictions; otherwise a LabeledConfusionMatrix.
    """

    def __new__(cls, y_true, y_pred, *args, **kwargs):
        true_labels = np.unique(y_true)
        pred_labels = np.unique(y_pred)
        few_classes = len(true_labels) <= 2 and len(pred_labels) <= 2
        if few_classes and not (set(true_labels) - set(pred_labels)):
            from pandas_ml.confusion_matrix.bcm import BinaryConfusionMatrix
            return BinaryConfusionMatrix(y_true, y_pred, *args, **kwargs)
        return LabeledConfusionMatrix(y_true, y_pred, *args, **kwargs)
class LabeledConfusionMatrix(ConfusionMatrixAbstract):
    """Confusion matrix for multi-class (non-binary) problems."""

    def __getattr__(self, attr):
        """Resolve unknown attributes as (weighted) average statistics."""
        return self._avg_stat(attr)
| bsd-3-clause |
zihua/scikit-learn | examples/gaussian_process/plot_gpr_prior_posterior.py | 104 | 2878 | """
==========================================================================
Illustration of prior and posterior Gaussian process for different kernels
==========================================================================
This example illustrates the prior and posterior of a GPR with different
kernels. Mean, standard deviation, and 10 samples are shown for both prior
and posterior.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
# The set of covariance kernels to compare; each gets its own figure.
kernels = [1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0)),
           1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
           1.0 * ExpSineSquared(length_scale=1.0, periodicity=3.0,
                                length_scale_bounds=(0.1, 10.0),
                                periodicity_bounds=(1.0, 10.0)),
           ConstantKernel(0.1, (0.01, 10.0))
               * (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2),
           1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0),
                        nu=1.5)]

for fig_index, kernel in enumerate(kernels):
    # Specify Gaussian Process
    gp = GaussianProcessRegressor(kernel=kernel)

    # Plot prior: mean, +/- one std band, and 10 samples before any data.
    plt.figure(fig_index, figsize=(8, 8))
    plt.subplot(2, 1, 1)
    X_ = np.linspace(0, 5, 100)
    y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
    plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
    plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
                     alpha=0.5, color='k')
    y_samples = gp.sample_y(X_[:, np.newaxis], 10)
    plt.plot(X_, y_samples, lw=1)
    plt.xlim(0, 5)
    plt.ylim(-3, 3)
    plt.title("Prior (kernel: %s)" % kernel, fontsize=12)

    # Generate data and fit GP (10 noisy-free observations of sin((x-2.5)^2)).
    rng = np.random.RandomState(4)
    X = rng.uniform(0, 5, 10)[:, np.newaxis]
    y = np.sin((X[:, 0] - 2.5) ** 2)
    gp.fit(X, y)

    # Plot posterior: same summaries after conditioning on the data.
    plt.subplot(2, 1, 2)
    X_ = np.linspace(0, 5, 100)
    y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
    plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
    plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
                     alpha=0.5, color='k')

    y_samples = gp.sample_y(X_[:, np.newaxis], 10)
    plt.plot(X_, y_samples, lw=1)
    plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
    plt.xlim(0, 5)
    plt.ylim(-3, 3)
    # gp.kernel_ holds the hyperparameters optimized during fit.
    plt.title("Posterior (kernel: %s)\n Log-Likelihood: %.3f"
              % (gp.kernel_, gp.log_marginal_likelihood(gp.kernel_.theta)),
              fontsize=12)
    plt.tight_layout()

plt.show()
| bsd-3-clause |
jpautom/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 56 | 2400 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Shared fixture for the tests below: 4 samples x 3 features.
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
     [9, 1, 1],
     [6, 1, 2],
     [0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
    """Build a SelectKBest selector scored by the chi-squared statistic."""
    selector = SelectKBest(chi2, k=k)
    return selector
def test_chi2():
    # Test Chi2 feature extraction
    # (an accidentally duplicated, identical fit call was removed here)
    chi2 = mkchi2(k=1).fit(X, y)
    assert_equal(chi2.get_support(indices=True), [0])
    assert_equal(chi2.transform(X), np.array(X)[:, [0]])

    chi2 = mkchi2(k=2).fit(X, y)
    assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])

    # The selector must behave identically on sparse input.
    Xsp = csr_matrix(X, dtype=np.float64)
    chi2 = mkchi2(k=2).fit(Xsp, y)
    assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])

    Xtrans = chi2.transform(Xsp)
    assert_equal(Xtrans.shape, [Xsp.shape[0], 2])

    # == doesn't work on scipy.sparse matrices
    Xtrans = Xtrans.toarray()
    Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
    assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
    """chi2 must accept a COO matrix (as produced by CountVectorizer or
    DictVectorizer); reaching the end without an exception is the test."""
    X_coo = coo_matrix(X)
    mkchi2(k=2).fit_transform(X_coo, y)
def test_chi2_negative():
    # chi2 must reject negative feature values with a ValueError, for
    # list, dense-array and sparse input alike.
    X_neg, y_neg = [[0, 1], [-1e-20, 1]], [0, 1]
    for variant in (X_neg, np.array(X_neg), csr_matrix(X_neg)):
        assert_raises(ValueError, chi2, variant, y_neg)
def test_chisquare():
    # Our chi-square replacement must agree with scipy.stats.chisquare.
    observed = np.array([[2., 2.],
                         [1., 1.]])
    expected = np.array([[1.5, 1.5],
                         [1.5, 1.5]])
    # Run SciPy first: our _chisquare overwrites its `obs` argument.
    chi_scp, p_scp = scipy.stats.chisquare(observed, expected)
    chi_our, p_our = _chisquare(observed, expected)
    assert_array_almost_equal(chi_our, chi_scp)
    assert_array_almost_equal(p_our, p_scp)
| bsd-3-clause |
DGrady/pandas | pandas/io/s3.py | 16 | 1222 | """ s3 support for remote file interactivity """
from pandas import compat
try:
    import s3fs
    from botocore.exceptions import NoCredentialsError
except ImportError:
    # Narrowed from a bare ``except``: only a missing dependency should be
    # reported this way; any other failure (e.g. KeyboardInterrupt or a bug
    # inside s3fs) now propagates unchanged.
    raise ImportError("The s3fs library is required to handle s3 files")

# Pick the urlparse implementation for the running Python major version.
if compat.PY3:
    from urllib.parse import urlparse as parse_url
else:
    from urlparse import urlparse as parse_url
def _strip_schema(url):
"""Returns the url without the s3:// part"""
result = parse_url(url)
return result.netloc + result.path
def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
                           compression=None):
    """Open an ``s3://`` path with s3fs, falling back to anonymous access.

    Returns a tuple of (open file object, None, compression), mirroring the
    generic io helper's signature; ``encoding`` is accepted but unused.
    """
    key = _strip_schema(filepath_or_buffer)
    fs = s3fs.S3FileSystem(anon=False)
    try:
        handle = fs.open(key)
    except (OSError, NoCredentialsError):
        # boto3 has trouble accessing public files when credentialed:
        # an OSError is raised when credentials exist but are not valid for
        # the bucket, and a NoCredentialsError when there are none at all.
        # Retry anonymously in either case.
        handle = s3fs.S3FileSystem(anon=True).open(key)
    return handle, None, compression
| bsd-3-clause |
lifemapper/LmQGIS | dev/trees/newStatsCov.py | 2 | 12533 | import numpy as np
from itertools import combinations
import matplotlib.pyplot as plt
from csv import reader
import math
import os
#ll = [[1,1,1,0,0,0],[0,1,1,1,0,1],[1,1,1,0,0,0],[1,1,1,0,0,0],
# [0,1,1,1,1,0],[1,1,1,0,0,1],[1,1,1,1,0,0]]
def buildCovarianceAvgPerCell():
    """
    @summary: this is the average covariance of species in each cell, from a 'virtual'
    covariance matrix using the algebraic definition of the matrix, Sigma species
    @note: prints one average-covariance value per site and also collects the
    values in a local list; nothing is returned or written to disk.
    """
    #X = np.array(ll)
    # PAM (presence/absence matrix): rows are sites, columns are species.
    X = np.load('/home/jcavner/pam_692.npy') # this is white bellied rat
    #print X
    N = float(X.shape[0])  # number of sites
    S = float(X.shape[1])  # number of species (not used below)
    # O[i, j] = number of sites shared by species i and j; the diagonal is
    # each species' range size (count of occupied sites).
    O = np.dot(X.T,X)
    omega = O.diagonal()
    omegaprop = omega/N  # proportional range size == per-species mean presence
    XT = X.T
    covAvgsForSpsinSite = []
    for i,site in enumerate(X):
        presPositions = np.where(site==1.0)[0]
        if len(presPositions) > 1:
            rowCov = []
            # Covariance of every pair of species present in this site,
            # computed straight from the definition on mean-centered columns.
            for comboIdx,c in enumerate(combinations(presPositions,r=2)):
                #print i," ",c
                # omegaprop is actually the avg
                cov = sum((XT[c[0]]-omegaprop[c[0]])*(XT[c[1]]-omegaprop[c[1]]))/N
                rowCov.append(cov)
            avgCov = sum(rowCov) / (comboIdx+1)
            print avgCov
            covAvgsForSpsinSite.append(avgCov)
        elif len(presPositions) == 1:
            # A lone presence has no pairs; its average is defined as zero.
            print 0.0
            covAvgsForSpsinSite.append(0.0)
        else: # this is for full pam, take out for pamsum
            # NOTE(review): empty sites print 0.0 but append the string
            # 'NULL' -- presumably for a later DB load; confirm intent.
            print 0.0
            covAvgsForSpsinSite.append('NULL')
def buildTD_SSCov(X):
    """
    @summary: this is the covariance of taxon distance and Sites Shared by each pair
    of species in a cell
    @param X: full PAM
    @note: writes one 'covariance,pearson,' row per site to a CSV; sites with
    fewer than two usable species pairs are written as '0.00,0.00,'.
    """
    # for each site (row) find all combinations of species
    # then find all sites shared by each species combination
    f = open('/home/jcavner/AfricaCovarianceTest/td_ssCov_Pearson.txt','w')
    XT = X.T
    for i,site in enumerate(X): #[:45]
        print i
        presencePos = np.where(site==1.0)[0]
        # only do this if there are two or more combinations
        if len(presencePos) > 2: # in other words 3 presences
            sitesSharedPerSpsCombo = []
            tdPerSpsCombo = []
            comboCount = 0
            for comboIdx,combo in enumerate(combinations(presencePos,r=2)):
                # Taxon distance is None when either species is missing from
                # the tree; such pairs are skipped entirely.
                TD = getTaxonDistanceBtweenPair(combo[0],combo[1])
                if TD is not None:
                    # Sites shared by the pair = dot product of PAM columns.
                    SS = np.dot(XT[combo[0]],XT[combo[1]])
                    tdPerSpsCombo.append(TD)
                    sitesSharedPerSpsCombo.append(SS)
                    comboCount += 1
            if comboCount >= 2: # test this
                avgSS = sum(sitesSharedPerSpsCombo)/float(comboCount)
                avgTD = sum(tdPerSpsCombo)/float(comboCount)
                # make array of avgs
                SSAvgs = np.empty(comboCount);SSAvgs.fill(avgSS)
                TDAvgs = np.empty(comboCount);TDAvgs.fill(avgTD)
                ssArray = np.array(sitesSharedPerSpsCombo)
                tdArray = np.array(tdPerSpsCombo)
                # Deviation
                ssDev = ssArray-SSAvgs
                tdDev = tdArray-TDAvgs
                # calculate variance
                ssVariance = sum(ssDev * ssDev)/float(comboCount)
                tdVariance = sum(tdDev * tdDev)/float(comboCount)
                # calculate std dev
                ssStdDev = math.sqrt(ssVariance)
                tdStdDev = math.sqrt(tdVariance)
                # calculate covariance
                Sxy = sum(ssDev * tdDev)/float(comboCount)
                # calculate Pearsons
                p = Sxy/(ssStdDev * tdStdDev)
                f.write('%s,%s,\n' % (Sxy, p))
            else:
                f.write('0.00,0.00,\n')
        else:
            f.write('0.00,0.00,\n')
        #print 0.0
    #cov = sum((XT[c[0]]-omegaprop[c[0]])*(XT[c[1]]-omegaprop[c[1]]))/N # but different
    #print
    #print "Avg ",sumPerSite/float(comboIdx + 1)
    f.close()
    print "FINISHED"
def buildLeaves(clade):
    """
    @summary: Recursively walk one clade of the tree JSON, filling the
    module-level lookups: allClades (pathId -> clade dict without the
    "children" entry) and tipPathsDict (tip matrix index -> path string).
    @param clade: dict for one clade of the tree JSON
    @note: recursion goes through the module-level alias ``bL``, which is
    bound to this function after its definition.
    """
    if "pathId" in clade:
        allClades[clade["pathId"]] = dict((k,v) for k,v in clade.items() if k != "children")
    if 'children' in clade:
        for child in clade["children"]:
            bL(child)
    else:
        # Leaves carry an "mx" key: the species' column index in the PAM.
        if "mx" in clade:
            tipPathsDict[int(clade['mx'])] = clade['path']
def getTaxonDistanceBtweenPair(mtrxIdx1,mtrxIdx2):
    """
    @summary: Total branch length between two tree tips, summed from each
    tip up to their greatest common ancestor.
    @param mtrxIdx1: PAM matrix index of the first species
    @param mtrxIdx2: PAM matrix index of the second species
    @return: the distance (float), or None when either species is missing
    from the tree's tip lookup (tipPathsDict).
    """
    try:
        sps1PathStr = tipPathsDict[mtrxIdx1]
        sps2PathStr = tipPathsDict[mtrxIdx2]
    except KeyError:
        # Narrowed from a bare except: only a missing tip should yield None;
        # any other failure now propagates instead of being swallowed.
        totalLen = None
    else:
        pl1 = sps1PathStr.split(',')
        pl2 = sps2PathStr.split(',')
        pL1 = map(int,pl1)
        pL2 = map(int,pl2)
        pS1 = set(pL1)
        pS2 = set(pL2)
        # pathIds increase away from the root (findLengthToId relies on
        # this), so the greatest shared id is the most recent common
        # ancestor.
        ancId = max(set.intersection(pS1,pS2)) # greatest common ancestor pathId
        sp1Len = findLengthToId(pL1, ancId)
        sp2Len = findLengthToId(pL2, ancId)
        totalLen = sp1Len + sp2Len
    return totalLen
def findLengthToId(path, ancId):
    """
    @summary: Sum branch lengths along *path* (tip -> root) down to, but not
    including, the ancestor node.
    @param path: list of integer pathIds ordered tip to root
    @param ancId: common ancestor Id; summing stops at the first id <= ancId
    """
    total = 0
    for nodeId in path:
        # Ids decrease toward the root; once we reach the ancestor (or pass
        # it), the walk is done.
        if nodeId <= ancId:
            break
        total = total + float(allClades[str(nodeId)]["length"])
    return total
def findNearest(matches,pathId):
    """
    @summary: Among the candidate paths in *matches* (each known to contain
    pathId), return the shortest branch length from that path's tip up to
    the shared node pathId.
    @param matches: list of paths (lists of int pathIds, tip -> root)
    @param pathId: pathId of the shared ancestor node
    @note: NOTE(review): if *matches* is empty, ``shortest`` is never
    assigned and this raises UnboundLocalError -- the caller (calcMNTD)
    currently guarantees len(matches) > 0; confirm before reusing.
    """
    #print matches," ",pathId
    if len(matches) > 1:
        # have to find the shortest one
        shortestList = []
        for matchList in matches: # goes through each of the match lists
            compare = 0
            # Sum branch lengths from the tip toward the root; ids along a
            # path decrease toward the root, so stop once pathId is reached.
            for matchId in matchList:
                if matchId > pathId:
                    length = float(allClades[str(matchId)]["length"])
                    compare = compare + length
                else:
                    shortestList.append(compare)
                    break
        shortest = min(shortestList)
    elif len(matches) == 1:
        # Single candidate: its tip-to-ancestor length is the answer.
        shortest = 0
        for matchId in matches[0]:
            if matchId > pathId:
                length = float(allClades[str(matchId)]["length"])
                shortest = shortest + length
            else:
                break
    return shortest
def calcMNTD(pathsInSite):
    """
    @summary: Mean Nearest Taxon Distance (MNTD) for one site: for every tip
    present, the branch length to its nearest relative among the site's
    other tips, averaged over all tips.
    @param pathsInSite: list of path strings (comma separated pathIds, tip
    to root), one per tree tip present in the site
    @return: the mean nearest-taxon distance (float), or the string '0.00'
    when a lookup fails (kept as a string so the CSV writer emits '0.00').
    """
    # Parse every path string into a list of integer pathIds (tip -> root).
    pathList = []
    for path in pathsInSite:
        pl = path.split(',')
        pathList.append(map(int, pl))
    nearestTaxonLengths = []
    # enumerate() instead of the original pathList.index(path): avoids an
    # O(n) scan per path and always excludes the entry being processed.
    for index, path in enumerate(pathList):
        searchIn = list(pathList)
        searchIn.pop(index)
        # Walk up this path's ancestors (path[0] is the tip itself); the
        # first ancestor found inside any other path is the nearest shared
        # node, and the search stops there.
        matches = []
        for pathId in path[1:]:
            for srchPth in searchIn:
                if pathId in srchPth[1:]:
                    matches.append(srchPth)
            if len(matches) > 0:
                try:
                    nearestLen = findNearest(matches, pathId)
                    lengthToPathId = findLengthToId(path, pathId)
                except Exception:
                    # Replaced the Python-2-only 'except Exception, e' with
                    # the form valid in 2.6+ and 3; the bound name was unused.
                    return '0.00'
                else:
                    nearestTaxonLengths.append(nearestLen + lengthToPathId)
                break
    totAllLengths = sum(nearestTaxonLengths)
    meanNearestTaxonDist = totAllLengths/float(len(nearestTaxonLengths))
    return meanNearestTaxonDist
def calculateMNTDPerSite(X):
    """
    @summary: Writes the Mean Nearest Taxon Distance of each site in the
    PAM to a CSV, one row per site; sites with fewer than two species
    present in the tree are written as '0.00,'.
    @param X: full PAM (rows = sites, columns = species)
    """
    f = open('/home/jcavner/AfricaCovarianceTest/MNTD_PerSite.txt','w')
    for i,site in enumerate(X):
        print i
        # one-dimensional array of where presences are in matrix,
        # in other words mx or mtrxIdx
        presencePosinSite = np.where(site==1.0)[0]
        # remember not species in pam are in tree necessarily
        if len(presencePosinSite) > 1:
            allPathsForSite = []
            for presencePos in presencePosinSite:
                if presencePos in tipPathsDict:
                    tipPath = tipPathsDict[presencePos]
                    allPathsForSite.append(tipPath)
            # MNTD needs at least two tree tips present in the site.
            if len(allPathsForSite) >= 2:
                MNTD = calcMNTD(allPathsForSite)
                f.write('%s,\n' % (MNTD))
            else:
                f.write('0.00,\n')
        else:
            f.write('0.00,\n')
    f.close()
def plotCovarianceAgainstStats(statAgainst,title):
    """
    @summary: Scatter plot of per-site covariance (column 8 of the
    covariance CSV) against one column of the statistics CSV, skipping rows
    whose statistic cell is empty.
    @param statAgainst: column index into the stats CSV
    @param title: title for the plot
    """
    basePath = '/home/jcavner/Pooka8/WhiteBelliedRat_627'
    covReader = reader(open(os.path.join(basePath,'cov_fromShp.csv')),delimiter=',')
    statsReader = reader(open(os.path.join(basePath,'stats_fromShp.csv')),delimiter=',')
    # Drop the header row of each file, then pair the tables row by row.
    covRows = list(covReader)[1:]
    statRows = list(statsReader)[1:]
    statsVector = []
    covVector = []
    for covRow, statRow in zip(covRows, statRows):
        cell = statRow[statAgainst]
        if cell == '':
            continue
        # The CSV pads numbers with spaces; strip them before parsing.
        statsVector.append(float(cell.replace(' ','')))
        covVector.append(float(covRow[8]))
    plt.scatter(np.array(statsVector), np.array(covVector))
    plt.title(title)
    plt.show()
def plot_sstd_rad():
    """
    @summary: Scatter-plot every RAD statistic (phi, richness, ...) against
    every taxon-distance statistic (td_ssCov, MNTD), pairing the two CSVs
    row by row and dropping rows whose phi column (index 3) is empty.
    """
    basePath = '/home/jcavner/AfricaCovarianceTest'
    ss_td_mntd_reader = reader(open(os.path.join(basePath,'ss_td_mntd_fromShp.csv')),delimiter=',')
    rad_reader = reader(open(os.path.join(basePath,'rad.csv')),delimiter=',')
    # Skip the header row of both files.
    ss_tdLL = list(ss_td_mntd_reader)[1:]
    radLL = list(rad_reader)[1:]
    # Drop RAD rows with an empty phi column and remove the rows at the same
    # positions from the other table so the files stay aligned.
    # NOTE(review): tempss.index(site) finds the FIRST equal row, so
    # duplicate rows could be filtered at the wrong position -- confirm.
    temprad = list(radLL)
    radLL = [site for site in temprad if site[3] != '']
    idxToRemove = [i for i,site in enumerate(temprad) if site[3] == '']
    tempss = list(ss_tdLL)
    ss_tdLL = [site for site in tempss if tempss.index(site) not in idxToRemove]
    #ss_tdLL = [site for site in tempss if site[1] != ' 0.00000000' and site[2] != ' 0.00000000']
    print "LEN SS ",len(ss_tdLL)
    print "LEN RAD ",len(radLL)
    ss_td_a = np.array(ss_tdLL)
    rad_a = np.array(radLL)
    # Column indexes into each CSV.
    radColumns = { 'phi':3, 'richness': 4, 'propSpecDiv':5, 'avgpropRaSize':6 }
    ss_tdColumns = {'td_ssCov':1,'MNTD':2}
    # One scatter figure per (rad statistic, td statistic) pair.
    for radItem in radColumns.items():
        for ss_tdItem in ss_tdColumns.items():
            title = 'X: %s Y: %s' % (ss_tdItem[0],radItem[0],)
            ax = plt.figure().add_subplot(111)
            ax.set_title(title)
            radCol = rad_a[:,radItem[1]]
            ts_ssCol = ss_td_a[:,ss_tdItem[1]]
            rad = map(float,map(lambda x: x.replace(' ',' '),radCol))
            ts_ss = map(float,map(lambda x: x.replace(' ',' '),ts_ssCol))
            ax.scatter(ts_ss, rad) # X axis, Y axis
    plt.show()
# Example tree in the Lifemapper JSON format consumed by buildLeaves():
# every clade carries a "pathId", a comma-separated "path" back to the root,
# a branch "length", and tips additionally carry "mx" (their PAM column).
tjson = """
{
"pathId":"0",
"children":[
{
"pathId":"1",
"path":"1,0",
"length":"8",
"children":[
{"pathId":"5",
"path":"5,1,0",
"length":"1",
"children":[
{"pathId":"7","path":"7,5,1,0","length":""},
{"pathId":"8","path":"8,5,1,0","length":"2","mx":"1"}
]
},
{"pathId":"6",
"path":"6,1,0",
"length":"4",
"children":[
{"pathId":"9","path":"9,6,1,0","length":""},
{"pathId":"10","path":"10,6,1,0","length":"3","mx":"0"}
]
}
]
},
{
"pathId":"2",
"path":"2,0",
"length":"2",
"children":[
{"pathId":"3","path":"3,2,0","length":""},
{"pathId":"4","path":"4,2,0","length":"4","mx":"2"}
]
}
]
}"""

# Module-level lookups populated by buildLeaves():
#   allClades: pathId (str) -> clade dict (without "children")
#   tipPathsDict: PAM matrix index (int) -> comma-separated path string
allClades = {}
tipPathsDict = {}
bL = buildLeaves  # short alias used for the recursion inside buildLeaves

# need to read in tree that matches pam
#import json
#
#baseUrl = '/home/jcavner/AfricaCovarianceTest/'
#
#treePath = os.path.join(baseUrl,'tree.json')
#tjson = open(treePath,'r').read()
#
#pamPath = os.path.join(baseUrl,'pam_512.npy')
#pam = np.load(pamPath)
#
#treeDict = json.loads(str(tjson))
#buildLeaves(treeDict)
#
##calculateMNTDPerSite(pam)
#buildTD_SSCov(pam)

# Current entry point: plot the already-computed CSVs.
plot_sstd_rad()
#################
#plotCovarianceAgainstStats(3,'phi')
| gpl-2.0 |
drandykass/fatiando | fatiando/gravmag/transform.py | 3 | 21318 | """
Potential field transformations, like upward continuation and derivatives.
.. note:: Most, if not all, functions here required gridded data.
**Transformations**
* :func:`~fatiando.gravmag.transform.upcontinue`: Upward continuation of
gridded potential field data on a level surface.
* :func:`~fatiando.gravmag.transform.reduce_to_pole`: Reduce the total field
magnetic anomaly to the pole.
* :func:`~fatiando.gravmag.transform.tga`: Calculate the amplitude of the
total gradient (also called the analytic signal)
* :func:`~fatiando.gravmag.transform.tilt`: Calculates the tilt angle
* :func:`~fatiando.gravmag.transform.power_density_spectra`: Calculates
the Power Density Spectra of a gridded potential field data.
* :func:`~fatiando.gravmag.transform.radial_average`: Calculates the
the radial average of a Power Density Spectra using concentring rings.
**Derivatives**
* :func:`~fatiando.gravmag.transform.derivx`: Calculate the n-th order
derivative of a potential field in the x-direction (North-South)
* :func:`~fatiando.gravmag.transform.derivy`: Calculate the n-th order
derivative of a potential field in the y-direction (East-West)
* :func:`~fatiando.gravmag.transform.derivz`: Calculate the n-th order
derivative of a potential field in the z-direction
----
"""
from __future__ import division, absolute_import
import warnings
import numpy
from .. import utils
def reduce_to_pole(x, y, data, shape, inc, dec, sinc, sdec):
    r"""
    Reduce total field magnetic anomaly data to the pole.

    Applies, in the frequency domain (via the FFT), the phase filter of
    Blakely (1996) that simulates data measured with both the Geomagnetic
    field and the source magnetization vertical (90 degree inclination).

    .. note:: Requires gridded data.

    .. warning::

        Wrong values of *sinc* and *sdec* will lead to a wrong reduction.

    Parameters:

    * x, y : 1d-arrays
        The x, y coordinates of each data point.
    * data : 1d-array
        The total field anomaly data at each point.
    * shape : tuple = (nx, ny)
        The shape of the data grid
    * inc, dec : floats
        The inclination and declination of the inducing Geomagnetic field
    * sinc, sdec : floats
        The inclination and declination of the total magnetization of the
        anomaly source (induced plus remanent). If either is ``None``, the
        magnetization is taken parallel to the Geomagnetic field.

    Returns:

    * rtp : 1d-array
        The data reduced to the pole.

    References:

    Blakely, R. J. (1996), Potential Theory in Gravity and Magnetic
    Applications, Cambridge University Press.
    """
    fx, fy, fz = utils.ang2vec(1, inc, dec)
    if sinc is None or sdec is None:
        # Purely induced magnetization: parallel to the inducing field.
        mx, my, mz = fx, fy, fz
    else:
        mx, my, mz = utils.ang2vec(1, sinc, sdec)
    kx, ky = _fftfreqs(x, y, shape, shape)
    k_sqr = kx**2 + ky**2
    # Coefficients of the RTP filter (see Blakely, 1996).
    a1 = mz*fz - mx*fx
    a2 = mz*fz - my*fy
    a3 = -my*fx - mx*fy
    b1 = mx*fz + mz*fx
    b2 = my*fz + mz*fy
    # The zero-frequency term divides by zero; suppress that warning and
    # zero the term out explicitly afterwards.
    with numpy.errstate(divide='ignore', invalid='ignore'):
        rtp_filter = (k_sqr)/(a1*kx**2 + a2*ky**2 + a3*kx*ky +
                              1j*numpy.sqrt(k_sqr)*(b1*kx + b2*ky))
    rtp_filter[0, 0] = 0
    ft_pole = rtp_filter*numpy.fft.fft2(numpy.reshape(data, shape))
    return numpy.real(numpy.fft.ifft2(ft_pole)).ravel()
def upcontinue(x, y, data, shape, height):
    r"""
    Upward continuation of potential field data.

    Computed through the FFT (Blakely, 1996): the data spectrum is
    multiplied by ``exp(-height*|k|)`` and transformed back to the space
    domain, where ``|k|`` is the wavenumber modulus.

    .. note:: Requires gridded, unmasked data; x, y and height in meters.

    Parameters:

    * x, y : 1D-arrays
        The x and y coordinates of the grid points
    * data : 1D-array
        The potential field at the grid points
    * shape : tuple = (nx, ny)
        The shape of the grid
    * height : float
        The height increase (delta z) in meters.

    Returns:

    * cont : array
        The upward continued data

    References:

    Blakely, R. J. (1996), Potential Theory in Gravity and Magnetic
    Applications, Cambridge University Press.
    """
    assert x.shape == y.shape, \
        "x and y arrays must have same shape"
    if height <= 0:
        warnings.warn("Using 'height' <= 0 means downward continuation, " +
                      "which is known to be unstable.")
    nx, ny = shape
    # Pad with edge values to avoid instability at the grid borders.
    padded, pad_x, pad_y = _pad_data(data, shape)
    kx, ky = _fftfreqs(x, y, shape, padded.shape)
    wavenumber = numpy.sqrt(kx**2 + ky**2)
    spectrum = numpy.fft.fft2(padded)*numpy.exp(-height*wavenumber)
    continued = numpy.real(numpy.fft.ifft2(spectrum))
    # Strip the padding before returning.
    return continued[pad_x: pad_x + nx, pad_y: pad_y + ny].ravel()
def _upcontinue_space(x, y, data, shape, height):
    """
    Upward continuation using the space-domain formula.

    DEPRECATED. Use the better implementation using FFT. Kept here for
    historical reasons.
    """
    nx, ny = shape
    # Grid spacings and the area element of a single cell.
    spacing_x = (x.max() - x.min())/(nx - 1)
    spacing_y = (y.max() - y.min())/(ny - 1)
    cell_area = spacing_x*spacing_y
    dz_sqr = height**2
    result = numpy.zeros_like(data)
    # Direct numerical integration: every point accumulates the weighted
    # contribution of all grid cells (O(n^2) in the number of points).
    for xi, yi, value in zip(x, y, data):
        result += value*cell_area*((x - xi)**2 + (y - yi)**2 + dz_sqr)**(-1.5)
    result *= abs(height)/(2*numpy.pi)
    return result
def tga(x, y, data, shape, method='fd'):
    r"""
    Calculate the total gradient amplitude (TGA).

    The TGA (the "3D analytic signal" of Roest et al., 1992; nomenclature of
    Reid, 2012) is the amplitude of the gradient vector of a potential
    field: sqrt(dT/dx^2 + dT/dy^2 + dT/dz^2).

    .. note:: Requires gridded data, in SI units for meaningful output.

    Parameters:

    * x, y : 1D-arrays
        The x and y coordinates of the grid points
    * data : 1D-array
        The potential field at the grid points
    * shape : tuple = (nx, ny)
        The shape of the grid
    * method : string
        Horizontal-derivative method: ``'fd'`` (finite differences, more
        stable) or ``'fft'``. The z derivative always uses the FFT.

    Returns:

    * tga : 1D-array
        The amplitude of the total gradient

    References:

    Reid, A. (2012), Forgotten truths, myths and sacred cows of Potential
    Fields Geophysics - II, SEG Technical Program Expanded Abstracts 2012.

    Roest, W., J. Verhoef, and M. Pilkington (1992), Magnetic interpretation
    using the 3-D analytic signal, GEOPHYSICS, 57(1), 116-125.
    """
    grad_x = derivx(x, y, data, shape, method=method)
    grad_y = derivy(x, y, data, shape, method=method)
    grad_z = derivz(x, y, data, shape)
    return numpy.sqrt(grad_x**2 + grad_y**2 + grad_z**2)
def tilt(x, y, data, shape, xderiv=None, yderiv=None, zderiv=None):
    r"""
    Calculate the potential field tilt of Miller and Singh (1994): the
    arctangent of the vertical derivative over the total horizontal
    derivative.

    Works best on magnetic total field anomaly data reduced to the pole;
    the zero contour of the tilt outlines possible source bodies (use
    matplotlib's ``pyplot.contour`` or ``pyplot.tricontour``).

    .. note::

        Requires gridded data if any of the derivatives is not given.

    Parameters:

    * x, y : 1D-arrays
        The x and y coordinates of the grid points
    * data : 1D-array
        The potential field at the grid points
    * shape : tuple = (nx, ny)
        The shape of the grid. Ignored if all three derivatives are given.
    * xderiv, yderiv, zderiv : 1D-arrays or None
        Optional precomputed derivatives; any one left as ``None`` is
        computed with the default options of derivx/derivy/derivz.

    Returns:

    * tilt : 1D-array
        The tilt angle of the total field in radians.

    References:

    Miller, Hugh G, and Vijay Singh. 1994. "Potential Field Tilt --- a New
    Concept for Location of Potential Field Sources."
    Journal of Applied Geophysics 32 (2--3): 213-17.
    """
    dTdx = derivx(x, y, data, shape) if xderiv is None else xderiv
    dTdy = derivy(x, y, data, shape) if yderiv is None else yderiv
    dTdz = derivz(x, y, data, shape) if zderiv is None else zderiv
    horizontal = numpy.sqrt(dTdx**2 + dTdy**2)
    return numpy.arctan2(dTdz, horizontal)
def derivx(x, y, data, shape, order=1, method='fd'):
    """
    Calculate the derivative of a potential field in the x direction
    (North-South).

    .. note:: Requires gridded data, in SI units for meaningful output.

    Parameters:

    * x, y : 1D-arrays
        The x and y coordinates of the grid points
    * data : 1D-array
        The potential field at the grid points
    * shape : tuple = (nx, ny)
        The shape of the grid
    * order : int
        The order of the derivative
    * method : string
        ``'fd'`` for central finite-differences (more stable) or ``'fft'``
        for the Fast Fourier Transform.

    Returns:

    * deriv : 1D-array
        The derivative
    """
    nx, ny = shape
    assert method in ['fft', 'fd'], \
        'Invalid method "{}".'.format(method)
    if method == 'fft':
        # Pad with edge values to reduce border artifacts; the derivative is
        # applied in the frequency domain as (i*kx)**order.
        padded, padx, pady = _pad_data(data, shape)
        kx, _ = _fftfreqs(x, y, shape, padded.shape)
        ft_deriv = numpy.fft.fft2(padded)*(kx*1j)**order
        full_grid = numpy.real(numpy.fft.ifft2(ft_deriv))
        deriv = full_grid[padx: padx + nx, pady: pady + ny]
    else:
        grid = data.reshape(shape)
        spacing = (x.max() - x.min())/(nx - 1)
        deriv = numpy.empty_like(grid)
        # Central differences in the interior; the edge rows copy their
        # nearest interior neighbour.
        deriv[1:-1, :] = (grid[2:, :] - grid[:-2, :])/(2*spacing)
        deriv[0, :] = deriv[1, :]
        deriv[-1, :] = deriv[-2, :]
        if order > 1:
            # Higher orders by repeated first-order differencing.
            deriv = derivx(x, y, deriv, shape, order=order - 1, method='fd')
    return deriv.ravel()
def derivy(x, y, data, shape, order=1, method='fd'):
    """
    Calculate the derivative of a potential field in the y direction
    (East-West).

    .. note:: Requires gridded data, in SI units for meaningful output.

    Parameters:

    * x, y : 1D-arrays
        The x and y coordinates of the grid points
    * data : 1D-array
        The potential field at the grid points
    * shape : tuple = (nx, ny)
        The shape of the grid
    * order : int
        The order of the derivative
    * method : string
        ``'fd'`` for central finite-differences (more stable) or ``'fft'``
        for the Fast Fourier Transform.

    Returns:

    * deriv : 1D-array
        The derivative
    """
    nx, ny = shape
    assert method in ['fft', 'fd'], \
        'Invalid method "{}".'.format(method)
    if method == 'fft':
        # Pad with edge values to reduce border artifacts; the derivative is
        # applied in the frequency domain as (i*ky)**order.
        padded, padx, pady = _pad_data(data, shape)
        _, ky = _fftfreqs(x, y, shape, padded.shape)
        ft_deriv = numpy.fft.fft2(padded)*(ky*1j)**order
        full_grid = numpy.real(numpy.fft.ifft2(ft_deriv))
        deriv = full_grid[padx: padx + nx, pady: pady + ny]
    else:
        grid = data.reshape(shape)
        spacing = (y.max() - y.min())/(ny - 1)
        deriv = numpy.empty_like(grid)
        # Central differences in the interior; the edge columns copy their
        # nearest interior neighbour.
        deriv[:, 1:-1] = (grid[:, 2:] - grid[:, :-2])/(2*spacing)
        deriv[:, 0] = deriv[:, 1]
        deriv[:, -1] = deriv[:, -2]
        if order > 1:
            # Higher orders by repeated first-order differencing.
            deriv = derivy(x, y, deriv, shape, order=order - 1, method='fd')
    return deriv.ravel()
def derivz(x, y, data, shape, order=1, method='fft'):
    """
    Calculate the derivative of a potential field in the z direction.

    .. note:: Requires gridded data, in SI units for meaningful output.

    Parameters:

    * x, y : 1D-arrays
        The x and y coordinates of the grid points
    * data : 1D-array
        The potential field at the grid points
    * shape : tuple = (nx, ny)
        The shape of the grid
    * order : int
        The order of the derivative
    * method : string
        Only ``'fft'`` (the Fast Fourier Transform) is supported.

    Returns:

    * deriv : 1D-array
        The derivative
    """
    assert method == 'fft', \
        "Invalid method '{}'".format(method)
    nx, ny = shape
    # Pad with edge values to reduce border artifacts; the vertical
    # derivative is |k|**order in the frequency domain.
    padded, padx, pady = _pad_data(data, shape)
    kx, ky = _fftfreqs(x, y, shape, padded.shape)
    ft_deriv = numpy.fft.fft2(padded)*numpy.sqrt(kx**2 + ky**2)**order
    full_grid = numpy.real(numpy.fft.ifft2(ft_deriv))
    # Strip the padding before returning.
    return full_grid[padx: padx + nx, pady: pady + ny].ravel()
def power_density_spectra(x, y, data, shape):
    r"""
    Calculate the Power Density Spectra of 2D gridded potential field data:
    the squared modulus of its 2D Fourier transform.

    .. note:: Requires gridded data; x and y in meters.

    Parameters:

    * x, y : 1D-arrays
        The x and y coordinates of the grid points
    * data : 1D-array
        The potential field at the grid points
    * shape : tuple = (nx, ny)
        The shape of the grid

    Returns:

    * kx, ky : 2D-arrays
        The wavenumbers of each Power Density Spectra point
    * pds : 2D-array
        The Power Density Spectra of the data
    """
    kx, ky = _fftfreqs(x, y, shape, shape)
    spectrum = numpy.fft.fft2(numpy.reshape(data, shape))
    return kx, ky, abs(spectrum)**2
def radial_average_spectrum(kx, ky, pds, max_radius=None, ring_width=None):
    r"""
    Average the Power Density Spectra over concentric rings of constant
    width centered on the origin of the wavenumber plane.

    Feed this the outputs of ``power_density_spectra`` to obtain the
    radially averaged power spectrum.

    Parameters:

    * kx, ky : 2D-arrays
        The wavenumber coordinates of the PDS points
    * pds : 2D-array
        The Power Density Spectra
    * max_radius : float (optional)
        Inner radius of the biggest ring; defaults to the smaller of
        ``kx.max()`` and ``ky.max()``. Smaller values leave points out of
        the averaging; bigger values include points near the boundaries.
    * ring_width : float (optional)
        Width of the rings; defaults to the larger of the kx and ky grid
        spacings. Bigger widths give more populated averages.

    Returns:

    * k_radial : 1D-array
        Wavenumber (ring center radius) of each averaged point.
    * pds_radial : 1D array
        Radially Averaged Power Spectrum
    """
    nx, ny = pds.shape
    if max_radius is None:
        max_radius = min(kx.max(), ky.max())
    if ring_width is None:
        ring_width = max(kx[1, 0], ky[0, 1])
    k = numpy.sqrt(kx**2 + ky**2)
    k_radial = []
    pds_radial = []
    ring = 0
    # One half-open annulus per ring; ring 0 is the disc around the origin.
    while ring*ring_width <= max_radius:
        upper = (ring + 0.5)*ring_width
        if ring == 0:
            in_ring = k <= upper
        else:
            lower = (ring - 0.5)*ring_width
            in_ring = numpy.logical_and(k > lower, k <= upper)
        k_radial.append(ring*ring_width)
        pds_radial.append(pds[in_ring].mean())
        ring += 1
    return numpy.array(k_radial), numpy.array(pds_radial)
def _pad_data(data, shape):
    """Pad *data* (reshaped to *shape*) with its edge values out to a square
    whose side is the next power of two; return (padded, padx, pady)."""
    target = _nextpow2(numpy.max(shape))
    nx, ny = shape
    pad_x = (target - nx)//2
    pad_y = (target - ny)//2
    padded = numpy.pad(data.reshape(shape), ((pad_x, pad_x), (pad_y, pad_y)),
                       mode='edge')
    return padded, pad_x, pad_y
def _nextpow2(i):
buf = numpy.ceil(numpy.log(i)/numpy.log(2))
return int(2**buf)
def _fftfreqs(x, y, shape, padshape):
"""
Get two 2D-arrays with the wave numbers in the x and y directions.
"""
nx, ny = shape
dx = (x.max() - x.min())/(nx - 1)
fx = 2*numpy.pi*numpy.fft.fftfreq(padshape[0], dx)
dy = (y.max() - y.min())/(ny - 1)
fy = 2*numpy.pi*numpy.fft.fftfreq(padshape[1], dy)
return numpy.meshgrid(fy, fx)[::-1]
| bsd-3-clause |
aabadie/scikit-learn | sklearn/feature_selection/__init__.py | 140 | 1302 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
from .from_model import SelectFromModel
from .mutual_info_ import mutual_info_regression, mutual_info_classif
# Public names re-exported at the package level; kept in sync with the
# imports above.
__all__ = ['GenericUnivariateSelect',
           'RFE',
           'RFECV',
           'SelectFdr',
           'SelectFpr',
           'SelectFwe',
           'SelectKBest',
           'SelectFromModel',
           'SelectPercentile',
           'VarianceThreshold',
           'chi2',
           'f_classif',
           'f_oneway',
           'f_regression',
           'mutual_info_classif',
           'mutual_info_regression']
| bsd-3-clause |
pypot/scikit-learn | sklearn/ensemble/gradient_boosting.py | 126 | 65552 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
    """Predict a constant: the ``alpha``-quantile of the training targets."""

    def __init__(self, alpha=0.9):
        if not 0 < alpha < 1.0:
            raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
        self.alpha = alpha

    def fit(self, X, y, sample_weight=None):
        """Store the (optionally weighted) alpha-quantile of ``y``."""
        percentile = self.alpha * 100.0
        if sample_weight is None:
            self.quantile = stats.scoreatpercentile(y, percentile)
        else:
            self.quantile = _weighted_percentile(y, sample_weight, percentile)

    def predict(self, X):
        """Return an ``(n_samples, 1)`` column filled with the stored quantile."""
        check_is_fitted(self, 'quantile')
        out = np.empty((X.shape[0], 1), dtype=np.float64)
        out.fill(self.quantile)
        return out
class MeanEstimator(BaseEstimator):
    """Predict a constant: the (weighted) mean of the training targets."""

    def fit(self, X, y, sample_weight=None):
        """Store the mean of ``y``, weighted if ``sample_weight`` is given."""
        self.mean = (np.mean(y) if sample_weight is None
                     else np.average(y, weights=sample_weight))

    def predict(self, X):
        """Return an ``(n_samples, 1)`` column filled with the stored mean."""
        check_is_fitted(self, 'mean')
        out = np.empty((X.shape[0], 1), dtype=np.float64)
        out.fill(self.mean)
        return out
class LogOddsEstimator(BaseEstimator):
    """Predict a constant: the (scaled) log-odds of the positive class."""

    # Multiplier applied to the log odds; subclasses may override it.
    scale = 1.0

    def fit(self, X, y, sample_weight=None):
        """Store ``scale * log(pos / neg)`` from the (weighted) class counts.

        Pre-condition: positives/negatives are encoded as 1/0 in ``y``.
        """
        if sample_weight is None:
            pos = np.sum(y)
            neg = y.shape[0] - pos
        else:
            pos = np.sum(sample_weight * y)
            neg = np.sum(sample_weight * (1 - y))

        # both classes must be present, otherwise the log odds diverge
        if pos == 0 or neg == 0:
            raise ValueError('y contains non binary labels.')
        self.prior = self.scale * np.log(pos / neg)

    def predict(self, X):
        """Return an ``(n_samples, 1)`` column filled with the stored prior."""
        check_is_fitted(self, 'prior')
        out = np.empty((X.shape[0], 1), dtype=np.float64)
        out.fill(self.prior)
        return out
class ScaledLogOddsEstimator(LogOddsEstimator):
    """Log-odds prior scaled by 0.5, as required by the exponential loss."""

    scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
    """Predict the (weighted) empirical class distribution of the training data."""

    def fit(self, X, y, sample_weight=None):
        """Store the normalized (weighted) class frequencies of ``y``."""
        if sample_weight is None:
            sample_weight = np.ones_like(y, dtype=np.float64)
        counts = bincount(y, weights=sample_weight)
        self.priors = counts / counts.sum()

    def predict(self, X):
        """Return an ``(n_samples, n_classes)`` array of the stored priors."""
        check_is_fitted(self, 'priors')
        out = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
        out[:] = self.priors
        return out
class ZeroEstimator(BaseEstimator):
    """Predict the constant zero for every sample."""

    def fit(self, X, y, sample_weight=None):
        """Record how many output columns ``predict`` must produce."""
        if np.issubdtype(y.dtype, int):
            # classification: one column per class, collapsed to one for binary
            n_unique = np.unique(y).shape[0]
            self.n_classes = 1 if n_unique == 2 else n_unique
        else:
            # regression: always a single column
            self.n_classes = 1

    def predict(self, X):
        """Return an ``(n_samples, n_classes)`` array of zeros."""
        check_is_fitted(self, 'n_classes')
        return np.zeros((X.shape[0], self.n_classes), dtype=np.float64)
class LossFunction(six.with_metaclass(ABCMeta, object)):
    """Abstract base class for various loss functions.

    Attributes
    ----------
    K : int
        The number of regression trees to be induced;
        1 for regression and binary classification;
        ``n_classes`` for multi-class classification.
    """

    # Losses that fit one tree per class and stage override this to True.
    is_multi_class = False

    def __init__(self, n_classes):
        self.K = n_classes

    def init_estimator(self):
        """Default ``init`` estimator for loss function. """
        raise NotImplementedError()

    @abstractmethod
    def __call__(self, y, pred, sample_weight=None):
        """Compute the loss of prediction ``pred`` and ``y``. """

    @abstractmethod
    def negative_gradient(self, y, y_pred, **kargs):
        """Compute the negative gradient.

        Parameters
        ----------
        y : np.ndarray, shape=(n,)
            The target labels.
        y_pred : np.ndarray, shape=(n,)
            The predictions.
        """

    def update_terminal_regions(self, tree, X, y, residual, y_pred,
                                sample_weight, sample_mask,
                                learning_rate=1.0, k=0):
        """Update the terminal regions (=leaves) of the given tree and
        updates the current predictions of the model. Traverses tree
        and invokes template method `_update_terminal_region`.

        Parameters
        ----------
        tree : tree.Tree
            The tree object.
        X : ndarray, shape=(n, m)
            The data array.
        y : ndarray, shape=(n,)
            The target labels.
        residual : ndarray, shape=(n,)
            The residuals (usually the negative gradient).
        y_pred : ndarray, shape=(n,)
            The predictions (updated in place).
        sample_weight : ndarray, shape=(n,)
            The weight of each sample.
        sample_mask : ndarray, shape=(n,)
            The sample mask to be used.
        learning_rate : float, default=0.1
            learning rate shrinks the contribution of each tree by
            ``learning_rate``.
        k : int, default 0
            The index of the estimator being updated.
        """
        # compute leaf for each sample in ``X``.
        terminal_regions = tree.apply(X)

        # mask all which are not in sample mask: out-of-bag samples must not
        # influence the leaf-value line search below.
        masked_terminal_regions = terminal_regions.copy()
        masked_terminal_regions[~sample_mask] = -1

        # update each leaf (= perform line search)
        for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
            self._update_terminal_region(tree, masked_terminal_regions,
                                         leaf, X, y, residual,
                                         y_pred[:, k], sample_weight)

        # update predictions (both in-bag and out-of-bag)
        y_pred[:, k] += (learning_rate
                         * tree.value[:, 0, 0].take(terminal_regions, axis=0))

    @abstractmethod
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
    """Base class for regression loss functions. """

    def __init__(self, n_classes):
        # Regression always fits exactly one tree per boosting stage.
        if n_classes != 1:
            raise ValueError("``n_classes`` must be 1 for regression but "
                             "was %r" % n_classes)
        super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
    """Loss function for least squares (LS) estimation.
    Terminal regions need not to be updated for least squares. """

    def init_estimator(self):
        # the mean minimizes the squared error
        return MeanEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """Mean (or weighted mean) of the squared residuals."""
        sq_err = (y - pred.ravel()) ** 2.0
        if sample_weight is None:
            return np.mean(sq_err)
        return 1.0 / sample_weight.sum() * np.sum(sample_weight * sq_err)

    def negative_gradient(self, y, pred, **kargs):
        """For squared error the negative gradient is the plain residual."""
        return y - pred.ravel()

    def update_terminal_regions(self, tree, X, y, residual, y_pred,
                                sample_weight, sample_mask,
                                learning_rate=1.0, k=0):
        """Least squares does not need to update terminal regions.

        But it has to update the predictions.
        """
        y_pred[:, k] += learning_rate * tree.predict(X).ravel()

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # the regression-tree leaf values are already the LS optimum
        pass
class LeastAbsoluteError(RegressionLossFunction):
    """Loss function for least absolute deviation (LAD) regression. """

    def init_estimator(self):
        # the median (0.5-quantile) minimizes the absolute deviation
        return QuantileEstimator(alpha=0.5)

    def __call__(self, y, pred, sample_weight=None):
        """Mean (or weighted mean) of the absolute residuals."""
        abs_err = np.abs(y - pred.ravel())
        if sample_weight is None:
            return abs_err.mean()
        return 1.0 / sample_weight.sum() * np.sum(sample_weight * abs_err)

    def negative_gradient(self, y, pred, **kargs):
        """1.0 if y - pred > 0.0 else -1.0"""
        positive = y - pred.ravel() > 0.0
        return 2.0 * positive - 1.0

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """LAD updates terminal regions to median estimates. """
        samples = np.where(terminal_regions == leaf)[0]
        weights = sample_weight.take(samples, axis=0)
        diff = y.take(samples, axis=0) - pred.take(samples, axis=0)
        tree.value[leaf, 0, 0] = _weighted_percentile(diff, weights,
                                                      percentile=50)
class HuberLossFunction(RegressionLossFunction):
    """Huber loss function for robust regression.

    M-Regression proposed in Friedman 2001: quadratic for residuals whose
    magnitude is at most the cutoff ``gamma`` (the ``alpha``-quantile of
    the absolute residuals), linear beyond it.

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
    """

    def __init__(self, n_classes, alpha=0.9):
        super(HuberLossFunction, self).__init__(n_classes)
        self.alpha = alpha  # quantile of |residual| defining the cutoff
        self.gamma = None   # cached cutoff; set by ``negative_gradient``

    def init_estimator(self):
        return QuantileEstimator(alpha=0.5)

    def __call__(self, y, pred, sample_weight=None):
        """Compute the Huber loss; recomputes ``gamma`` when not cached."""
        pred = pred.ravel()
        diff = y - pred
        gamma = self.gamma
        if gamma is None:
            # no cached value yet (``negative_gradient`` not called)
            if sample_weight is None:
                gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
            else:
                gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)

        # quadratic inside the cutoff, linear outside
        gamma_mask = np.abs(diff) <= gamma
        if sample_weight is None:
            sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
            lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
            loss = (sq_loss + lin_loss) / y.shape[0]
        else:
            sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
            lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
                              (np.abs(diff[~gamma_mask]) - gamma / 2.0))
            loss = (sq_loss + lin_loss) / sample_weight.sum()
        return loss

    def negative_gradient(self, y, pred, sample_weight=None, **kargs):
        """Negative gradient: residual inside the cutoff, clipped to
        ``+-gamma`` outside.  Also caches ``gamma`` for the loss and the
        terminal-region update of this stage."""
        pred = pred.ravel()
        diff = y - pred
        if sample_weight is None:
            gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
        else:
            gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
        gamma_mask = np.abs(diff) <= gamma
        residual = np.zeros((y.shape[0],), dtype=np.float64)
        residual[gamma_mask] = diff[gamma_mask]
        residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
        self.gamma = gamma
        return residual

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # leaf value = weighted median plus a trimmed mean correction
        # (Friedman 2001, eq. 38ff)
        terminal_region = np.where(terminal_regions == leaf)[0]
        sample_weight = sample_weight.take(terminal_region, axis=0)
        gamma = self.gamma
        diff = (y.take(terminal_region, axis=0)
                - pred.take(terminal_region, axis=0))
        median = _weighted_percentile(diff, sample_weight, percentile=50)
        diff_minus_median = diff - median
        tree.value[leaf, 0] = median + np.mean(
            np.sign(diff_minus_median) *
            np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
    """Loss function for quantile regression.

    Quantile regression allows to estimate the percentiles
    of the conditional distribution of the target.

    The per-sample (pinball) loss is ``alpha * diff`` for positive
    residuals and ``(1 - alpha) * -diff`` for non-positive ones, so the
    loss is always non-negative.
    """

    def __init__(self, n_classes, alpha=0.9):
        super(QuantileLossFunction, self).__init__(n_classes)
        # validate instead of ``assert`` (asserts are stripped under -O)
        if not 0 < alpha < 1.0:
            raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
        self.alpha = alpha
        self.percentile = alpha * 100.0

    def init_estimator(self):
        return QuantileEstimator(self.alpha)

    def __call__(self, y, pred, sample_weight=None):
        """Compute the (weighted) mean pinball loss of ``pred`` and ``y``."""
        pred = pred.ravel()
        diff = y - pred
        alpha = self.alpha

        mask = y > pred
        # BUGFIX: the second term must be subtracted -- ``diff[~mask]`` is
        # non-positive, so adding it made the loss too small (even negative).
        if sample_weight is None:
            loss = (alpha * diff[mask].sum() -
                    (1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
        else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
                     (1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
                    sample_weight.sum())
        return loss

    def negative_gradient(self, y, pred, **kargs):
        """Negative gradient: ``alpha`` for under-, ``alpha - 1`` for
        over-predictions."""
        alpha = self.alpha
        pred = pred.ravel()
        mask = y > pred
        return (alpha * mask) - ((1.0 - alpha) * ~mask)

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # leaf value = weighted alpha-percentile of the residuals
        terminal_region = np.where(terminal_regions == leaf)[0]
        diff = (y.take(terminal_region, axis=0)
                - pred.take(terminal_region, axis=0))
        sample_weight = sample_weight.take(terminal_region, axis=0)

        val = _weighted_percentile(diff, sample_weight, self.percentile)
        tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
    """Base class for classification loss functions. """

    def _score_to_proba(self, score):
        """Template method to convert scores to probabilities.

        Losses that do not support probabilities raise a TypeError;
        subclasses supporting ``predict_proba`` override this method.
        """
        raise TypeError('%s does not support predict_proba' % type(self).__name__)

    @abstractmethod
    def _score_to_decision(self, score):
        """Template method to convert scores to decisions.

        Returns int arrays.
        """
class BinomialDeviance(ClassificationLossFunction):
    """Binomial deviance loss function for binary classification.

    Binary classification is a special case; here, we only need to
    fit one tree instead of ``n_classes`` trees.
    """

    def __init__(self, n_classes):
        if n_classes != 2:
            raise ValueError("{0:s} requires 2 classes.".format(
                self.__class__.__name__))
        # we only need to fit one tree for binary clf.
        super(BinomialDeviance, self).__init__(1)

    def init_estimator(self):
        return LogOddsEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """Compute the deviance (= 2 * negative log-likelihood). """
        # logaddexp(0, v) == log(1.0 + exp(v))
        pred = pred.ravel()
        if sample_weight is None:
            return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
        else:
            return (-2.0 / sample_weight.sum() *
                    np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))

    def negative_gradient(self, y, pred, **kargs):
        """Compute the residual (= negative gradient).

        For binomial deviance this is ``y - sigmoid(pred)``.
        """
        return y - expit(pred.ravel())

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Make a single Newton-Raphson step.

        our node estimate is given by:

            sum(w * (y - prob)) / sum(w * prob * (1 - prob))

        we take advantage that: y - prob = residual
        """
        terminal_region = np.where(terminal_regions == leaf)[0]
        residual = residual.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)

        numerator = np.sum(sample_weight * residual)
        # prob * (1 - prob) rewritten via residual = y - prob
        denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))

        # guard against pure leaves (all prob 0 or 1)
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _score_to_proba(self, score):
        # P(y=1) = sigmoid(score); column 0 holds the complement
        proba = np.ones((score.shape[0], 2), dtype=np.float64)
        proba[:, 1] = expit(score.ravel())
        proba[:, 0] -= proba[:, 1]
        return proba

    def _score_to_decision(self, score):
        proba = self._score_to_proba(score)
        return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
    """Multinomial deviance loss function for multi-class classification.

    For multi-class classification we need to fit ``n_classes`` trees at
    each stage.
    """

    is_multi_class = True

    def __init__(self, n_classes):
        if n_classes < 3:
            raise ValueError("{0:s} requires more than 2 classes.".format(
                self.__class__.__name__))
        super(MultinomialDeviance, self).__init__(n_classes)

    def init_estimator(self):
        return PriorProbabilityEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """Compute the multinomial deviance of ``pred`` and ``y``.

        The per-sample deviance is ``logsumexp(pred_i) - pred_i[y_i]``.
        """
        # create one-hot label encoding
        Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
        for k in range(self.K):
            Y[:, k] = y == k

        if sample_weight is None:
            return np.sum(-1 * (Y * pred).sum(axis=1) +
                          logsumexp(pred, axis=1))
        else:
            # BUGFIX: the sample weight must scale the *whole* per-sample
            # deviance; previously only the ``(Y * pred)`` term was weighted
            # while ``logsumexp`` entered unweighted.
            return np.sum(-1 * sample_weight * ((Y * pred).sum(axis=1) -
                                                logsumexp(pred, axis=1)))

    def negative_gradient(self, y, pred, k=0, **kwargs):
        """Compute negative gradient for the ``k``-th class. """
        # residual = y_k - softmax(pred)_k; nan_to_num guards exp underflow
        return y - np.nan_to_num(np.exp(pred[:, k] -
                                        logsumexp(pred, axis=1)))

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Make a single Newton-Raphson step. """
        terminal_region = np.where(terminal_regions == leaf)[0]
        residual = residual.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)

        numerator = np.sum(sample_weight * residual)
        # multi-class correction factor (Friedman 2001, Algorithm 6)
        numerator *= (self.K - 1) / self.K

        denominator = np.sum(sample_weight * (y - residual) *
                             (1.0 - y + residual))

        # guard against pure leaves
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _score_to_proba(self, score):
        # row-wise softmax; nan_to_num guards exp underflow
        return np.nan_to_num(
            np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))

    def _score_to_decision(self, score):
        proba = self._score_to_proba(score)
        return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
    """Exponential loss function for binary classification.

    Same loss as AdaBoost.  Internally labels are mapped from {0, 1}
    to {-1, 1} and the loss is ``exp(-y * pred)``.

    References
    ----------
    Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
    """

    def __init__(self, n_classes):
        if n_classes != 2:
            raise ValueError("{0:s} requires 2 classes.".format(
                self.__class__.__name__))
        # we only need to fit one tree for binary clf.
        super(ExponentialLoss, self).__init__(1)

    def init_estimator(self):
        return ScaledLogOddsEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """Compute the (weighted) mean of ``exp(-(2y - 1) * pred)``."""
        pred = pred.ravel()
        if sample_weight is None:
            return np.mean(np.exp(-(2. * y - 1.) * pred))
        else:
            return (1.0 / sample_weight.sum() *
                    np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))

    def negative_gradient(self, y, pred, **kargs):
        # y_ is the label mapped to {-1, 1}, negated
        y_ = -(2. * y - 1.)
        return y_ * np.exp(y_ * pred.ravel())

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # closed-form leaf estimate for the exponential loss
        terminal_region = np.where(terminal_regions == leaf)[0]
        pred = pred.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)

        y_ = 2. * y - 1.

        numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
        denominator = np.sum(sample_weight * np.exp(-y_ * pred))

        # guard against empty/degenerate leaves
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _score_to_proba(self, score):
        # sigmoid of twice the score maps the AdaBoost scale to [0, 1]
        proba = np.ones((score.shape[0], 2), dtype=np.float64)
        proba[:, 1] = expit(2.0 * score.ravel())
        proba[:, 0] -= proba[:, 1]
        return proba

    def _score_to_decision(self, score):
        return (score.ravel() >= 0.0).astype(np.int)
# Maps the ``loss`` constructor string to the corresponding LossFunction
# class.  'deviance' is resolved at fit time (Binomial- vs Multinomial-
# Deviance, depending on the number of classes) in ``_check_params``.
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
                  'lad': LeastAbsoluteError,
                  'huber': HuberLossFunction,
                  'quantile': QuantileLossFunction,
                  'deviance': None,  # for both, multinomial and binomial
                  'exponential': ExponentialLoss,
                  }

# Named ``init`` estimators selectable via ``init='zero'``.
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
    """Report per-iteration fitting progress to stdout.

    With ``verbose == 1`` a line is printed whenever the (relative)
    iteration number is a multiple of ``verbose_mod``, which grows by
    powers of ten; with ``verbose > 1`` every iteration is reported.
    """

    def __init__(self, verbose):
        self.verbose = verbose

    def init(self, est, begin_at_stage=0):
        """Print the header line and build the row format string."""
        header_fields = ['Iter', 'Train Loss']
        verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
        if est.subsample < 1:
            # out-of-bag estimates exist only when subsampling
            header_fields.append('OOB Improve')
            verbose_fmt.append('{oob_impr:>16.4f}')
        header_fields.append('Remaining Time')
        verbose_fmt.append('{remaining_time:>16s}')

        print(('%10s ' + '%16s ' *
               (len(header_fields) - 1)) % tuple(header_fields))

        self.verbose_fmt = ' '.join(verbose_fmt)
        # report whenever i % verbose_mod == 0
        self.verbose_mod = 1
        self.start_time = time()
        self.begin_at_stage = begin_at_stage

    def update(self, j, est):
        """Update reporter with new iteration. """
        do_oob = est.subsample < 1
        # iteration relative to the first stage of this (warm-started) fit
        i = j - self.begin_at_stage
        if (i + 1) % self.verbose_mod != 0:
            return

        oob_impr = est.oob_improvement_[j] if do_oob else 0
        remaining_time = ((est.n_estimators - (j + 1)) *
                          (time() - self.start_time) / float(i + 1))
        if remaining_time > 60:
            remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
        else:
            remaining_time = '{0:.2f}s'.format(remaining_time)
        print(self.verbose_fmt.format(iter=j + 1,
                                      train_score=est.train_score_[j],
                                      oob_impr=oob_impr,
                                      remaining_time=remaining_time))
        if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
            # throttle the output frequency (powers of 10)
            self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
             min_samples_leaf, min_weight_fraction_leaf,
             max_depth, init, subsample, max_features,
             random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
             warm_start=False):
    # Abstract: only the concrete classifier/regressor subclasses may be
    # instantiated.  Parameters are stored verbatim; validation is
    # deferred to ``fit`` (via ``_check_params``), per sklearn convention.
    self.n_estimators = n_estimators
    self.learning_rate = learning_rate
    self.loss = loss
    self.min_samples_split = min_samples_split
    self.min_samples_leaf = min_samples_leaf
    self.min_weight_fraction_leaf = min_weight_fraction_leaf
    self.subsample = subsample
    self.max_features = max_features
    self.max_depth = max_depth
    self.init = init
    self.random_state = random_state
    self.alpha = alpha
    self.verbose = verbose
    self.max_leaf_nodes = max_leaf_nodes
    self.warm_start = warm_start

    # empty (0, 0) array marks the model as not yet fitted
    self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
               criterion, splitter, random_state):
    """Fit another stage of ``n_classes_`` trees to the boosting model.

    Fits one regression tree per class (a single tree for regression and
    binary classification) on the negative gradient, updates the leaf
    values via the loss-specific line search, and accumulates the stage
    into ``y_pred`` (in place) and ``self.estimators_[i]``.
    """
    assert sample_mask.dtype == np.bool
    loss = self.loss_
    original_y = y

    for k in range(loss.K):
        if loss.is_multi_class:
            # one-vs-rest encoding of class ``k`` for multi-class losses
            y = np.array(original_y == k, dtype=np.float64)

        residual = loss.negative_gradient(y, y_pred, k=k,
                                          sample_weight=sample_weight)

        # induce regression tree on residuals
        tree = DecisionTreeRegressor(
            criterion=criterion,
            splitter=splitter,
            max_depth=self.max_depth,
            min_samples_split=self.min_samples_split,
            min_samples_leaf=self.min_samples_leaf,
            min_weight_fraction_leaf=self.min_weight_fraction_leaf,
            max_features=self.max_features,
            max_leaf_nodes=self.max_leaf_nodes,
            random_state=random_state)

        if self.subsample < 1.0:
            # no inplace multiplication! (zero out out-of-bag samples
            # without mutating the caller's array)
            sample_weight = sample_weight * sample_mask.astype(np.float64)

        # X was validated in ``fit``, so skip re-validation here
        tree.fit(X, residual, sample_weight=sample_weight,
                 check_input=False)

        # update tree leaves
        loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
                                     sample_weight, sample_mask,
                                     self.learning_rate, k=k)

        # add tree to ensemble
        self.estimators_[i, k] = tree

    return y_pred
def _check_params(self):
    """Check validity of parameters and raise ValueError if not valid.

    Also resolves derived state: ``self.loss_`` (the LossFunction
    instance) and ``self.max_features_`` (the integer feature budget).
    """
    if self.n_estimators <= 0:
        raise ValueError("n_estimators must be greater than 0 but "
                         "was %r" % self.n_estimators)

    if self.learning_rate <= 0.0:
        raise ValueError("learning_rate must be greater than 0 but "
                         "was %r" % self.learning_rate)

    # the subclass whitelists its losses via ``_SUPPORTED_LOSS``
    if (self.loss not in self._SUPPORTED_LOSS
            or self.loss not in LOSS_FUNCTIONS):
        raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))

    if self.loss == 'deviance':
        # 'deviance' resolves by class count (see LOSS_FUNCTIONS)
        loss_class = (MultinomialDeviance
                      if len(self.classes_) > 2
                      else BinomialDeviance)
    else:
        loss_class = LOSS_FUNCTIONS[self.loss]

    if self.loss in ('huber', 'quantile'):
        # these two losses take the extra ``alpha`` parameter
        self.loss_ = loss_class(self.n_classes_, self.alpha)
    else:
        self.loss_ = loss_class(self.n_classes_)

    if not (0.0 < self.subsample <= 1.0):
        raise ValueError("subsample must be in (0,1] but "
                         "was %r" % self.subsample)

    if self.init is not None:
        if isinstance(self.init, six.string_types):
            if self.init not in INIT_ESTIMATORS:
                raise ValueError('init="%s" is not supported' % self.init)
        else:
            # duck-type check: any object with fit/predict is accepted
            if (not hasattr(self.init, 'fit')
                    or not hasattr(self.init, 'predict')):
                raise ValueError("init=%r must be valid BaseEstimator "
                                 "and support both fit and "
                                 "predict" % self.init)

    if not (0.0 < self.alpha < 1.0):
        raise ValueError("alpha must be in (0.0, 1.0) but "
                         "was %r" % self.alpha)

    # resolve ``max_features`` to a concrete integer
    if isinstance(self.max_features, six.string_types):
        if self.max_features == "auto":
            # if is_classification
            if self.n_classes_ > 1:
                max_features = max(1, int(np.sqrt(self.n_features)))
            else:
                # is regression
                max_features = self.n_features
        elif self.max_features == "sqrt":
            max_features = max(1, int(np.sqrt(self.n_features)))
        elif self.max_features == "log2":
            max_features = max(1, int(np.log2(self.n_features)))
        else:
            raise ValueError("Invalid value for max_features: %r. "
                             "Allowed string values are 'auto', 'sqrt' "
                             "or 'log2'." % self.max_features)
    elif self.max_features is None:
        max_features = self.n_features
    elif isinstance(self.max_features, (numbers.Integral, np.integer)):
        max_features = self.max_features
    else:  # float: interpreted as a fraction of the features
        if 0. < self.max_features <= 1.:
            max_features = max(int(self.max_features * self.n_features), 1)
        else:
            raise ValueError("max_features must be in (0, n_features]")

    self.max_features_ = max_features
def _init_state(self):
    """Initialize model state and allocate model state data structures. """
    # resolve the ``init`` estimator: loss default, named built-in
    # (e.g. 'zero'), or a user-supplied estimator object
    if self.init is None:
        self.init_ = self.loss_.init_estimator()
    elif isinstance(self.init, six.string_types):
        self.init_ = INIT_ESTIMATORS[self.init]()
    else:
        self.init_ = self.init

    # one tree slot per (stage, class)
    self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
                                dtype=np.object)
    self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
    # do oob? out-of-bag improvement only exists when subsampling
    if self.subsample < 1.0:
        self.oob_improvement_ = np.zeros((self.n_estimators),
                                         dtype=np.float64)
def _clear_state(self):
    """Clear the state of the gradient boosting model. """
    # reset the tree array to its "not fitted" shape ...
    if hasattr(self, 'estimators_'):
        self.estimators_ = np.empty((0, 0), dtype=np.object)
    # ... and drop every other fit artefact so a fresh fit starts clean
    for attr in ('train_score_', 'oob_improvement_', 'init_'):
        if hasattr(self, attr):
            delattr(self, attr)
def _resize_state(self):
    """Grow all per-stage attributes to ``self.n_estimators`` entries.

    Used by warm-started fits: ``self.n_estimators`` is the *total*
    number of stages after the resize.
    """
    total_n_estimators = self.n_estimators
    if total_n_estimators < self.estimators_.shape[0]:
        # BUGFIX: report the current stage count (``shape[0]``); the old
        # code passed ``self.estimators_[0]`` -- an object-array row --
        # which cannot be formatted with %d.
        raise ValueError('resize with smaller n_estimators %d < %d' %
                         (total_n_estimators, self.estimators_.shape[0]))

    self.estimators_.resize((total_n_estimators, self.loss_.K))
    self.train_score_.resize(total_n_estimators)
    if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
        # if do oob resize arrays or create new if not available
        if hasattr(self, 'oob_improvement_'):
            self.oob_improvement_.resize(total_n_estimators)
        else:
            self.oob_improvement_ = np.zeros((total_n_estimators,),
                                             dtype=np.float64)
def _is_initialized(self):
    # fitted iff at least one stage slot has been allocated
    estimators = getattr(self, 'estimators_', [])
    return len(estimators) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
    """Fit the gradient boosting model.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples
        and n_features is the number of features.
    y : array-like, shape = [n_samples]
        Target values (integers in classification, real numbers in
        regression)
        For classification, labels must correspond to classes.
    sample_weight : array-like, shape = [n_samples] or None
        Sample weights. If None, then samples are equally weighted. Splits
        that would create child nodes with net zero or negative weight are
        ignored while searching for a split in each node. In the case of
        classification, splits are also ignored if they would result in any
        single class carrying a negative weight in either child node.
    monitor : callable, optional
        The monitor is called after each iteration with the current
        iteration, a reference to the estimator and the local variables of
        ``_fit_stages`` as keyword arguments ``callable(i, self,
        locals())``. If the callable returns ``True`` the fitting procedure
        is stopped. The monitor can be used for various things such as
        computing held-out estimates, early stopping, model introspection,
        and snapshotting.

    Returns
    -------
    self : object
        Returns self.
    """
    # if not warmstart - clear the estimator state
    if not self.warm_start:
        self._clear_state()

    # Check input
    X, y = check_X_y(X, y, dtype=DTYPE)
    n_samples, self.n_features = X.shape
    if sample_weight is None:
        sample_weight = np.ones(n_samples, dtype=np.float32)
    else:
        sample_weight = column_or_1d(sample_weight, warn=True)

    check_consistent_length(X, y, sample_weight)

    y = self._validate_y(y)

    random_state = check_random_state(self.random_state)
    self._check_params()

    if not self._is_initialized():
        # init state
        self._init_state()

        # fit initial model - FIXME make sample_weight optional
        self.init_.fit(X, y, sample_weight)

        # init predictions
        y_pred = self.init_.predict(X)
        begin_at_stage = 0
    else:
        # add more estimators to fitted model
        # invariant: warm_start = True
        if self.n_estimators < self.estimators_.shape[0]:
            raise ValueError('n_estimators=%d must be larger or equal to '
                             'estimators_.shape[0]=%d when '
                             'warm_start==True'
                             % (self.n_estimators,
                                self.estimators_.shape[0]))
        begin_at_stage = self.estimators_.shape[0]
        # resume from the current ensemble's predictions
        y_pred = self._decision_function(X)
        self._resize_state()

    # fit the boosting stages
    n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
                                begin_at_stage, monitor)
    # change shape of arrays after fit (early-stopping or additional ests)
    if n_stages != self.estimators_.shape[0]:
        self.estimators_ = self.estimators_[:n_stages]
        self.train_score_ = self.train_score_[:n_stages]
        if hasattr(self, 'oob_improvement_'):
            self.oob_improvement_ = self.oob_improvement_[:n_stages]

    return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
                begin_at_stage=0, monitor=None):
    """Iteratively fits the stages.

    For each stage it computes the progress (OOB, train score)
    and delegates to ``_fit_stage``.

    Returns the number of stages fit; might differ from ``n_estimators``
    due to early stopping (via ``monitor``).
    """
    n_samples = X.shape[0]
    do_oob = self.subsample < 1.0
    sample_mask = np.ones((n_samples, ), dtype=np.bool)
    n_inbag = max(1, int(self.subsample * n_samples))
    loss_ = self.loss_

    # Set min_weight_leaf from min_weight_fraction_leaf
    if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
        min_weight_leaf = (self.min_weight_fraction_leaf *
                           np.sum(sample_weight))
    else:
        min_weight_leaf = 0.

    # init criterion and splitter (shared by all stages; the trees
    # re-sort the presorted X only once this way)
    criterion = FriedmanMSE(1)
    splitter = PresortBestSplitter(criterion,
                                   self.max_features_,
                                   self.min_samples_leaf,
                                   min_weight_leaf,
                                   random_state)

    if self.verbose:
        verbose_reporter = VerboseReporter(self.verbose)
        verbose_reporter.init(self, begin_at_stage)

    # perform boosting iterations
    i = begin_at_stage
    for i in range(begin_at_stage, self.n_estimators):

        # subsampling
        if do_oob:
            sample_mask = _random_sample_mask(n_samples, n_inbag,
                                              random_state)
            # OOB score before adding this stage
            old_oob_score = loss_(y[~sample_mask],
                                  y_pred[~sample_mask],
                                  sample_weight[~sample_mask])

        # fit next stage of trees
        y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
                                 sample_mask, criterion, splitter,
                                 random_state)

        # track deviance (= loss)
        if do_oob:
            self.train_score_[i] = loss_(y[sample_mask],
                                         y_pred[sample_mask],
                                         sample_weight[sample_mask])
            self.oob_improvement_[i] = (
                old_oob_score - loss_(y[~sample_mask],
                                      y_pred[~sample_mask],
                                      sample_weight[~sample_mask]))
        else:
            # no need to fancy index w/ no subsampling
            self.train_score_[i] = loss_(y, y_pred, sample_weight)

        if self.verbose > 0:
            verbose_reporter.update(i, self)

        if monitor is not None:
            early_stopping = monitor(i, self, locals())
            if early_stopping:
                break
    return i + 1
def _make_estimator(self, append=True):
    """Unsupported BaseEnsemble hook; trees are built in ``_fit_stage``."""
    # we don't need _make_estimator
    raise NotImplementedError()
def _init_decision_function(self, X):
    """Check input and compute prediction of ``init``.

    Raises NotFittedError when called before ``fit`` and ValueError
    when ``X`` has a different number of features than seen at fit time.
    """
    if self.estimators_ is None or len(self.estimators_) == 0:
        # BUGFIX: removed the stray trailing backtick from the message
        raise NotFittedError("Estimator not fitted, call `fit`"
                             " before making predictions.")
    if X.shape[1] != self.n_features:
        raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
            self.n_features, X.shape[1]))
    score = self.init_.predict(X).astype(np.float64)
    return score
def _decision_function(self, X):
    """Return the raw decision scores: ``init`` prediction plus the
    accumulated contribution of every fitted stage (added in place by
    the Cython ``predict_stages`` routine)."""
    # for use in inner loop, not raveling the output in single-class case,
    # not doing input validation.
    score = self._init_decision_function(X)
    predict_stages(self.estimators_, X, self.learning_rate, score)
    return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
    """Compute the decision function of ``X``.

    Deprecated public wrapper around ``_decision_function`` that
    validates the input and ravels single-output scores.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    score : array, shape = [n_samples, n_classes] or [n_samples]
        The decision function of the input samples. The order of the
        classes corresponds to that in the attribute `classes_`.
        Regression and binary classification produce an array of shape
        [n_samples].
    """
    X = check_array(X, dtype=DTYPE, order="C")
    score = self._decision_function(X)
    # single-output case: return a 1-D array, per the documented shape
    if score.shape[1] == 1:
        return score.ravel()
    return score
def _staged_decision_function(self, X):
    """Compute decision function of ``X`` for each iteration.

    This method allows monitoring (i.e. determine error on testing set)
    after each stage.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    score : generator of array, shape = [n_samples, k]
        The decision function of the input samples. The order of the
        classes corresponds to that in the attribute `classes_`.
        Regression and binary classification are special cases with
        ``k == 1``, otherwise ``k == n_classes``.
    """
    checked = check_array(X, dtype=DTYPE, order="C")
    running = self._init_decision_function(checked)
    n_stages = self.estimators_.shape[0]
    for stage_idx in range(n_stages):
        # predict_stage adds stage ``stage_idx`` contribution in place.
        predict_stage(self.estimators_, stage_idx, checked,
                      self.learning_rate, running)
        # Copy so callers may keep each stage's snapshot.
        yield running.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
    """Compute decision function of ``X`` for each iteration.

    This method allows monitoring (i.e. determine error on testing set)
    after each stage.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    score : generator of array, shape = [n_samples, k]
        The decision function of the input samples. The order of the
        classes corresponds to that in the attribute `classes_`.
        Regression and binary classification are special cases with
        ``k == 1``, otherwise ``k == n_classes``.
    """
    # Python 2 has no ``yield from``; re-yield each stage explicitly.
    for stage_score in self._staged_decision_function(X):
        yield stage_score
@property
def feature_importances_(self):
    """Return the feature importances (the higher, the more important the
    feature).

    Returns
    -------
    feature_importances_ : array, shape = [n_features]
    """
    if self.estimators_ is None or len(self.estimators_) == 0:
        raise NotFittedError("Estimator not fitted, call `fit` before"
                             " `feature_importances_`.")
    # Average the tree importances within each stage, then across stages.
    accumulated = np.zeros((self.n_features, ), dtype=np.float64)
    for boosting_stage in self.estimators_:
        per_stage = sum(tree.feature_importances_
                        for tree in boosting_stage) / len(boosting_stage)
        accumulated += per_stage
    return accumulated / len(self.estimators_)
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
    """Gradient Boosting for classification.

    GB builds an additive model in a
    forward stage-wise fashion; it allows for the optimization of
    arbitrary differentiable loss functions. In each stage ``n_classes_``
    regression trees are fit on the negative gradient of the
    binomial or multinomial deviance loss function. Binary classification
    is a special case where only a single regression tree is induced.

    Read more in the :ref:`User Guide <gradient_boosting>`.

    Parameters
    ----------
    loss : {'deviance', 'exponential'}, optional (default='deviance')
        loss function to be optimized. 'deviance' refers to
        deviance (= logistic regression) for classification
        with probabilistic outputs. For loss 'exponential' gradient
        boosting recovers the AdaBoost algorithm.

    learning_rate : float, optional (default=0.1)
        learning rate shrinks the contribution of each tree by `learning_rate`.
        There is a trade-off between learning_rate and n_estimators.

    n_estimators : int (default=100)
        The number of boosting stages to perform. Gradient boosting
        is fairly robust to over-fitting so a large number usually
        results in better performance.

    max_depth : integer, optional (default=3)
        maximum depth of the individual regression estimators. The maximum
        depth limits the number of nodes in the tree. Tune this parameter
        for best performance; the best value depends on the interaction
        of the input variables.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    subsample : float, optional (default=1.0)
        The fraction of samples to be used for fitting the individual base
        learners. If smaller than 1.0 this results in Stochastic Gradient
        Boosting. `subsample` interacts with the parameter `n_estimators`.
        Choosing `subsample < 1.0` leads to a reduction of variance
        and an increase in bias.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Choosing `max_features < n_features` leads to a reduction of variance
        and an increase in bias.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.

    init : BaseEstimator, None, optional (default=None)
        An estimator object that is used to compute the initial
        predictions. ``init`` has to provide ``fit`` and ``predict``.
        If None it uses ``loss.init_estimator``.

    verbose : int, default: 0
        Enable verbose output. If 1 then it prints progress and performance
        once in a while (the more trees the lower the frequency). If greater
        than 1 then it prints progress and performance for every tree.

    warm_start : bool, default: False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just erase the
        previous solution.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    feature_importances_ : array, shape = [n_features]
        The feature importances (the higher, the more important the feature).

    oob_improvement_ : array, shape = [n_estimators]
        The improvement in loss (= deviance) on the out-of-bag samples
        relative to the previous iteration.
        ``oob_improvement_[0]`` is the improvement in
        loss of the first stage over the ``init`` estimator.

    train_score_ : array, shape = [n_estimators]
        The i-th score ``train_score_[i]`` is the deviance (= loss) of the
        model at iteration ``i`` on the in-bag sample.
        If ``subsample == 1`` this is the deviance on the training data.

    loss_ : LossFunction
        The concrete ``LossFunction`` object.

    init : BaseEstimator
        The estimator that provides the initial predictions.
        Set via the ``init`` argument or ``loss.init_estimator``.

    estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, loss_.K]
        The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
        classification, otherwise n_classes.

    See also
    --------
    sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
    AdaBoostClassifier

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.

    J. Friedman, Stochastic Gradient Boosting, 1999

    T. Hastie, R. Tibshirani and J. Friedman.
    Elements of Statistical Learning Ed. 2, Springer, 2009.
    """

    _SUPPORTED_LOSS = ('deviance', 'exponential')

    def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
                 subsample=1.0, min_samples_split=2,
                 min_samples_leaf=1, min_weight_fraction_leaf=0.,
                 max_depth=3, init=None, random_state=None,
                 max_features=None, verbose=0,
                 max_leaf_nodes=None, warm_start=False):

        super(GradientBoostingClassifier, self).__init__(
            loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_depth=max_depth, init=init, subsample=subsample,
            max_features=max_features,
            random_state=random_state, verbose=verbose,
            max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)

    def _validate_y(self, y):
        # Map arbitrary class labels onto integer indices [0, n_classes).
        self.classes_, y = np.unique(y, return_inverse=True)
        self.n_classes_ = len(self.classes_)
        return y

    def decision_function(self, X):
        """Compute the decision function of ``X``.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        score : array, shape = [n_samples, n_classes] or [n_samples]
            The decision function of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
            Regression and binary classification produce an array of shape
            [n_samples].
        """
        X = check_array(X, dtype=DTYPE, order="C")
        score = self._decision_function(X)
        # Binary classification yields a single column; flatten it.
        if score.shape[1] == 1:
            return score.ravel()
        return score

    def staged_decision_function(self, X):
        """Compute decision function of ``X`` for each iteration.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
            Regression and binary classification are special cases with
            ``k == 1``, otherwise ``k==n_classes``.
        """
        for dec in self._staged_decision_function(X):
            # no yield from in Python2.X
            yield dec

    def predict(self, X):
        """Predict class for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted values.
        """
        score = self.decision_function(X)
        # Loss converts raw scores to class indices; map back to labels.
        decisions = self.loss_._score_to_decision(score)
        return self.classes_.take(decisions, axis=0)

    def staged_predict(self, X):
        """Predict class at each stage for X.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        for score in self._staged_decision_function(X):
            decisions = self.loss_._score_to_decision(score)
            yield self.classes_.take(decisions, axis=0)

    def predict_proba(self, X):
        """Predict class probabilities for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Raises
        ------
        AttributeError
            If the ``loss`` does not support probabilities.

        Returns
        -------
        p : array of shape = [n_samples]
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        score = self.decision_function(X)
        try:
            return self.loss_._score_to_proba(score)
        except NotFittedError:
            raise
        except AttributeError:
            # Not every loss implements _score_to_proba.
            raise AttributeError('loss=%r does not support predict_proba' %
                                 self.loss)

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Raises
        ------
        AttributeError
            If the ``loss`` does not support probabilities.

        Returns
        -------
        p : array of shape = [n_samples]
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)
        return np.log(proba)

    def staged_predict_proba(self, X):
        """Predict class probabilities at each stage for X.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        try:
            for score in self._staged_decision_function(X):
                yield self.loss_._score_to_proba(score)
        except NotFittedError:
            raise
        except AttributeError:
            raise AttributeError('loss=%r does not support predict_proba' %
                                 self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
    """Gradient Boosting for regression.

    GB builds an additive model in a forward stage-wise fashion;
    it allows for the optimization of arbitrary differentiable loss functions.
    In each stage a regression tree is fit on the negative gradient of the
    given loss function.

    Read more in the :ref:`User Guide <gradient_boosting>`.

    Parameters
    ----------
    loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
        loss function to be optimized. 'ls' refers to least squares
        regression. 'lad' (least absolute deviation) is a highly robust
        loss function solely based on order information of the input
        variables. 'huber' is a combination of the two. 'quantile'
        allows quantile regression (use `alpha` to specify the quantile).

    learning_rate : float, optional (default=0.1)
        learning rate shrinks the contribution of each tree by `learning_rate`.
        There is a trade-off between learning_rate and n_estimators.

    n_estimators : int (default=100)
        The number of boosting stages to perform. Gradient boosting
        is fairly robust to over-fitting so a large number usually
        results in better performance.

    max_depth : integer, optional (default=3)
        maximum depth of the individual regression estimators. The maximum
        depth limits the number of nodes in the tree. Tune this parameter
        for best performance; the best value depends on the interaction
        of the input variables.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    subsample : float, optional (default=1.0)
        The fraction of samples to be used for fitting the individual base
        learners. If smaller than 1.0 this results in Stochastic Gradient
        Boosting. `subsample` interacts with the parameter `n_estimators`.
        Choosing `subsample < 1.0` leads to a reduction of variance
        and an increase in bias.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Choosing `max_features < n_features` leads to a reduction of variance
        and an increase in bias.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    alpha : float (default=0.9)
        The alpha-quantile of the huber loss function and the quantile
        loss function. Only if ``loss='huber'`` or ``loss='quantile'``.

    init : BaseEstimator, None, optional (default=None)
        An estimator object that is used to compute the initial
        predictions. ``init`` has to provide ``fit`` and ``predict``.
        If None it uses ``loss.init_estimator``.

    verbose : int, default: 0
        Enable verbose output. If 1 then it prints progress and performance
        once in a while (the more trees the lower the frequency). If greater
        than 1 then it prints progress and performance for every tree.

    warm_start : bool, default: False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just erase the
        previous solution.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    feature_importances_ : array, shape = [n_features]
        The feature importances (the higher, the more important the feature).

    oob_improvement_ : array, shape = [n_estimators]
        The improvement in loss (= deviance) on the out-of-bag samples
        relative to the previous iteration.
        ``oob_improvement_[0]`` is the improvement in
        loss of the first stage over the ``init`` estimator.

    train_score_ : array, shape = [n_estimators]
        The i-th score ``train_score_[i]`` is the deviance (= loss) of the
        model at iteration ``i`` on the in-bag sample.
        If ``subsample == 1`` this is the deviance on the training data.

    loss_ : LossFunction
        The concrete ``LossFunction`` object.

    init : BaseEstimator
        The estimator that provides the initial predictions.
        Set via the ``init`` argument or ``loss.init_estimator``.

    estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
        The collection of fitted sub-estimators.

    See also
    --------
    DecisionTreeRegressor, RandomForestRegressor

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.

    J. Friedman, Stochastic Gradient Boosting, 1999

    T. Hastie, R. Tibshirani and J. Friedman.
    Elements of Statistical Learning Ed. 2, Springer, 2009.
    """

    _SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')

    def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
                 subsample=1.0, min_samples_split=2,
                 min_samples_leaf=1, min_weight_fraction_leaf=0.,
                 max_depth=3, init=None, random_state=None,
                 max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
                 warm_start=False):

        super(GradientBoostingRegressor, self).__init__(
            loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_depth=max_depth, init=init, subsample=subsample,
            max_features=max_features,
            random_state=random_state, alpha=alpha, verbose=verbose,
            max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)

    def predict(self, X):
        """Predict regression target for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted values.
        """
        X = check_array(X, dtype=DTYPE, order="C")
        # Regression has a single output column; flatten it.
        return self._decision_function(X).ravel()

    def staged_predict(self, X):
        """Predict regression target at each stage for X.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        for y in self._staged_decision_function(X):
            yield y.ravel()
| bsd-3-clause |
cuemacro/findatapy | findatapy/market/datavendorweb.py | 1 | 108889 | __author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""Contains implementations of DataVendor for
Quandl (free/premium data source) - DataVendorQuandl
ALFRED (free datasource) - DataVendorALFRED
Pandas Data Reader (free data source - includes FRED, World Bank, Yahoo) - DataVendorPandasWeb
DukasCopy (retail FX broker - has historical tick data) - DataVendorDukasCopy
ONS (free datasource) - DataVendorONS (incomplete)
BOE (free datasource) - DataVendorBOE (incomplete)
Bitcoinchart - DataVendorBitcoincharts
"""
#######################################################################################################################
import sys
import os
import json
import datetime
from datetime import datetime
from datetime import timedelta
import time as time_library
import re
import concurrent.futures
import requests
import pandas as pd
# support Quandl 3.x.x
try:
import quandl as Quandl
except:
# if import fails use Quandl 2.x.x
import Quandl
from findatapy.market import IOEngine
# Abstract class on which this is based
from findatapy.market.datavendor import DataVendor
# For logging and constants
from findatapy.util import ConfigManager, DataConstants, LoggerManager
import pandas
class DataVendorQuandl(DataVendor):
    """Reads in data from Quandl into findatapy library
    """

    def __init__(self):
        super(DataVendorQuandl, self).__init__()

    # implement method in abstract superclass
    def load_ticker(self, market_data_request):
        """Download data from Quandl and translate vendor tickers/fields to
        findatapy conventions.

        Parameters
        ----------
        market_data_request : MarketDataRequest
            Describes tickers, fields and date range to fetch.

        Returns
        -------
        pandas.DataFrame or None
            Columns renamed to ``ticker.field``; None if nothing returned.
        """
        logger = LoggerManager().getLogger(__name__)

        market_data_request_vendor = self.construct_vendor_market_data_request(market_data_request)

        logger.info("Request Quandl data")

        data_frame = self.download_daily(market_data_request_vendor)

        # BUGFIX: 'data_frame.index is []' was always False (identity test
        # against a fresh list literal); test emptiness explicitly instead.
        if data_frame is None or len(data_frame.index) == 0: return None

        # convert from vendor to findatapy tickers/fields
        if data_frame is not None:
            returned_tickers = data_frame.columns

        if data_frame is not None:
            # tidy up tickers into a format that is more easily translatable
            # we can often get multiple fields returned (even if we don't ask for them!)
            # convert to lower case
            returned_fields = [(x.split(' - ')[1]).lower().replace(' ', '-').replace('.', '-').replace('--', '-') for x
                               in returned_tickers]
            returned_fields = [x.replace('value', 'close') for x in returned_fields]  # special case for close

            # quandl doesn't always return the field name
            for i in range(0, len(returned_fields)):
                ticker = returned_tickers[i].split('/')[1].split(' - ')[0].lower()

                if ticker == returned_fields[i]:
                    returned_fields[i] = 'close'

            # replace time fields (can cause problems later for times to start with 0)
            for i in range(0, 10):
                returned_fields = [x.replace('0' + str(i) + ':00', str(i) + ':00') for x in returned_fields]

            returned_tickers = [x.replace('.', '/') for x in returned_tickers]
            returned_tickers = [x.split(' - ')[0] for x in returned_tickers]

            try:
                fields = self.translate_from_vendor_field(returned_fields, market_data_request)
                tickers = self.translate_from_vendor_ticker(returned_tickers, market_data_request)
            except Exception as e:
                # BUGFIX: previously swallowed with print('error'), which left
                # fields/tickers unbound and caused a NameError below; fall
                # back to the untranslated vendor names instead.
                logger.error("Could not translate vendor tickers/fields: " + str(e))
                fields = returned_fields
                tickers = returned_tickers

            ticker_combined = []

            for i in range(0, len(tickers)):
                try:
                    ticker_combined.append(tickers[i] + "." + fields[i])
                except IndexError:
                    # fewer fields than tickers: assume the close price
                    ticker_combined.append(tickers[i] + ".close")

            data_frame.columns = ticker_combined
            data_frame.index.name = 'Date'

        logger.info("Completed request from Quandl for " + str(ticker_combined))

        return data_frame

    def download_daily(self, market_data_request):
        """Fetch raw daily data from Quandl, retrying up to 5 times.

        Returns the raw vendor DataFrame (or None on repeated failure).
        """
        logger = LoggerManager().getLogger(__name__)

        trials = 0

        data_frame = None

        while (trials < 5):
            try:
                data_frame = Quandl.get(market_data_request.tickers,
                                        authtoken=market_data_request.quandl_api_key,
                                        trim_start=market_data_request.start_date,
                                        trim_end=market_data_request.finish_date)

                break
            except SyntaxError:
                # invalid ticker: retrying will not help
                logger.error("The tickers %s do not exist on Quandl." % market_data_request.tickers)
                break
            except Exception as e:
                trials = trials + 1
                logger.info(
                    "Attempting... " + str(trials) + " request to download from Quandl due to following error: " + str(
                        e))

        if trials == 5:
            logger.error("Couldn't download from Quandl after several attempts!")

        return data_frame
#######################################################################################################################

# support Eikon
# Optional dependency: if the eikon package (or its asyncio setup) is not
# available, only log the failure so the rest of the module still imports.
try:
    import asyncio

    # Eikon requires an event loop to be present at import time
    asyncio.set_event_loop(asyncio.SelectorEventLoop())

    import eikon as ek
except:
    logger = LoggerManager().getLogger(__name__)
    logger.info("Did not load Eikon library")

# ek.set_port_number(9400)
# ek.set_port_number(9000)
class DataVendorEikon(DataVendor):
    """Reads in data from Eikon into findatapy library
    """

    def __init__(self):
        super(DataVendorEikon, self).__init__()

    # implement method in abstract superclass
    def load_ticker(self, market_data_request):
        """Download data from Eikon and translate vendor tickers/fields to
        findatapy conventions.

        Parameters
        ----------
        market_data_request : MarketDataRequest
            Describes tickers, fields and date range to fetch.

        Returns
        -------
        pandas.DataFrame or None
        """
        logger = LoggerManager().getLogger(__name__)

        market_data_request_vendor = self.construct_vendor_market_data_request(market_data_request)

        logger.info("Request Eikon data")

        data_frame = self.download(market_data_request_vendor)

        # BUGFIX: 'data_frame.index is []' was always False (identity test
        # against a fresh list literal); test emptiness explicitly instead.
        if data_frame is None or len(data_frame.index) == 0: return None

        if data_frame is not None:
            # Tidy up tickers into a format that is more easily translatable
            # we can often get multiple fields returned (even if we don't ask for them!)
            # convert to lower case
            returned_fields = market_data_request_vendor.fields
            returned_tickers = []

            # one (ticker, field) pair per requested field
            for vi in market_data_request_vendor.tickers:
                for f in market_data_request_vendor.fields:
                    returned_tickers.append(vi)

            try:
                fields = self.translate_from_vendor_field(returned_fields, market_data_request)
                tickers = self.translate_from_vendor_ticker(returned_tickers, market_data_request)
            except Exception as e:
                # BUGFIX: previously swallowed with print('error'), which left
                # fields/tickers unbound and caused a NameError below; fall
                # back to the untranslated vendor names instead.
                logger.error("Could not translate vendor tickers/fields: " + str(e))
                fields = returned_fields
                tickers = returned_tickers

            ticker_combined = []

            for i in range(0, len(tickers)):
                try:
                    ticker_combined.append(tickers[i] + "." + fields[i])
                except IndexError:
                    # fewer fields than tickers: assume the close price
                    ticker_combined.append(tickers[i] + ".close")

            data_frame.columns = ticker_combined
            data_frame.index.name = 'Date'

        logger.info("Completed request from Eikon for " + str(ticker_combined))

        return data_frame

    def download(self, market_data_request):
        """Fetch raw data from Eikon, retrying up to 5 times.

        Returns the raw vendor DataFrame (or None on repeated failure).
        """
        logger = LoggerManager().getLogger(__name__)

        trials = 0

        data_frame = None

        if market_data_request.freq == 'tick':
            freq = 'taq'  # Unofficial support https://community.developers.refinitiv.com/questions/48616/how-do-i-get-historical-ticks-using-python-eikon-p.html
        elif market_data_request.freq == 'daily':
            freq = 'daily'
        else:
            freq = 'minute'

        while (trials < 5):
            try:
                # Can sometimes fail first time around
                ek.set_app_key(market_data_request.eikon_api_key)
                # ek.set_port_number(9000)

                data_frame = ek.get_timeseries(market_data_request.tickers,
                                               start_date=market_data_request.start_date.strftime("%Y-%m-%dT%H:%M:%S"),
                                               end_date=market_data_request.finish_date.strftime("%Y-%m-%dT%H:%M:%S"),
                                               fields=market_data_request.fields,
                                               interval=freq
                                               )

                break
            except SyntaxError:
                # invalid ticker: retrying will not help
                logger.error("The tickers %s do not exist on Eikon." % market_data_request.tickers)
                break
            except Exception as e:
                trials = trials + 1
                logger.info(
                    "Attempting... " + str(trials) + " request to download from Eikon due to following error: " + str(
                        e))

        if trials == 5:
            logger.error("Couldn't download from Eikon after several attempts!")

        return data_frame
#######################################################################################################################
# # support fredapi (FRED/ALFRED) - NOTE: the Fred class is used by DataVendorALFRED below
# try:
# import fredapi
# from fredapi import Fred
# except:
# pass
from findatapy.market.datavendor import DataVendor
from findatapy.timeseries import Filter, Calculations
class DataVendorALFRED(DataVendor):
    """Class for reading in data from ALFRED (and FRED) into findatapy library (based upon fredapi from
    https://github.com/mortada/fredapi
    """

    def __init__(self):
        super(DataVendorALFRED, self).__init__()

    # implement method in abstract superclass
    def load_ticker(self, market_data_request):
        """Download data from ALFRED/FRED and translate vendor tickers/fields
        to findatapy conventions.

        Parameters
        ----------
        market_data_request : MarketDataRequest
            Describes tickers, fields and date range to fetch.

        Returns
        -------
        pandas.DataFrame or None
        """
        logger = LoggerManager().getLogger(__name__)

        market_data_request_vendor = self.construct_vendor_market_data_request(market_data_request)

        logger.info("Request ALFRED/FRED data")

        data_frame = self.download_daily(market_data_request_vendor)

        # BUGFIX: 'data_frame.index is []' was always False (identity test
        # against a fresh list literal); test emptiness explicitly instead.
        if data_frame is None or len(data_frame.index) == 0: return None

        # convert from vendor to findatapy tickers/fields
        if data_frame is not None:
            returned_tickers = data_frame.columns

        if data_frame is not None:
            # tidy up tickers into a format that is more easily translatable
            # columns are of the form 'TICKER.field'
            returned_fields = [(x.split('.')[1]) for x in returned_tickers]
            returned_tickers = [(x.split('.')[0]) for x in returned_tickers]

            try:
                fields = self.translate_from_vendor_field(returned_fields, market_data_request)
                tickers = self.translate_from_vendor_ticker(returned_tickers, market_data_request)
            except Exception as e:
                # BUGFIX: previously swallowed with print('error'), which left
                # fields/tickers unbound and caused a NameError below; fall
                # back to the untranslated vendor names instead.
                logger.error("Could not translate vendor tickers/fields: " + str(e))
                fields = returned_fields
                tickers = returned_tickers

            ticker_combined = []

            for i in range(0, len(fields)):
                ticker_combined.append(tickers[i] + "." + fields[i])

            data_frame.columns = ticker_combined
            data_frame.index.name = 'Date'

        logger.info("Completed request from ALFRED/FRED for " + str(ticker_combined))

        return data_frame

    def download_daily(self, market_data_request):
        """Fetch each ticker/field combination from ALFRED/FRED and join them.

        NOTE(review): the module-level 'from fredapi import Fred' import is
        commented out at the top of this file, so 'Fred' below relies on it
        being importable elsewhere - confirm fredapi is installed/imported.
        """
        logger = LoggerManager().getLogger(__name__)

        data_frame_list = []
        data_frame_release = []

        # TODO refactor this code, a bit messy at the moment!
        for i in range(0, len(market_data_request.tickers)):
            # BUGFIX: retry counter is now reset per ticker; previously one
            # ticker exhausting its retries caused every later ticker to be
            # skipped silently (the while condition stayed False).
            trials = 0

            while (trials < 5):
                try:
                    fred = Fred(api_key=market_data_request.fred_api_key)

                    # acceptable fields: close, actual-release, release-date-time-full
                    if 'close' in market_data_request.fields and 'release-date-time-full' in market_data_request.fields:
                        data_frame = fred.get_series_all_releases(market_data_request.tickers[i],
                                                                  observation_start=market_data_request.start_date,
                                                                  observation_end=market_data_request.finish_date)

                        data_frame = data_frame.rename(
                            columns={"realtime_start": market_data_request.tickers[i] + '.release-date-time-full',
                                     "date": "Date",
                                     "value": market_data_request.tickers[i] + '.close'})

                        # keep the *last* release per observation date
                        data_frame = data_frame.sort_values(
                            by=['Date', market_data_request.tickers[i] + '.release-date-time-full'])
                        data_frame = data_frame.drop_duplicates(subset=['Date'], keep='last')
                        data_frame = data_frame.set_index(['Date'])

                        # renamed from 'filter' to avoid shadowing the builtin
                        filt = Filter()
                        data_frame = filt.filter_time_series_by_date(market_data_request.start_date,
                                                                     market_data_request.finish_date, data_frame)

                        data_frame_list.append(data_frame)
                    elif 'close' in market_data_request.fields:
                        data_frame = fred.get_series(series_id=market_data_request.tickers[i],
                                                     observation_start=market_data_request.start_date,
                                                     observation_end=market_data_request.finish_date)

                        data_frame = data_frame.to_frame(name=market_data_request.tickers[i] + '.close')
                        data_frame.index.name = 'Date'

                        data_frame_list.append(data_frame)

                    if 'first-revision' in market_data_request.fields:
                        data_frame = fred.get_series_first_revision(market_data_request.tickers[i],
                                                                    observation_start=market_data_request.start_date,
                                                                    observation_end=market_data_request.finish_date)

                        data_frame = data_frame.to_frame(name=market_data_request.tickers[i] + '.first-revision')
                        data_frame.index.name = 'Date'

                        filt = Filter()
                        data_frame = filt.filter_time_series_by_date(market_data_request.start_date,
                                                                     market_data_request.finish_date, data_frame)

                        data_frame_list.append(data_frame)

                    if 'actual-release' in market_data_request.fields and 'release-date-time-full' in market_data_request.fields:
                        data_frame = fred.get_series_all_releases(market_data_request.tickers[i],
                                                                  observation_start=market_data_request.start_date,
                                                                  observation_end=market_data_request.finish_date)

                        data_frame = data_frame.rename(
                            columns={"realtime_start": market_data_request.tickers[i] + '.release-date-time-full',
                                     "date": "Date",
                                     "value": market_data_request.tickers[i] + '.actual-release'})

                        # keep the *first* release per observation date
                        data_frame = data_frame.sort_values(
                            by=['Date', market_data_request.tickers[i] + '.release-date-time-full'])
                        data_frame = data_frame.drop_duplicates(subset=['Date'], keep='first')
                        data_frame = data_frame.set_index(['Date'])

                        filt = Filter()
                        data_frame = filt.filter_time_series_by_date(market_data_request.start_date,
                                                                     market_data_request.finish_date, data_frame)

                        data_frame_list.append(data_frame)
                    elif 'actual-release' in market_data_request.fields:
                        data_frame = fred.get_series_first_release(market_data_request.tickers[i],
                                                                   observation_start=market_data_request.start_date,
                                                                   observation_end=market_data_request.finish_date)

                        data_frame = data_frame.to_frame(name=market_data_request.tickers[i] + '.actual-release')
                        data_frame.index.name = 'Date'
                        # data_frame = data_frame.rename(columns={"value" : md_request.tickers[i] + '.actual-release'})

                        filt = Filter()
                        data_frame = filt.filter_time_series_by_date(market_data_request.start_date,
                                                                     market_data_request.finish_date, data_frame)

                        data_frame_list.append(data_frame)
                    elif 'release-date-time-full' in market_data_request.fields:
                        data_frame = fred.get_series_all_releases(market_data_request.tickers[i],
                                                                  observation_start=market_data_request.start_date,
                                                                  observation_end=market_data_request.finish_date)

                        data_frame = data_frame['realtime_start']
                        data_frame = data_frame.to_frame(
                            name=market_data_request.tickers[i] + '.release-date-time-full')
                        data_frame.index = data_frame[market_data_request.tickers[i] + '.release-date-time-full']

                        data_frame = data_frame.sort_index()
                        data_frame = data_frame.drop_duplicates()

                        filt = Filter()
                        data_frame_release.append(filt.filter_time_series_by_date(market_data_request.start_date,
                                                                                  market_data_request.finish_date,
                                                                                  data_frame))

                    break
                except Exception as e:
                    trials = trials + 1
                    logger.info("Attempting... " + str(trials) + " request to download from ALFRED/FRED" + str(e))

            if trials == 5:
                logger.error("Couldn't download from ALFRED/FRED after several attempts!")

        calc = Calculations()

        data_frame1 = calc.join(data_frame_list, how='outer')
        data_frame2 = calc.join(data_frame_release, how='outer')

        data_frame = pandas.concat([data_frame1, data_frame2], axis=1)

        return data_frame
#######################################################################################################################
class DataVendorONS(DataVendor):
    """Loads UK Office for National Statistics (ONS) data into findatapy format.

    Vendor columns are expected in the form "TICKER - Field Name"; they are
    split apart and translated into findatapy "ticker.field" columns.
    """

    def __init__(self):
        super(DataVendorONS, self).__init__()

    # implement method in abstract superclass
    def load_ticker(self, market_data_request):
        """Download ONS data and translate vendor tickers/fields.

        Parameters
        ----------
        market_data_request : MarketDataRequest
            Tickers, fields and the date range to fetch.

        Returns
        -------
        DataFrame or None
            None when nothing was downloaded.
        """
        logger = LoggerManager().getLogger(__name__)

        market_data_request_vendor = self.construct_vendor_market_data_request(market_data_request)

        logger.info("Request ONS data")

        data_frame = self.download_daily(market_data_request_vendor)

        # Fix: the original test was "data_frame.index is []", an identity
        # comparison against a fresh list which is always False; check
        # emptiness explicitly instead
        if data_frame is None or len(data_frame.index) == 0:
            return None

        # convert from vendor to findatapy tickers/fields
        if data_frame is not None:
            returned_tickers = data_frame.columns

        if data_frame is not None:
            # tidy up tickers into a format that is more easily translatable
            # we can often get multiple fields returned (even if we don't ask for them!)
            # convert to lower case
            returned_fields = [(x.split(' - ')[1]).lower().replace(' ', '-') for x in returned_tickers]
            returned_fields = [x.replace('value', 'close') for x in returned_fields]  # special case for close

            returned_tickers = [x.replace('.', '/') for x in returned_tickers]
            returned_tickers = [x.split(' - ')[0] for x in returned_tickers]

            fields = self.translate_from_vendor_field(returned_fields, market_data_request)
            tickers = self.translate_from_vendor_ticker(returned_tickers, market_data_request)

            ticker_combined = []

            for i in range(0, len(fields)):
                ticker_combined.append(tickers[i] + "." + fields[i])

            data_frame.columns = ticker_combined
            data_frame.index.name = 'Date'

        logger.info("Completed request from ONS.")

        return data_frame

    def download_daily(self, market_data_request):
        """Attempt (up to 5 times) to download daily data from ONS.

        NOTE: the actual download is not implemented yet (see TODO), so this
        currently always returns None after one pass through the loop.
        """
        logger = LoggerManager().getLogger(__name__)

        trials = 0
        data_frame = None

        while trials < 5:
            try:
                # TODO implement the actual ONS download
                break
            except Exception:
                trials = trials + 1
                logger.info("Attempting... " + str(trials) + " request to download from ONS")

        if trials == 5:
            logger.error("Couldn't download from ONS after several attempts!")

        return data_frame
#######################################################################################################################
class DataVendorBOE(DataVendor):
    """Loads time series from the Bank of England IADB CSV interface."""

    def __init__(self):
        super(DataVendorBOE, self).__init__()

    # implement method in abstract superclass
    def load_ticker(self, market_data_request):
        """Download BOE data and append the requested field to each column name.

        Parameters
        ----------
        market_data_request : MarketDataRequest

        Returns
        -------
        DataFrame or None
        """
        logger = LoggerManager().getLogger(__name__)
        market_data_request_vendor = self.construct_vendor_market_data_request(market_data_request)

        logger.info("Request BOE data")

        data_frame = self.download_daily(market_data_request_vendor)

        # Fix: "data_frame.index is []" is an identity comparison against a
        # fresh list and is always False; test emptiness explicitly
        if data_frame is None or len(data_frame.index) == 0:
            return None

        # convert from vendor to findatapy tickers/fields
        if data_frame is not None:
            if len(market_data_request.fields) == 1:
                # single field: tag every returned column with it
                data_frame.columns = data_frame.columns.str.cat(
                    market_data_request.fields * len(data_frame.columns), sep='.')
            else:
                logger.warning("Inconsistent number of fields and tickers.")
                data_frame.columns = data_frame.columns.str.cat(
                    market_data_request.fields, sep='.')

            data_frame.index.name = 'Date'

        logger.info("Completed request from BOE.")

        return data_frame

    def download_daily(self, market_data_request):
        """Attempt (up to 5 times) to download the BOE CSV for the request window."""
        logger = LoggerManager().getLogger(__name__)

        trials = 0
        data_frame = None

        boe_url = ("http://www.bankofengland.co.uk/boeapps/iadb/fromshowcolumns.asp"
                   "?csv.x=yes&Datefrom={start_date}&Dateto={end_date}"
                   "&SeriesCodes={tickers}"
                   "&CSVF=TN&UsingCodes=Y&VPD=Y&VFD=N")

        # BOE expects dates formatted like 01/Jan/2020
        start_time = market_data_request.start_date.strftime("%d/%b/%Y")
        end_time = market_data_request.finish_date.strftime("%d/%b/%Y")

        while trials < 5:
            try:
                data_frame = pd.read_csv(boe_url.format(start_date=start_time, end_date=end_time,
                                                        tickers=','.join(market_data_request.tickers)),
                                         index_col='DATE')
                break
            except Exception:
                trials = trials + 1
                logger.info("Attempting... " + str(trials) + " request to download from BOE")

        if trials == 5:
            logger.error("Couldn't download from BoE after several attempts!")

        return data_frame
#######################################################################################################################
# yfinance is an optional dependency: DataVendorYahoo below only works when it
# is installed. Catch ImportError specifically rather than a bare except.
try:
    import yfinance as yf
except ImportError:
    pass
class DataVendorYahoo(DataVendor):
    """Loads daily price data from Yahoo Finance via the yfinance package."""

    def __init__(self):
        super(DataVendorYahoo, self).__init__()

    # implement method in abstract superclass
    def load_ticker(self, market_data_request):
        """Download Yahoo data and translate columns into 'ticker.field' form.

        Parameters
        ----------
        market_data_request : MarketDataRequest

        Returns
        -------
        DataFrame or None
        """
        logger = LoggerManager().getLogger(__name__)
        market_data_request_vendor = self.construct_vendor_market_data_request(market_data_request)

        logger.info("Request Yahoo data")

        data_frame = self.download_daily(market_data_request_vendor)

        # Fix: "data_frame.index is []" is always False (identity comparison
        # with a fresh list); treat an empty result as "no data" explicitly
        if data_frame is None or len(data_frame.index) == 0:
            return None

        # convert from vendor to findatapy tickers/fields
        if data_frame is not None:
            try:
                # multi-ticker downloads come back with MultiIndex columns;
                # flatten them into "field/ticker"
                if len(market_data_request.tickers) > 1:
                    data_frame.columns = ['/'.join(col) for col in data_frame.columns.values]
            except Exception:
                pass

            returned_tickers = data_frame.columns

        if data_frame is not None:
            # tidy up tickers into a format that is more easily translatable
            # columns are "field/ticker" at this point
            returned_fields = [x.split('/')[0].lower() for x in returned_tickers]
            returned_tickers = [x.split('/')[1] for x in returned_tickers]

            fields = self.translate_from_vendor_field(returned_fields, market_data_request)
            tickers = self.translate_from_vendor_ticker(returned_tickers, market_data_request)

            ticker_combined = []

            for i in range(0, len(fields)):
                ticker_combined.append(tickers[i] + "." + fields[i])

            data_frame.columns = ticker_combined
            data_frame.index.name = 'Date'

        logger.info("Completed request from Yahoo.")

        return data_frame

    def download_daily(self, market_data_request):
        """Download raw daily bars from Yahoo with up to 5 retries."""
        logger = LoggerManager().getLogger(__name__)

        trials = 0
        data_frame = None

        ticker_list = ' '.join(market_data_request.tickers)

        # Fix: the data was previously also downloaded once unconditionally
        # *before* this retry loop, doubling the number of requests
        while trials < 5:
            try:
                data_frame = yf.download(ticker_list, start=market_data_request.start_date,
                                         end=market_data_request.finish_date)
                break
            except Exception as e:
                print(str(e))
                trials = trials + 1
                logger.info("Attempting... " + str(trials) + " request to download from Yahoo")

        if trials == 5:
            # Fix: error message previously said "ONS" (copy/paste error)
            logger.error("Couldn't download from Yahoo after several attempts!")

        # single-ticker downloads have flat columns; prefix with the ticker so
        # the "field/ticker" parsing in load_ticker works uniformly
        # (also guard against a failed download leaving data_frame as None)
        if data_frame is not None and len(market_data_request.tickers) == 1:
            data_frame.columns = [x + '/' + market_data_request.tickers[0] for x in data_frame.columns]

        return data_frame
#######################################################################################################################
# for pandas 0.23 (necessary for older versions of pandas_datareader)
try:
    import pandas

    pandas.core.common.is_list_like = pandas.api.types.is_list_like
except (ImportError, AttributeError):
    # newer pandas may remove/relocate these attributes; the shim is only
    # needed for old pandas_datareader, so failure here is harmless
    pass

# pandas_datareader is optional; DataVendorPandasWeb requires it
try:
    import pandas_datareader.data as web
except ImportError:
    pass

from findatapy.market.datavendor import DataVendor
class DataVendorPandasWeb(DataVendor):
    """Class for reading in data from various web sources into findatapy library including
    Yahoo! Finance - yahoo
    Google Finance - google
    St. Louis FED (FRED) - fred
    Kenneth French data library - famafrench
    World Bank - wb
    """

    def __init__(self):
        super(DataVendorPandasWeb, self).__init__()

    # implement method in abstract superclass
    def load_ticker(self, market_data_request):
        """Download via pandas-datareader and map columns to 'ticker.field'.

        Parameters
        ----------
        market_data_request : MarketDataRequest

        Returns
        -------
        DataFrame or None
        """
        logger = LoggerManager().getLogger(__name__)
        market_data_request_vendor = self.construct_vendor_market_data_request(market_data_request)

        logger.info("Request Pandas Web data")

        data_frame = self.download_daily(market_data_request_vendor)

        if market_data_request_vendor.data_source == 'fred':
            # FRED returns one close column per ticker
            returned_fields = ['close' for x in data_frame.columns.values]
            returned_tickers = data_frame.columns.values
        else:
            # other sources return a Series; pivot into (field, ticker) columns
            data_frame = data_frame.to_frame().unstack()

            # print(data_frame.tail())

            # Fix: this previously read "data_frame.index is []", an identity
            # comparison against a fresh list which is always False
            if len(data_frame.index) == 0:
                return None

            # convert from vendor to findatapy tickers/fields
            if data_frame is not None:
                returned_fields = data_frame.columns.get_level_values(0)
                returned_tickers = data_frame.columns.get_level_values(1)

        if data_frame is not None:
            fields = self.translate_from_vendor_field(returned_fields, market_data_request)
            tickers = self.translate_from_vendor_ticker(returned_tickers, market_data_request)

            ticker_combined = []

            for i in range(0, len(fields)):
                ticker_combined.append(tickers[i] + "." + fields[i])

            ticker_requested = []

            for f in market_data_request.fields:
                for t in market_data_request.tickers:
                    ticker_requested.append(t + "." + f)

            data_frame.columns = ticker_combined
            data_frame.index.name = 'Date'

            # return all the tickers (this might be imcomplete list, but we will pad the list later)
            # data_frame = pandas.DataFrame(data = data_frame[ticker_requested],
            #                               index = data_frame.index, columns = ticker_requested)

        logger.info("Completed request from Pandas Web.")

        return data_frame

    def download_daily(self, market_data_request):
        """Fetch the raw data via pandas-datareader's DataReader."""
        return web.DataReader(market_data_request.tickers, market_data_request.data_source,
                              market_data_request.start_date, market_data_request.finish_date)
########################################################################################################################
####Bitcoin####################################################################################################################
from findatapy.market.datavendor import DataVendor
class DataVendorBitcoincharts(DataVendor):
    """Class for reading in data from various web sources into findatapy library including
    """

    def __init__(self):
        super(DataVendorBitcoincharts, self).__init__()

    # implement method in abstract superclass
    def load_ticker(self, market_data_request):
        """Fetch the full trade history CSV for one pair and window it by date."""
        logger = LoggerManager().getLogger(__name__)
        vendor_request = self.construct_vendor_market_data_request(market_data_request)

        logger.info("Request data from Bitcoincharts")

        # gzipped CSV of every trade for the requested pair
        csv_url = 'http://api.bitcoincharts.com/v1/csv/' + vendor_request.tickers[0] + '.csv.gz'

        trades = pandas.read_csv(csv_url, names=['datetime', 'close', 'volume'])
        trades = trades.set_index('datetime')
        trades.index = pandas.to_datetime(trades.index, unit='s')
        trades.index.name = 'Date'

        # keep only the requested window
        in_window = (trades.index >= vendor_request.start_date) & (trades.index <= vendor_request.finish_date)
        trades = trades[in_window]

        # data_frame = df[~df.index.duplicated(keep='last')]
        if len(trades) == 0:
            print('###############################################################')
            print('Warning: No data. Please change the start_date and finish_date.')
            print('###############################################################')

        ticker = market_data_request.tickers[0]
        trades.columns = [ticker + '.close', ticker + '.volume']

        logger.info("Completed request from Bitcoincharts.")

        return trades
########################################################################################################################
from findatapy.market.datavendor import DataVendor
class DataVendorPoloniex(DataVendor):
    """Loads OHLCV candle data from the Poloniex public chart API."""

    def __init__(self):
        super(DataVendorPoloniex, self).__init__()

    # Implement method in abstract superclass
    def load_ticker(self, market_data_request):
        """Download candles for one pair and return the requested fields.

        Parameters
        ----------
        market_data_request : MarketDataRequest

        Returns
        -------
        DataFrame
        """
        logger = LoggerManager().getLogger(__name__)
        market_data_request_vendor = self.construct_vendor_market_data_request(market_data_request)

        logger.info("Request data from Poloniex")

        # Fix: the URL previously contained the mojibake "¤cyPair" where the
        # "&curren" in "&currencyPair" had been mangled into the ¤ entity
        poloniex_url = 'https://poloniex.com/public?command=returnChartData&currencyPair={}&start={}&end={}&period={}'

        if market_data_request_vendor.freq == 'intraday':
            period = 300  # 5-minute candles
        elif market_data_request_vendor.freq == 'daily':
            period = 86400
        else:
            # Fix: an unsupported freq previously fell through to a NameError
            # on 'period'; fail with a clear message instead
            raise ValueError("Unsupported freq for Poloniex: " + str(market_data_request_vendor.freq))

        json_url = poloniex_url.format(market_data_request_vendor.tickers[0],
                                       int(market_data_request_vendor.start_date.timestamp()),
                                       int(market_data_request_vendor.finish_date.timestamp()), period)

        data_frame = pandas.read_json(json_url)
        data_frame = data_frame.set_index('date')
        data_frame.index.name = 'Date'

        # Poloniex signals "no data" with a single epoch-0 row; also guard the
        # previously unchecked empty-frame case which raised IndexError
        if (len(data_frame.index) == 0) or (data_frame.index[0] == 0):
            print('###############################################################')
            print('Warning: No data. Please change the start_date and finish_date.')
            print('###############################################################')

        ticker = market_data_request.tickers[0]
        data_frame.columns = [ticker + '.close',
                              ticker + '.high',
                              ticker + '.low',
                              ticker + '.open',
                              ticker + '.quote-volume',
                              ticker + '.volume',
                              ticker + '.weighted-average']

        # pick out only the requested 'ticker.field' columns
        field_selected = [ticker + '.' + f for f in market_data_request_vendor.fields]

        logger.info("Completed request from Poloniex")

        return data_frame[field_selected]
#############################################################################################
from findatapy.market.datavendor import DataVendor
class DataVendorBinance(DataVendor):
    """Loads candle (kline) data from the Binance public REST API.

    Binance caps each request at 500 rows, so the download is paged: we keep
    requesting from the open time of the last row received until a short page
    signals the end of the window.
    """

    # Data limit = 500
    def __init__(self):
        super(DataVendorBinance, self).__init__()

    # implement method in abstract superclass
    def load_ticker(self, market_data_request):
        """Download klines for one ticker and return 'ticker.field' columns.

        Parameters
        ----------
        market_data_request : MarketDataRequest

        Returns
        -------
        DataFrame
        """
        logger = LoggerManager().getLogger(__name__)
        market_data_request_vendor = self.construct_vendor_market_data_request(market_data_request)

        logger.info("Request data from Binance")

        binance_url = 'https://www.binance.com/api/v1/klines?symbol={}&interval={}&startTime={}&endTime={}'

        if market_data_request_vendor.freq == 'intraday':
            period = '1m'
        elif market_data_request_vendor.freq == 'daily':
            period = '1d'
        else:
            # Fix: an unsupported freq previously fell through to a NameError
            raise ValueError("Unsupported freq for Binance: " + str(market_data_request_vendor.freq))

        data_frame = pandas.DataFrame(columns=[0, 1, 2, 3, 4, 5])

        # Binance timestamps are in milliseconds
        start_time = int(market_data_request_vendor.start_date.timestamp() * 1000)
        finish_time = int(market_data_request_vendor.finish_date.timestamp() * 1000)

        stop_flag = 0

        # page through the window 500 rows at a time
        # (removed dead "if stop_flag == 1: break" at the top of the loop and
        # the unused local "import time" — time_library is used for sleeping)
        while stop_flag == 0:
            json_url = binance_url.format(market_data_request_vendor.tickers[0], period, start_time, finish_time)
            data_read = pandas.read_json(json_url)

            # fewer than the 500-row cap means this is the final page
            if (len(data_read) < 500):
                if ((len(data_read) == 0) & (len(data_frame) == 0)):
                    print('###############################################################')
                    print('Warning: No data. Please change the start_date and finish_date.')
                    print('###############################################################')
                    break
                else:
                    stop_flag = 1

            data_frame = data_frame.append(data_read)

            # resume paging from the open time of the last row received
            # (iloc[-1] rather than int(Series.tail(1)), which newer pandas rejects)
            start_time = int(data_frame[0].iloc[-1])

            # be polite to the public API between pages
            time_library.sleep(2)

        if (len(data_frame) == 0):
            return data_frame

        data_frame.columns = ['open-time', 'open', 'high', 'low', 'close', 'volume', 'close-time', 'quote-asset-volume',
                              'trade-numbers', 'taker-buy-base-asset-volume', 'taker-buy-quote-asset-volume', 'ignore']
        # open-time is in milliseconds; convert to seconds for to_datetime
        data_frame['open-time'] = data_frame['open-time'] / 1000
        data_frame = data_frame.set_index('open-time')
        data_frame = data_frame.drop(['close-time', 'ignore'], axis=1)
        data_frame.index.name = 'Date'
        data_frame.index = pandas.to_datetime(data_frame.index, unit='s')

        ticker = market_data_request.tickers[0]
        data_frame.columns = [ticker + '.open',
                              ticker + '.high',
                              ticker + '.low',
                              ticker + '.close',
                              ticker + '.volume',
                              ticker + '.quote-asset-volume',
                              ticker + '.trade-numbers',
                              ticker + '.taker-buy-base-asset-volume',
                              ticker + '.taker-buy-quote-asset-volume']

        # pick out only the requested 'ticker.field' columns
        field_selected = [ticker + '.' + f for f in market_data_request_vendor.fields]

        logger.info("Completed request from Binance")

        return data_frame[field_selected]
#########################################################################################################
from findatapy.market.datavendor import DataVendor
class DataVendorBitfinex(DataVendor):
    """Loads candle data from the Bitfinex v2 public REST API.

    Bitfinex caps each request at 1000 rows, so the window is paged by the
    timestamp of the last candle received.
    """

    # Data limit = 1000
    def __init__(self):
        super(DataVendorBitfinex, self).__init__()

    # implement method in abstract superclass
    def load_ticker(self, market_data_request):
        """Download candles for one pair and return 'ticker.field' columns.

        Parameters
        ----------
        market_data_request : MarketDataRequest

        Returns
        -------
        DataFrame
        """
        logger = LoggerManager().getLogger(__name__)
        market_data_request_vendor = self.construct_vendor_market_data_request(market_data_request)

        logger.info("Request data from Bitfinex.")

        bitfinex_url = 'https://api.bitfinex.com/v2/candles/trade:{}:t{}/hist?start={}&end={}&limit=1000&sort=1'

        if market_data_request_vendor.freq == 'intraday':
            period = '1m'
        elif market_data_request_vendor.freq == 'daily':
            period = '1D'
        else:
            # Fix: an unsupported freq previously fell through to a NameError
            raise ValueError("Unsupported freq for Bitfinex: " + str(market_data_request_vendor.freq))

        data_frame = pandas.DataFrame(columns=[0, 1, 2, 3, 4, 5])

        # Bitfinex timestamps are in milliseconds
        start_time = int(market_data_request_vendor.start_date.timestamp() * 1000)
        finish_time = int(market_data_request_vendor.finish_date.timestamp() * 1000)

        stop_flag = 0

        # page through the window 1000 rows at a time
        # (removed dead "if stop_flag == 1: break" at the top of the loop)
        while stop_flag == 0:
            json_url = bitfinex_url.format(period, market_data_request_vendor.tickers[0], start_time, finish_time)
            data_read = pandas.read_json(json_url)

            # a short page (fewer than the 1000-row cap) is the final page
            if (len(data_read) < 1000):
                if ((len(data_read) == 0) & (len(data_frame) == 0)):
                    break
                else:
                    stop_flag = 1

            data_frame = data_frame.append(data_read)

            # resume paging from the timestamp of the last candle received
            # (iloc[-1] rather than int(Series.tail(1)), which newer pandas rejects)
            start_time = int(data_frame[0].iloc[-1])

            # be polite to the public API between pages
            time_library.sleep(2)

        if (len(data_frame) == 0):
            print('###############################################################')
            print('Warning: No data. Please change the start_date and finish_date.')
            print('###############################################################')
            # return data_frame

        data_frame.columns = ['mts', 'open', 'close', 'high', 'low', 'volume']
        data_frame = data_frame.set_index('mts')
        data_frame = data_frame[~data_frame.index.duplicated(keep='first')]
        data_frame.index.name = 'Date'
        data_frame.index = pandas.to_datetime(data_frame.index, unit='ms')

        ticker = market_data_request.tickers[0]
        data_frame.columns = [ticker + '.open',
                              ticker + '.close',
                              ticker + '.high',
                              ticker + '.low',
                              ticker + '.volume']

        # pick out only the requested 'ticker.field' columns
        field_selected = [ticker + '.' + f for f in market_data_request_vendor.fields]

        logger.info("Completed request from Bitfinex.")

        return data_frame[field_selected]
#########################################################################################################
from findatapy.market.datavendor import DataVendor
class DataVendorGdax(DataVendor):
    """Loads candle data from the GDAX (Coinbase Pro) public REST API.

    GDAX caps each request at 350 candles, so the window is walked forward in
    350-candle slices.
    """

    # Data limit = 350
    def __init__(self):
        super(DataVendorGdax, self).__init__()

    # implement method in abstract superclass
    def load_ticker(self, market_data_request):
        """Download candles for one product and return 'ticker.field' columns.

        Parameters
        ----------
        market_data_request : MarketDataRequest

        Returns
        -------
        DataFrame
        """
        logger = LoggerManager().getLogger(__name__)
        market_data_request_vendor = self.construct_vendor_market_data_request(market_data_request)

        logger.info("Request data from Gdax.")

        gdax_url = 'https://api.gdax.com/products/{}/candles?start={}&end={}&granularity={}'

        start_time = market_data_request_vendor.start_date
        end_time = market_data_request_vendor.finish_date

        if market_data_request_vendor.freq == 'intraday':
            # 1 minute data
            period = '60'
            dt = timedelta(minutes=1)
        elif market_data_request_vendor.freq == 'daily':
            period = '86400'
            dt = timedelta(days=1)
        else:
            # Fix: an unsupported freq previously fell through to a NameError
            # on 'period'/'dt'
            raise ValueError("Unsupported freq for Gdax: " + str(market_data_request_vendor.freq))

        limit = 350
        data_frame = pandas.DataFrame(columns=[0, 1, 2, 3, 4, 5])

        stop_flag = 0

        # walk the window forward in 'limit'-candle slices
        # (removed dead "if stop_flag == 1: break" at the top of the loop)
        while stop_flag == 0:
            data_end_time = start_time + (limit - 1) * dt

            # clamp the final slice to the end of the requested window
            if data_end_time > end_time:
                data_end_time = end_time
                stop_flag = 1

            json_url = gdax_url.format(market_data_request_vendor.tickers[0], start_time.isoformat(),
                                       data_end_time.isoformat(), period)
            data_read = pandas.read_json(json_url)
            data_frame = data_frame.append(data_read)

            if (len(data_read) == 0):
                # empty slice: just skip ahead to the end of this slice
                start_time = data_end_time
            else:
                # GDAX returns newest first, so the head row carries the
                # latest candle time in this slice
                # (iloc[0] rather than int(Series.head(1)), which newer pandas rejects)
                start_time = pandas.to_datetime(int(data_read[0].iloc[0]), unit='s')

            # be polite to the public API between slices
            time_library.sleep(2)

        if (len(data_frame) == 0):
            print('###############################################################')
            print('Warning: No data. Please change the start_date and finish_date.')
            print('###############################################################')

        data_frame.columns = ['time', 'low', 'high', 'open', 'close', 'volume']
        data_frame = data_frame.set_index('time')
        data_frame.index = pandas.to_datetime(data_frame.index, unit='s')
        data_frame.index.name = 'Date'
        data_frame = data_frame[~data_frame.index.duplicated(keep='first')]
        data_frame = data_frame.sort_index(ascending=True)

        ticker = market_data_request.tickers[0]
        data_frame.columns = [ticker + '.low',
                              ticker + '.high',
                              ticker + '.open',
                              ticker + '.close',
                              ticker + '.volume']

        # pick out only the requested 'ticker.field' columns
        field_selected = [ticker + '.' + f for f in market_data_request_vendor.fields]

        logger.info("Completed request from Gdax.")

        return data_frame[field_selected]
########################################################################################################################
from findatapy.market.datavendor import DataVendor
class DataVendorKraken(DataVendor):
    """Class for reading in data from various web sources into findatapy library including
    """

    # Data limit : can only get the most recent 720 rows for klines
    # Collect data from all trades data
    def __init__(self):
        super(DataVendorKraken, self).__init__()

    # implement method in abstract superclass
    def load_ticker(self, market_data_request):
        # Downloads raw trade prints from Kraken's public Trades endpoint and
        # stitches the pages together, since the OHLC endpoint only exposes the
        # most recent 720 candles.
        logger = LoggerManager().getLogger(__name__)

        market_data_request_vendor = self.construct_vendor_market_data_request(market_data_request)

        logger.info("Request data from Kraken.")

        # kraken_url = 'https://api.kraken.com/0/public/OHLC?pair={}&interval={}&since=0'
        # if market_data_request_vendor.freq == 'intraday':
        #     period = 1
        # if market_data_request_vendor.freq == 'daily':
        #     period = 1440

        # Kraken's "since" cursor is a nanosecond timestamp (hence * 1e9)
        start_time = int(market_data_request_vendor.start_date.timestamp() * 1e9)
        end_time = int(market_data_request_vendor.finish_date.timestamp() * 1e9)

        kraken_url = 'https://api.kraken.com/0/public/Trades?pair={}&since={}'

        data_frame = pandas.DataFrame(columns=['close', 'volume', 'time', 'buy-sell', 'market-limit', 'miscellaneous'])

        stop_flag = 0

        while stop_flag == 0:
            if stop_flag == 1:
                break

            json_url = kraken_url.format(market_data_request_vendor.tickers[0], start_time)
            data_read = json.loads(requests.get(json_url).text)

            # a payload with a single top-level key is an error response (no
            # 'result'); wait out the rate limit and retry once
            if (len(list(data_read)) == 1):
                time_library.sleep(10)
                data_read = json.loads(requests.get(json_url).text)

            # 'result' maps the pair name to the list of trades; take that key
            data_list = list(data_read['result'])[0]
            data_read = data_read['result'][data_list]

            df = pandas.DataFrame(data_read,
                                  columns=['close', 'volume', 'time', 'buy-sell', 'market-limit', 'miscellaneous'])

            # advance the cursor to the (nanosecond) time of the last trade
            start_time = int(df['time'].tail(1) * 1e9)

            # stop once the page reaches past the requested window
            if (start_time > end_time):
                stop_flag = 1

            if (end_time < int(df['time'].head(1) * 1e9)):
                stop_flag = 1

            data_frame = data_frame.append(df)

            # be polite to the public API between pages
            time_library.sleep(5)

        data_frame = data_frame.set_index('time')
        data_frame.index = pandas.to_datetime(data_frame.index, unit='s')
        data_frame.index.name = 'Date'

        data_frame = data_frame.drop(['miscellaneous'], axis=1)
        # encode buy/sell and market/limit flags as +1/-1
        data_frame.replace(['b', 's', 'm', 'l'], [1, -1, 1, -1], inplace=True)

        # trim to the requested window (pages can overshoot on both sides)
        data_frame = data_frame[(data_frame.index >= market_data_request_vendor.start_date) & (
                data_frame.index <= market_data_request_vendor.finish_date)]

        if (len(data_frame) == 0):
            print('###############################################################')
            print('Warning: No data. Please change the start_date and finish_date.')
            print('###############################################################')

        data_frame.columns = [market_data_request.tickers[0] + '.close',
                              market_data_request.tickers[0] + '.volume',
                              market_data_request.tickers[0] + '.buy-sell',
                              market_data_request.tickers[0] + '.market-limit']

        # build the list of requested 'ticker.field' columns
        field_selected = []

        for i in range(0, len(market_data_request_vendor.fields)):
            field_selected.append(0)
            field_selected[-1] = market_data_request.tickers[0] + '.' + market_data_request_vendor.fields[i]

        logger.info("Completed request from Kraken.")

        return data_frame[field_selected]
#########################################################################################################
class DataVendorBitmex(DataVendor):
    """Loads top-of-book quote data from the BitMEX public REST API.

    BitMEX caps each request at 500 rows, so the window is paged by the
    timestamp of the last quote received.
    """

    # Data limit = 500, 150 calls / 5 minutes
    def __init__(self):
        super(DataVendorBitmex, self).__init__()

    # implement method in abstract superclass
    def load_ticker(self, market_data_request):
        """Download quotes for one symbol and return 'ticker.field' columns.

        Parameters
        ----------
        market_data_request : MarketDataRequest

        Returns
        -------
        DataFrame
        """
        logger = LoggerManager().getLogger(__name__)
        market_data_request_vendor = self.construct_vendor_market_data_request(market_data_request)

        logger.info("Request data from Bitmex.")

        bitMEX_url = 'https://www.bitmex.com/api/v1/quote?symbol={}&count=500&reverse=false&startTime={}&endTime={}'

        data_frame = pandas.DataFrame(columns=['askPrice', 'askSize', 'bidPrice', 'bidSize', 'symbol', 'timestamp'])

        # Fix: the URL is built with .isoformat(), so keep these as datetimes;
        # previously .timestamp() turned them into floats, which have no
        # isoformat() and crashed the very first request
        start_time = market_data_request_vendor.start_date
        finish_time = market_data_request_vendor.finish_date

        symbol = market_data_request_vendor.tickers[0]

        stop_flag = 0

        # page through the window 500 quotes at a time
        # (removed dead "if stop_flag == 1: break" at the top of the loop)
        while stop_flag == 0:
            json_url = bitMEX_url.format(symbol, start_time.isoformat(), finish_time.isoformat())
            data_read = pandas.read_json(json_url)

            # a short page (fewer than the 500-row cap) is the final page
            if (len(data_read) < 500):
                stop_flag = 1

            data_frame = data_frame.append(data_read)

            # resume paging from the timestamp of the last quote received
            start_time = data_read['timestamp'][data_frame.index[-1]]

            # be polite to the public API between pages
            time_library.sleep(2)

        if (len(data_frame) == 0):
            print('###############################################################')
            print('Warning: No data. Please change the start_date and finish_date.')
            print('###############################################################')

        data_frame = data_frame.drop(columns=['symbol'])
        col = ['ask-price', 'ask-size', 'bid-price', 'bid-size', 'timestamp']
        data_frame.columns = col
        data_frame = data_frame.set_index('timestamp')
        data_frame.index = pandas.to_datetime(data_frame.index, unit='ms')
        data_frame = data_frame[~data_frame.index.duplicated(keep='first')]

        ticker = market_data_request.tickers[0]
        data_frame.columns = [ticker + '.ask-price',
                              ticker + '.ask-size',
                              ticker + '.bid-price',
                              ticker + '.bid-size']

        # pick out only the requested 'ticker.field' columns
        field_selected = [ticker + '.' + f for f in market_data_request_vendor.fields]

        # Fix: message previously said "Bitfinex" (copy/paste error)
        logger.info("Completed request from Bitmex.")

        return data_frame[field_selected]
class DataVendorHuobi(DataVendor):
    """Class for reading in data from various web sources into findatapy library including
    """

    # Data limit = 500, 150 calls / 5 minutes
    def __init__(self):
        super(DataVendorHuobi, self).__init__()

    # implement method in abstract superclass
    def load_ticker(self, market_data_request):
        """Fetch candle-stick data from Huobi's public kline endpoint."""
        logger = LoggerManager().getLogger(__name__)

        def _calc_period_size(freq, start_dt, finish_dt):
            # Huobi only serves the most recent N candles, so the request size
            # must span from start_dt all the way up to "now"
            window = (finish_dt - start_dt) + (datetime.datetime.now() - finish_dt)

            if freq == 'daily':
                return int(window.days), '1day'

            if freq == 'tick':
                return int(window.total_seconds() / 60), '1min'

            raise ValueError("Unsupported freq: '{}'".format(freq))

        # need to trick huobi to think we are a web-browser
        browser_headers = {
            "User-Agent": "Mozilla/5.0",
            "X-Requested-With": "XMLHttpRequest"
        }

        vendor_request = self.construct_vendor_market_data_request(market_data_request)

        logger.info("Request data from Huobi.")

        request_size, period = _calc_period_size(
            vendor_request.freq,
            vendor_request.start_date,
            vendor_request.finish_date)

        if request_size > 2000:
            raise ValueError(
                "Requested data too old for candle-stick frequency of '{}'".format(vendor_request.freq))

        url = "https://api.huobi.pro/market/history/kline?period={period}&size={size}&symbol={symbol}".format(
            period=period,
            size=request_size,
            symbol=vendor_request.tickers[0]
        )

        raw_data = json.loads(requests.get(url, headers=browser_headers).text)

        df = pandas.DataFrame(raw_data["data"])
        df["timestamp"] = pandas.to_datetime(df["id"], unit="s")
        df = df.set_index("timestamp").sort_index(ascending=True)
        df = df[~df.index.duplicated(keep='first')]
        df.drop(["id"], axis=1, inplace=True)

        if df.empty:
            logger.info('###############################################################')
            logger.info('Warning: No data. Please change the start_date and finish_date.')
            logger.info('###############################################################')

        ticker = market_data_request.tickers[0]
        df.columns = ["{}.{}".format(ticker, col) for col in df.columns]

        # keep only the requested 'ticker.field' columns
        field_selected = [ticker + '.' + field for field in vendor_request.fields]

        logger.info("Completed request from Huobi.")

        return df[field_selected]
#########################################################################################################
# numba is optional; do without the JIT when it is missing.
# Fix: this previously used "finally: pass", which does NOT suppress the
# ImportError — the module would crash on machines without numba.
try:
    from numba import jit
except ImportError:
    pass
# decompress binary files fetched from Dukascopy
try:
    import lzma
except ImportError:
    # Python 2 / older environments: fall back to the backports package
    from backports import lzma

# module-level handle on findatapy configuration constants (thread counts,
# timeouts etc. used by the Dukascopy loader below)
constants = DataConstants()
class DataVendorDukasCopy(DataVendor):
"""Class for downloading tick data from DukasCopy (note: past month of data is not available). Selecting very large
histories is not recommended as you will likely run out memory given the amount of data requested.
Parsing of files is re-written version https://github.com/nelseric/ticks/
parsing has been speeded up considerably
on-the-fly downloading/parsing
"""
tick_name = "{symbol}/{year}/{month}/{day}/{hour}h_ticks.bi5"
def __init__(self):
    # Fix: this previously called super(DataVendor, self).__init__(), which
    # skips DataVendor.__init__ entirely (it resolves from *above* DataVendor
    # in the MRO); every sibling vendor passes its own class
    super(DataVendorDukasCopy, self).__init__()

    import logging

    # the requests library is chatty at INFO level; silence it
    logging.getLogger("requests").setLevel(logging.WARNING)

    self.config = ConfigManager()
# implement method in abstract superclass
# implement method in abstract superclass
def load_ticker(self, market_data_request):
    """Retrieves market data from external data source (in this case Dukascopy)

    Parameters
    ----------
    market_data_request : TimeSeriesRequest
        contains all the various parameters detailing time series start and finish, tickers etc

    Returns
    -------
    DataFrame or None
        Tick data localized to UTC, or None for unsupported frequencies.
    """
    market_data_request_vendor = self.construct_vendor_market_data_request(market_data_request)

    data_frame = None

    logger = LoggerManager.getLogger(__name__)
    logger.info("Request Dukascopy data")

    # doesn't support non-tick data
    if (market_data_request.freq in ['daily', 'weekly', 'monthly', 'quarterly', 'yearly', 'intraday', 'minute',
                                     'hourly']):
        logger.warning("Dukascopy loader is for tick data only")

        return None

    # assume one ticker only (MarketDataGenerator only calls one ticker at a time)
    if (market_data_request.freq in ['tick']):
        # market_data_request_vendor.tickers = market_data_request_vendor.tickers[0]

        data_frame = self.get_tick(market_data_request, market_data_request_vendor)

        import pytz

        if data_frame is not None:
            # Dukascopy file timestamps are UTC; attach the timezone explicitly
            data_frame = data_frame.tz_localize(pytz.utc)

    logger.info("Completed request from Dukascopy")

    return data_frame
def kill_session(self):
    # Dukascopy downloads are stateless HTTP fetches, so there is no session
    # object to tear down; this satisfies the DataVendor interface as a no-op
    return None
def get_tick(self, market_data_request, market_data_request_vendor):
    """Download tick data and rename vendor columns to 'ticker.field' form."""
    data_frame = self.download_tick(market_data_request_vendor)

    # convert from vendor to findatapy tickers/fields
    if data_frame is not None:
        vendor_fields = data_frame.columns

        # every column belongs to the single requested ticker
        vendor_tickers = [market_data_request_vendor.tickers[0]] * len(vendor_fields)

        fields = self.translate_from_vendor_field(vendor_fields, market_data_request)
        tickers = self.translate_from_vendor_ticker(vendor_tickers, market_data_request)

        data_frame.columns = [t + "." + f for t, f in zip(tickers, fields)]
        data_frame.index.name = 'Date'

    return data_frame
def download_tick(self, market_data_request):
    """Download and parse every hourly tick file covering the request window.

    Uses a thread pool when constants.dukascopy_multithreading is set (tick
    downloads are IO-bound, so threads overlap the network waits); otherwise
    fetches the hour files one by one. Returns the concatenated DataFrame,
    or None if nothing could be downloaded/parsed.
    """
    symbol = market_data_request.tickers[0]

    logger = LoggerManager.getLogger(__name__)
    logger.info("About to download from Dukascopy... for " + symbol)

    # single threaded
    # df_list = [self.fetch_file(time, symbol) for time in
    #            self.hour_range(md_request.start_date, md_request.finish_date)]

    # parallel threaded (even with GIL, fast because lots of waiting for IO!)
    from findatapy.util import SwimPool

    time_list = self.hour_range(market_data_request.start_date, market_data_request.finish_date)

    do_retrieve_df = True  # convert inside loop?

    multi_threaded = constants.dukascopy_multithreading  # multithreading (can sometimes get errors but it's fine when retried, avoid using)

    if multi_threaded:
        completed = False

        # retry the whole pooled download up to 9 times
        for i in range(1, 10):
            try:
                # Use threading (not multiprocess interface, which has issues with dukascopy download)
                pool = SwimPool().create_pool('thread', constants.market_thread_no['dukascopy'])

                results = [pool.apply_async(self.fetch_file, args=(ti, symbol, do_retrieve_df, try_time,))
                           for try_time, ti in enumerate(time_list)]

                logger.debug("Attempting Dukascopy download " + str(i) + "... ")

                # Have a long timeout, because internally it'll try to download several times
                tick_list = [p.get(timeout=constants.timeout_downloader['dukascopy']) for p in results]

                pool.close()
                pool.join()

                completed = True
                break
            except:
                logger.warning("Didn't download on " + str(i) + " attempt... ")

                # back off a little longer after each failed attempt
                time_library.sleep(i * 5)

        if not (completed):
            logger.warning("Failed to download from Dukascopy after several attempts")
    else:
        # fully single threaded
        tick_list = []

        time_list = list(time_list)

        for time in time_list:
            tick_list.append(self.fetch_file(time, symbol, do_retrieve_df, 0))

    if do_retrieve_df:
        # fetch_file already returned parsed DataFrames
        df_list = tick_list
    else:
        # fetch_file returned raw LZMA blobs; decompress and parse them here
        df_list = []

        i = 0

        time_list = self.hour_range(market_data_request.start_date, market_data_request.finish_date)

        for time in time_list:
            try:
                temp_df = self.retrieve_df(lzma.decompress(tick_list[i]), symbol, time)
            except Exception as e:
                # a failed hour is logged and skipped rather than aborting
                print(str(time) + ' ' + str(e))
                # print(str(e))
                temp_df = None

            df_list.append(temp_df)

            i = i + 1

    # drop hours that failed to download/parse, then stitch the rest together
    df_list = [x for x in df_list if x is not None]

    try:
        return pandas.concat(df_list)
    except:
        # empty df_list (nothing downloaded) makes concat raise
        return None
def fetch_file(self, time, symbol, do_retrieve_df, try_time):
    """Download a single hourly tick file from Dukascopy.

    Parameters
    ----------
    time : datetime
        Hour to fetch (one Dukascopy file covers one hour).
    symbol : str
        Vendor ticker, e.g. 'EURUSD'.
    do_retrieve_df : bool
        If True, decompress/parse the payload into a DataFrame here;
        otherwise return the raw compressed bytes.
    try_time : int
        Index used downstream to stagger thread start-up sleeps.

    Returns
    -------
    DataFrame, raw bytes, or None on parse failure.
    """
    logger = LoggerManager.getLogger(__name__)

    # NOTE(review): month - 1 suggests Dukascopy URLs use zero-based months -
    # confirm against the tick_name template before changing
    tick_path = self.tick_name.format(
        symbol=symbol,
        year=str(time.year).rjust(4, '0'),
        month=str(time.month - 1).rjust(2, '0'),
        day=str(time.day).rjust(2, '0'),
        hour=str(time.hour).rjust(2, '0')
    )

    url = constants.dukascopy_base_url + tick_path

    # time.hour % 24 == time.hour, so this logs once per day (at midnight)
    if time.hour % 24 == 0:
        logger.info("Downloading... " + str(time) + " " + url)

    tick = self.fetch_tick(url, try_time)

    # print(tick_path)

    # Optionally persist the raw compressed payload to disk
    if constants.dukascopy_write_temp_tick_disk:
        out_path = constants.temp_folder + "/dkticks/" + tick_path

        if not os.path.exists(out_path):
            if not os.path.exists(os.path.dirname(out_path)):
                os.makedirs(os.path.dirname(out_path))

        self.write_tick(tick, out_path)

    if do_retrieve_df:
        try:
            return self.retrieve_df(lzma.decompress(tick), symbol, time)
        except Exception as e:
            # print(tick_path + ' ' + str(e))
            # print(str(e))

            # Swallow parse errors; the caller filters out the None
            return None

    return tick
def fetch_tick(self, tick_url, try_time):
    """Download one compressed tick file from *tick_url*, retrying on failure.

    Parameters
    ----------
    tick_url : str
        Full URL of the hourly Dukascopy file.
    try_time : int
        Index used to stagger the initial sleep across download threads.

    Returns
    -------
    bytes or None
        Raw (compressed) response body, or None if all retries failed or
        the URL returned 404.
    """
    download_counter = 0
    tick_request_content = None

    logger = LoggerManager.getLogger(__name__)
    logger.debug("Loading URL " + tick_url)

    # Sleep for a small amount of time, so multiple threads don't all poll external website at the same time
    time_library.sleep(constants.dukascopy_try_time * try_time / 2.0)  # constants.market_thread_no['dukascopy'])

    # Retry the download up to constants.dukascopy_retries times
    while download_counter < constants.dukascopy_retries:
        try:
            tick_request = requests.get(tick_url, timeout=constants.dukascopy_mini_timeout_seconds)

            # If URL has not been found, give up immediately (retrying won't help)
            if tick_request.status_code == 404:
                logger.warning("Error downloading.. " + tick_url + " returned 404 URL not found message! Are you sure Dukascopy has this asset?")

                tick_request_content = None
                tick_request.close()

                break
            else:
                # Otherwise attempt to parse it and extract content
                tick_request_content = tick_request.content
                tick_request.close()

                content_text = tick_request_content.decode("latin1")

                # Can sometimes get back an error HTML page, in which case retry
                if 'error' not in str(content_text):
                    break
                else:
                    logger.warning("Error downloading.. " + tick_url + " " + content_text + " will try again "
                                   + str(download_counter) + " occasion")
        except Exception as e:
            logger.warning(
                "Problem downloading.. " + tick_url + " " + str(e) + ".. will try again " + str(download_counter) + " occasion")

        download_counter = download_counter + 1

        # Sleep a bit, so don't overload server with retries
        time_library.sleep((try_time / 2.0))

    if (tick_request_content is None):
        logger.warning("Failed to download from " + tick_url)
        return None

    logger.debug("Downloaded URL " + tick_url)

    return tick_request_content
def write_tick(self, content, out_path):
    """Write raw downloaded tick content to disk.

    Parameters
    ----------
    content : bytes
        Raw (compressed) payload as downloaded from Dukascopy.
    out_path : str
        Destination file path (parent folder must already exist).
    """
    # Context manager guarantees the handle is closed even if write() raises
    # (the original open/write/close sequence leaked the handle on error)
    with open(out_path, "wb+") as data_file:
        data_file.write(content)
def chunks(self, list, n):
    """Split *list* into consecutive slices of length *n* (the last may be shorter).

    A non-positive *n* is treated as 1, so the result is never an infinite loop.
    """
    size = n if n >= 1 else 1
    return [list[start:start + size] for start in range(0, len(list), size)]
def retrieve_df(self, data, symbol, epoch):
    """Parse decompressed Dukascopy binary tick data into a DataFrame.

    Parameters
    ----------
    data : bytes
        Decompressed binary payload (20-byte records).
    symbol : str
        Vendor ticker; used to pick the price divisor.
    epoch : datetime
        Start of the hour the file covers; tick offsets are relative to it.

    Returns
    -------
    DataFrame
        'ask'/'bid' prices and 'askv'/'bidv' volumes, indexed by Date.
    """
    date, tuple = self.parse_tick_data(data, epoch)

    df = pandas.DataFrame(data=tuple, columns=['temp', 'ask', 'bid', 'askv', 'bidv'], index=date)

    # BUG FIX: drop() is not in-place - the original discarded its result, so
    # the throwaway 'temp' column (the millisecond offset) was never removed
    df = df.drop('temp', axis=1)
    df.index.name = 'Date'

    # Default FX divisor (prices arrive without a decimal point)
    divisor = 100000.0

    # Where JPY is the terms currency we have a different divisor
    if symbol[3:6] == 'JPY':
        divisor = 1000.0
    # Special case! You may need to add more here
    elif symbol == 'BRENTCMDUSD':
        divisor = 1000.0
    elif len(symbol) > 6:
        divisor = 1.0

    # Prices are returned without decimal point (need to divide)
    df['bid'] = df['bid'] / divisor
    df['ask'] = df['ask'] / divisor

    return df
def hour_range(self, start_date, end_date):
    """Return hourly datetimes from *start_date* (inclusive) up to *end_date* (exclusive).

    Always contains at least *start_date*, even for zero-length spans.
    """
    span = end_date - start_date
    whole_hours = int(span.days * 24.0 + span.seconds / 3600.0)

    out_times = [start_date + timedelta(hours=offset) for offset in range(whole_hours)]

    # Degenerate range: guarantee at least one timestamp
    if not out_times:
        out_times.append(start_date)

    return out_times
def parse_tick_data(self, data, epoch):
    """Decode raw Dukascopy binary ticks into (dates, rows).

    Each 20-byte record is big-endian: millisecond offset from *epoch*
    (unsigned long), ask and bid prices (unsigned longs), then ask and bid
    volumes (floats).
    """
    import struct

    record_format = ">LLLff"

    date = []
    parsed_list = []

    # note: Numba can speed up for loops
    for raw_record in self.chunks(data, 20):
        record = struct.unpack(record_format, raw_record)

        # First field is the millisecond offset from the start of the hour
        date.append(epoch + timedelta(milliseconds=record[0]))
        parsed_list.append(record)

    return date, parsed_list
def chunks(self, list, n):
    """Split *list* into length-*n* slices; a non-positive *n* behaves as 1.

    NOTE(review): this re-defines the chunks() declared earlier in the class;
    Python keeps only this later definition. Behaviour is identical.
    """
    step = max(n, 1)
    return [list[pos:pos + step] for pos in range(0, len(list), step)]
def get_daily_data(self):
    """Daily data is not supported by this tick loader."""
    pass
########################################################################################################################
##from StringIO import StringIO
from io import BytesIO
import gzip
import urllib
##Available Currencies
##AUDCAD,AUDCHF,AUDJPY, AUDNZD,CADCHF,EURAUD,EURCHF,EURGBP
##EURJPY,EURUSD,GBPCHF,GBPJPY,GBPNZD,GBPUSD,GBPCHF,GBPJPY
##GBPNZD,NZDCAD,NZDCHF,NZDJPY,NZDUSD,USDCAD,USDCHF,USDJPY
class DataVendorFXCM(DataVendor):
"""Class for downloading tick data from FXCM. Selecting very large
histories is not recommended as you will likely run out of memory given the amount of data requested. Loads csv.gz
files from FXCM and then converts into pandas DataFrames locally.
Based on https://github.com/FXCMAPI/FXCMTickData/blob/master/TickData34.py
"""
url_suffix = '.csv.gz' ##Extension of the file name
def __init__(self):
    """Set up the FXCM vendor: quieten the requests logger and load config."""
    # BUG FIX: the original called super(DataVendor, self).__init__(), which
    # resolves past DataVendor and skips DataVendor's own __init__.
    # Zero-argument super() initialises the DataVendor base class correctly
    # (matching the super(DataVendorFlatFile, self) pattern used elsewhere).
    super().__init__()

    import logging
    logging.getLogger("requests").setLevel(logging.WARNING)

    self.config = ConfigManager()
# implement method in abstract superclass
def load_ticker(self, market_data_request):
    """Retrieves tick market data from FXCM.

    Parameters
    ----------
    market_data_request : TimeSeriesRequest
        contains all the various parameters detailing time series start and finish, tickers etc

    Returns
    -------
    DataFrame
        Tick data localised to UTC, or None for unsupported frequencies.
    """
    logger = LoggerManager().getLogger(__name__)
    market_data_request_vendor = self.construct_vendor_market_data_request(market_data_request)

    data_frame = None
    logger.info("Request FXCM data")

    # Doesn't support non-tick data
    if (market_data_request.freq in ['daily', 'weekly', 'monthly', 'quarterly', 'yearly', 'intraday', 'minute',
                                     'hourly']):
        logger.warning("FXCM loader is for tick data only")

        return None

    # Assume one ticker only (MarketDataGenerator only calls one ticker at a time)
    if (market_data_request.freq in ['tick']):
        # market_data_request_vendor.tickers = market_data_request_vendor.tickers[0]

        data_frame = self.get_tick(market_data_request, market_data_request_vendor)

        import pytz

        # BUG FIX: tz_localize returns a new DataFrame - the original
        # discarded the result, so the frame was never actually UTC-localised
        # (the Dukascopy loader assigns it; this now matches)
        if data_frame is not None:
            data_frame = data_frame.tz_localize(pytz.utc)

    logger.info("Completed request from FXCM")

    return data_frame
def kill_session(self):
    """FXCM downloads are stateless HTTP requests; there is no session to close."""
    return
def get_tick(self, market_data_request, market_data_request_vendor):
    """Download FXCM tick data and translate vendor tickers/fields.

    Returns a DataFrame whose columns are '<ticker>.<field>' with a 'Date'
    index, or None when the download produced nothing.
    """
    data_frame = self.download_tick(market_data_request_vendor)

    # Convert from vendor to findatapy tickers/fields
    if data_frame is not None:
        vendor_fields = data_frame.columns

        # One ticker per request, repeated across all returned fields
        vendor_tickers = [market_data_request_vendor.tickers[0]] * len(vendor_fields)

        fields = self.translate_from_vendor_field(vendor_fields, market_data_request)
        tickers = self.translate_from_vendor_ticker(vendor_tickers, market_data_request)

        data_frame.columns = [t + "." + f for t, f in zip(tickers, fields)]
        data_frame.index.name = 'Date'

    return data_frame
def download_tick(self, market_data_request):
    """Download tick data for every week spanned by the request, in parallel.

    Spawns one thread per weekly file (lots of IO waiting, so threads help
    despite the GIL) and concatenates the per-week DataFrames.

    Returns
    -------
    DataFrame or None if every weekly download failed.
    """
    logger = LoggerManager().getLogger(__name__)
    symbol = market_data_request.tickers[0]

    logger.info("About to download from FXCM... for " + symbol)

    # single threaded
    # df_list = [self.fetch_file(week_year, symbol) for week_year in
    #           self.week_range(md_request.start_date, md_request.finish_date)]

    # parallel threaded (note: lots of waiting on IO, so even with GIL quicker!)
    week_list = self.week_range(market_data_request.start_date, market_data_request.finish_date)

    from findatapy.util import SwimPool
    pool = SwimPool().create_pool('thread', constants.market_thread_no['fxcm'])

    results = [pool.apply_async(self.fetch_file, args=(week, symbol)) for week in week_list]
    df_list = [p.get() for p in results]

    pool.close()

    # concat raises on an empty/all-None list; treat that as "no data"
    try:
        return pandas.concat(df_list)
    except:
        return None
def fetch_file(self, week_year, symbol):
    """Download one week of FXCM tick data for *symbol*.

    Parameters
    ----------
    week_year : tuple
        (week_number, year) pair identifying the weekly csv.gz file.
    symbol : str
        Vendor ticker, e.g. 'EURUSD'.
    """
    logger = LoggerManager().getLogger(__name__)
    logger.info("Downloading... " + str(week_year))

    week, year = week_year

    tick_path = symbol + '/' + str(year) + '/' + str(week) + self.url_suffix

    return self.retrieve_df(constants.fxcm_base_url + tick_path)
def parse_datetime(self):
    """Placeholder - datetime parsing is done inline inside retrieve_df."""
    pass
def retrieve_df(self, tick_url):
    """Download one gzipped FXCM CSV from *tick_url* and parse it into a
    DataFrame with 'bid'/'ask' columns, retrying up to 5 times.

    Returns None if all attempts fail.
    """
    i = 0  # retry counter; set to 5 on success to exit the loop
    logger = LoggerManager().getLogger(__name__)

    data_frame = None

    # Python 2 vs. 3
    try:
        from StringIO import StringIO
    except:
        from io import StringIO

    # try up to 5 times to download
    while i < 5:
        try:
            requests = urllib.request.urlopen(tick_url)
            buf = BytesIO(requests.read())

            with gzip.GzipFile(fileobj=buf, mode='rb') as f:
                # slightly awkward date parser (much faster than using other Python methods)
                # TODO use ciso8601 library (uses C parser, slightly quicker)
                # Slices assume FXCM's MM/DD/YYYY HH:MM:SS.mmm layout
                dateparse = lambda x: datetime.datetime(int(x[6:10]), int(x[0:2]), int(x[3:5]),
                                                        int(x[11:13]), int(x[14:16]), int(x[17:19]),
                                                        int(x[20:23]) * 1000)

                # NOTE(review): date_parser is deprecated/removed in newer
                # pandas - confirm the pinned pandas version before upgrading
                data_frame = pandas.read_csv(StringIO(f.read().decode('utf-16')), index_col=0, parse_dates=True,
                                             date_parser=dateparse)

                data_frame.columns = ['bid', 'ask']
                f.close()

            # Success: force the retry loop to terminate
            i = 5
        except:
            i = i + 1

    if (data_frame is None):
        logger.warning("Failed to download from " + tick_url)
        return None

    return data_frame
def week_range(self, start_date, finish_date):
    """Return (ISO week, ISO year) tuples covering [start_date, finish_date].

    The range is padded by a week on each side; at least one entry is
    always returned.
    """
    weeks = pandas.bdate_range(start_date - timedelta(days=7), finish_date + timedelta(days=7), freq='W')

    week_year = []

    for stamp in weeks:
        iso_year, iso_week = stamp.isocalendar()[0:2]
        week_year.append((iso_week, iso_year))

    # If the range produced nothing, fall back to the request's start week
    # NOTE(review): .week here assumes start_date is a pandas Timestamp
    if not week_year:
        week_year.append((start_date.week, start_date.year))

    return week_year
def get_daily_data(self):
    """Daily data is not supported by the FXCM tick loader."""
    pass
########################################################################################################################
if sys.version_info[0] >= 3:
from urllib.request import urlopen
from urllib.parse import quote_plus
from urllib.parse import urlencode
from urllib.error import HTTPError
else:
from urllib2 import urlopen
from urllib2 import HTTPError
from urllib import quote_plus
from urllib import urlencode
import xml.etree.ElementTree as ET
class Fred(object):
"""Auxillary class for getting access to ALFRED/FRED directly.
Based on https://github.com/mortada/fredapi (with minor edits)
"""
earliest_realtime_start = '1776-07-04'
latest_realtime_end = '9999-12-31'
nan_char = '.'
max_results_per_request = 1000
def __init__(self,
             api_key=None,
             api_key_file=None):
    """Initialize the Fred class that provides useful functions to query the Fred dataset. You need to specify a valid
    API key in one of 3 ways: pass the string via api_key, or set api_key_file to a file with the api key in the
    first line, or set the environment variable 'FRED_API_KEY' to the value of your api key. You can sign up for a
    free api key on the Fred website at http://research.stlouisfed.org/fred2/
    """
    self.api_key = None

    if api_key is not None:
        self.api_key = api_key
    elif api_key_file is not None:
        # Context manager ensures the key file handle is closed even if
        # readline() raises (the original open/readline/close leaked it)
        with open(api_key_file, 'r') as f:
            self.api_key = f.readline().strip()
    else:
        self.api_key = os.environ.get('FRED_API_KEY')

    self.root_url = 'https://api.stlouisfed.org/fred'

    if self.api_key is None:
        import textwrap
        raise ValueError(textwrap.dedent("""\
                You need to set a valid API key. You can set it in 3 ways:
                pass the string with api_key, or set api_key_file to a
                file with the api key in the first line, or set the
                environment variable 'FRED_API_KEY' to the value of your
                api key. You can sign up for a free api key on the Fred
                website at http://research.stlouisfed.org/fred2/"""))
def __fetch_data(self, url):
    """Helper function for fetching data given a request URL

    Returns the parsed XML root element; raises ValueError with FRED's own
    error message when the HTTP request fails.
    """
    try:
        response = urlopen(url)
        root = ET.fromstring(response.read())
    except HTTPError as exc:
        # FRED returns an XML error document carrying a 'message' attribute;
        # surface that to the caller instead of the raw HTTP error
        root = ET.fromstring(exc.read())
        raise ValueError(root.get('message'))
    return root
def _parse(self, date_str, format='%Y-%m-%d'):
    """Helper function for parsing a FRED date string into a datetime."""
    from pandas import to_datetime

    parsed = to_datetime(date_str, format=format)

    # Older pandas objects expose to_datetime (now deprecated); convert such
    # results to plain datetime for a consistent return type
    if hasattr(parsed, 'to_datetime'):
        parsed = parsed.to_pydatetime()

    return parsed
def get_series_info(self, series_id):
    """Get information about a series such as its title, frequency, observation start/end dates, units, notes, etc.

    Parameters
    ----------
    series_id : str
        Fred series id such as 'CPIAUCSL'

    Returns
    -------
    info : Series
        a pandas Series containing information about the Fred series
    """
    url = "%s/series?series_id=%s&api_key=%s" % (self.root_url, series_id,
                                                 self.api_key)
    root = self.__fetch_data(url)

    if root is None:
        raise ValueError('No info exists for series id: ' + series_id)

    from pandas import Series

    # BUG FIX: Element.getchildren() was removed in Python 3.9 - index the
    # element directly to get its first child
    info = Series(root[0].attrib)

    return info
def get_series(self, series_id, observation_start=None, observation_end=None, **kwargs):
    """Get data for a Fred series id. This fetches the latest known data, and is equivalent to get_series_latest_release()

    Parameters
    ----------
    series_id : str
        Fred series id such as 'CPIAUCSL'
    observation_start : datetime or datetime-like str such as '7/1/2014', optional
        earliest observation date
    observation_end : datetime or datetime-like str such as '7/1/2014', optional
        latest observation date
    kwargs : additional parameters
        Any additional parameters supported by FRED. You can see https://api.stlouisfed.org/docs/fred/series_observations.html for the full list

    Returns
    -------
    data : Series
        a Series where each index is the observation date and the value is the data for the Fred series
    """
    url = "%s/series/observations?series_id=%s&api_key=%s" % (self.root_url,
                                                              series_id,
                                                              self.api_key)

    from pandas import to_datetime, Series

    if observation_start is not None:
        observation_start = to_datetime(observation_start, errors='raise')
        url += '&observation_start=' + observation_start.strftime('%Y-%m-%d')

    if observation_end is not None:
        observation_end = to_datetime(observation_end, errors='raise')
        url += '&observation_end=' + observation_end.strftime('%Y-%m-%d')

    if kwargs is not None:
        url += '&' + urlencode(kwargs)

    root = self.__fetch_data(url)

    if root is None:
        raise ValueError('No data exists for series id: ' + series_id)

    data = {}

    # BUG FIX: Element.getchildren() was removed in Python 3.9; iterating
    # the element directly yields the same children
    for child in root:
        val = child.get('value')

        # FRED marks missing observations with the '.' character
        if val == self.nan_char:
            val = float('NaN')
        else:
            val = float(val)

        data[self._parse(child.get('date'))] = val

    return Series(data)
def get_series_latest_release(self, series_id, observation_start=None, observation_end=None):
    """Get the latest known data for a Fred series id.

    Thin alias for get_series(): both fetch the most recent release.

    Parameters
    ----------
    series_id : str
        Fred series id such as 'CPIAUCSL'

    Returns
    -------
    info : Series
        a Series indexed by observation date holding the series values
    """
    return self.get_series(series_id, observation_start=observation_start, observation_end=observation_end)
def get_series_first_release(self, series_id, observation_start=None, observation_end=None):
    """Get first-release data for a Fred series id, ignoring later revisions.

    For instance the US GDP for Q1 2014 was first released as 17149.6 and
    later revised to 17101.3 and 17016.0; this returns 17149.6.

    Parameters
    ----------
    series_id : str
        Fred series id such as 'GDP'

    Returns
    -------
    data : Series
        a Series indexed by observation date holding the first-published values
    """
    releases = self.get_series_all_releases(series_id, observation_start=observation_start,
                                            observation_end=observation_end)

    # Keep only the earliest (first-published) row per observation date
    return releases.groupby('date').head(1).set_index('date')['value']
def get_series_first_revision(self, series_id, observation_start=None, observation_end=None):
    """Get first-revision data for a Fred series id.

    For instance the US GDP for Q1 2014 was first released as 17149.6, then
    revised to 17101.3 and 17016.0; this returns the first revision, 17101.3.
    Dates with no revision fall back to their first release.

    Parameters
    ----------
    series_id : str
        Fred series id such as 'GDP'

    Returns
    -------
    data : Series
        a Series indexed by observation date holding the first-revision values
    """
    releases = self.get_series_all_releases(series_id, observation_start=observation_start,
                                            observation_end=observation_end)

    # First two rows per date = first release plus (if present) first revision
    series = releases.groupby('date').head(2).set_index('date')['value']

    # keep='last' retains the revision where one exists, else the release
    return series[~series.index.duplicated(keep='last')]
def get_series_as_of_date(self, series_id, as_of_date):
    """Get the data for a Fred series as it was known on *as_of_date*.

    Includes revisions published on or before as_of_date and drops any
    published afterwards.

    Parameters
    ----------
    series_id : str
        Fred series id such as 'GDP'
    as_of_date : datetime, or datetime-like str such as '10/25/2014'
        Include data revisions on or before this date, and ignore revisions afterwards

    Returns
    -------
    data : Series
        rows of (date, realtime_start, value) known as of the given date
    """
    from pandas import to_datetime

    cutoff = to_datetime(as_of_date)
    releases = self.get_series_all_releases(series_id)

    # Keep only rows published on or before the as-of date
    return releases[releases['realtime_start'] <= cutoff]
def get_series_all_releases(self, series_id, observation_start=None, observation_end=None):
    """Get all data for a Fred series id including first releases and all revisions.

    This returns a DataFrame
    with three columns: 'date', 'realtime_start', and 'value'. For instance, the US GDP for Q4 2013 was first released
    to be 17102.5 on 2014-01-30, and then revised to 17080.7 on 2014-02-28, and then revised to 17089.6 on
    2014-03-27. You will therefore get three rows with the same 'date' (observation date) of 2013-10-01 but three
    different 'realtime_start' of 2014-01-30, 2014-02-28, and 2014-03-27 with corresponding 'value' of 17102.5, 17080.7
    and 17089.6

    Parameters
    ----------
    series_id : str
        Fred series id such as 'GDP'

    Returns
    -------
    data : DataFrame
        a DataFrame with columns 'date', 'realtime_start' and 'value' where 'date' is the observation period and 'realtime_start'
        is when the corresponding value (either first release or revision) is reported.
    """
    url = "%s/series/observations?series_id=%s&api_key=%s&realtime_start=%s&realtime_end=%s" % (self.root_url,
                                                                                                series_id,
                                                                                                self.api_key,
                                                                                                self.earliest_realtime_start,
                                                                                                self.latest_realtime_end)

    from pandas import to_datetime

    if observation_start is not None:
        observation_start = to_datetime(observation_start, errors='raise')
        url += '&observation_start=' + observation_start.strftime('%Y-%m-%d')

    if observation_end is not None:
        observation_end = to_datetime(observation_end, errors='raise')
        url += '&observation_end=' + observation_end.strftime('%Y-%m-%d')

    root = self.__fetch_data(url)

    if root is None:
        raise ValueError('No data exists for series id: ' + series_id)

    data = {}
    i = 0

    # BUG FIX: Element.getchildren() was removed in Python 3.9; iterating
    # the element directly yields the same children
    for child in root:
        val = child.get('value')

        # FRED marks missing observations with the '.' character
        if val == self.nan_char:
            val = float('NaN')
        else:
            val = float(val)

        realtime_start = self._parse(child.get('realtime_start'))
        # realtime_end = self._parse(child.get('realtime_end'))
        date = self._parse(child.get('date'))

        data[i] = {'realtime_start': realtime_start,
                   # 'realtime_end': realtime_end,
                   'date': date,
                   'value': val}
        i += 1

    from pandas import DataFrame
    data = DataFrame(data).T
    return data
def get_series_vintage_dates(self, series_id):
    """Get a list of vintage dates for a series.

    Vintage dates are the dates in history when a series' data values were revised or new data values were released.

    Parameters
    ----------
    series_id : str
        Fred series id such as 'CPIAUCSL'

    Returns
    -------
    dates : list
        list of vintage dates
    """
    url = "%s/series/vintagedates?series_id=%s&api_key=%s" % (self.root_url,
                                                              series_id,
                                                              self.api_key)
    root = self.__fetch_data(url)

    if root is None:
        raise ValueError('No vintage date exists for series id: ' + series_id)

    dates = []

    # BUG FIX: Element.getchildren() was removed in Python 3.9; iterating
    # the element directly yields the same children
    for child in root:
        dates.append(self._parse(child.text))

    return dates
def __do_series_search(self, url):
    """Helper function for making one HTTP request for data, and parsing the returned results into a DataFrame

    Returns
    -------
    (DataFrame or None, int)
        The parsed results (None when the page is empty) and the total
        number of matches reported by FRED, which can exceed the page size.
    """
    root = self.__fetch_data(url)

    series_ids = []
    data = {}

    num_results_returned = 0  # number of results returned in this HTTP request
    num_results_total = int(
        root.get('count'))  # total number of results, this can be larger than number of results returned

    # BUG FIX: Element.getchildren() was removed in Python 3.9; iterating
    # the element directly yields the same children
    for child in root:
        num_results_returned += 1
        series_id = child.get('id')
        series_ids.append(series_id)
        data[series_id] = {"id": series_id}

        fields = ["realtime_start", "realtime_end", "title", "observation_start", "observation_end",
                  "frequency", "frequency_short", "units", "units_short", "seasonal_adjustment",
                  "seasonal_adjustment_short", "last_updated", "popularity", "notes"]

        for field in fields:
            data[series_id][field] = child.get(field)

    if num_results_returned > 0:
        from pandas import DataFrame
        data = DataFrame(data, columns=series_ids).T

        # parse datetime columns
        for field in ["realtime_start", "realtime_end", "observation_start", "observation_end", "last_updated"]:
            data[field] = data[field].apply(self._parse, format=None)

        # set index name
        data.index.name = 'series id'
    else:
        data = None

    return data, num_results_total
def __get_search_results(self, url, limit, order_by, sort_order):
    """Helper function for getting search results up to specified limit on the number of results. The Fred HTTP API
    truncates to 1000 results per request, so this may issue multiple HTTP requests to obtain more available data.
    """
    order_by_options = ['search_rank', 'series_id', 'title', 'units', 'frequency',
                        'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated',
                        'observation_start', 'observation_end', 'popularity']

    if order_by is not None:
        if order_by in order_by_options:
            url = url + '&order_by=' + order_by
        else:
            raise ValueError(
                '%s is not in the valid list of order_by options: %s' % (order_by, str(order_by_options)))

    sort_order_options = ['asc', 'desc']

    if sort_order is not None:
        if sort_order in sort_order_options:
            url = url + '&sort_order=' + sort_order
        else:
            raise ValueError(
                '%s is not in the valid list of sort_order options: %s' % (sort_order, str(sort_order_options)))

    data, num_results_total = self.__do_series_search(url)

    if data is None:
        return data

    if limit == 0:
        max_results_needed = num_results_total
    else:
        max_results_needed = limit

    if max_results_needed > self.max_results_per_request:
        from pandas import concat

        for i in range(1, max_results_needed // self.max_results_per_request + 1):
            offset = i * self.max_results_per_request
            next_data, _ = self.__do_series_search(url + '&offset=' + str(offset))

            # BUG FIX: DataFrame.append() was removed in pandas 2.0; use
            # pandas.concat to accumulate the paged results instead
            data = concat([data, next_data])

    return data.head(max_results_needed)
def search(self, text, limit=1000, order_by=None, sort_order=None):
    """Do a fulltext search for series in the Fred dataset.

    Parameters
    ----------
    text : str
        text to do fulltext search on, e.g., 'Real GDP'
    limit : int, optional
        limit the number of results to this value. If limit is 0, it means fetching all results without limit.
    order_by : str, optional
        order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
        'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
        'popularity'
    sort_order : str, optional
        sort the results by ascending or descending order. Valid options are 'asc' or 'desc'

    Returns
    -------
    info : DataFrame
        a DataFrame containing information about the matching Fred series
    """
    url = "%s/series/search?search_text=%s&api_key=%s" % (self.root_url,
                                                          quote_plus(text),
                                                          self.api_key)

    return self.__get_search_results(url, limit, order_by, sort_order)
def search_by_release(self, release_id, limit=0, order_by=None, sort_order=None):
    """Search for series that belongs to a release id.

    Parameters
    ----------
    release_id : int
        release id, e.g., 151
    limit : int, optional
        limit the number of results to this value. If limit is 0, it means fetching all results without limit.
    order_by : str, optional
        order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
        'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
        'popularity'
    sort_order : str, optional
        sort the results by ascending or descending order. Valid options are 'asc' or 'desc'

    Returns
    -------
    info : DataFrame
        a DataFrame containing information about the matching Fred series
    """
    # BUG FIX: the URL template contained a stray '&&' before api_key, which
    # produced an empty query parameter (the sibling search methods use '&')
    url = "%s/release/series?release_id=%d&api_key=%s" % (self.root_url,
                                                          release_id,
                                                          self.api_key)
    info = self.__get_search_results(url, limit, order_by, sort_order)

    if info is None:
        raise ValueError('No series exists for release id: ' + str(release_id))

    return info
def search_by_category(self, category_id, limit=0, order_by=None, sort_order=None):
    """Search for series that belongs to a category id.

    Parameters
    ----------
    category_id : int
        category id, e.g., 32145
    limit : int, optional
        limit the number of results to this value. If limit is 0, it means fetching all results without limit.
    order_by : str, optional
        order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
        'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
        'popularity'
    sort_order : str, optional
        sort the results by ascending or descending order. Valid options are 'asc' or 'desc'

    Returns
    -------
    info : DataFrame
        a DataFrame containing information about the matching Fred series
    """
    url = "%s/category/series?category_id=%d&api_key=%s" % (self.root_url,
                                                            category_id,
                                                            self.api_key)

    info = self.__get_search_results(url, limit, order_by, sort_order)

    if info is None:
        raise ValueError('No series exists for category id: ' + str(category_id))

    return info
########################################################################################################################
class DataVendorFlatFile(DataVendor):
"""Reads in data from a user-specifed Parquet, CSV, HDF5 flat file (or arctic) via findatapy library. Does not do any ticker/field
mapping, as this could vary significantly between Parquet, CSV/HDF5 files (or arctic).
Users need to know the tickers/fields they wish to collect. Can also be used with predefined tickers, in this case
the filename format is of the form
eg. backtest.fx.quandl.daily.NYC.parquet (for daily data)
eg. backtest.fx.dukascopy.tick.NYC.EURUSD.parquet (for tick/intraday data, we store each ticker in a separate file)
"""
def __init__(self):
    """Construct the flat-file vendor; all state lives in the DataVendor base."""
    super(DataVendorFlatFile, self).__init__()
# implement method in abstract superclass
def load_ticker(self, market_data_request, index_col=0, max_workers=1):
logger = LoggerManager().getLogger(__name__)
data_source_list = market_data_request.data_source
data_engine = market_data_request.data_engine
if isinstance(data_source_list, list):
pass
else:
data_source_list = [data_source_list]
data_frame_list = []
def download_data_frame(data_source):
if data_engine is not None:
logger.info("Request " + market_data_request.data_source + " data via " + data_engine)
# If a file path has been specified
if '*' in data_engine:
w = data_engine.split("*.")
folder = w[0]
file_format = w[-1]
# For intraday/tick files each ticker is stored in a separate file
if market_data_request.freq == 'intraday' or market_data_request.freq == 'tick':
path = market_data_request.environment + "." \
+ market_data_request.category + "." + data_source + "." + market_data_request.freq \
+ "." + market_data_request.cut + "." + market_data_request.tickers[0] + "." + file_format
else:
path = market_data_request.environment + "." \
+ market_data_request.category + "." + data_source + "." + market_data_request.freq \
+ "." + market_data_request.cut + "." + file_format
full_path = os.path.join(folder, path)
else:
# Otherwise a database like arctic has been specified
# For intraday/tick files each ticker is stored in a separate file
if market_data_request.freq == 'intraday' or market_data_request.freq == 'tick':
full_path = market_data_request.environment + "." \
+ market_data_request.category + "." + data_source + "." + market_data_request.freq \
+ "." + market_data_request.cut + "." + market_data_request.tickers[0]
else:
full_path = market_data_request.environment + "." \
+ market_data_request.category + "." + data_source + "." + market_data_request.freq \
+ "." + market_data_request.cut
else:
logger.info("Request " + data_source + " data")
full_path = data_source
if ".zip" in data_source:
import zipfile
if "http" in full_path:
from requests import get
request = get(full_path)
zf = zipfile.ZipFile(BytesIO(request.content))
else:
zf = zipfile.ZipFile(full_path)
name_list = zipfile.ZipFile.namelist(zf)
df_list = []
for name in name_list:
df_list.append(pd.read_csv(zf.open(name), index_col=index_col, parse_dates=True,
infer_datetime_format=True))
data_frame = pd.concat(df_list)
elif ".csv" in data_source:
data_frame = pandas.read_csv(full_path, index_col=index_col, parse_dates=True,
infer_datetime_format=True)
elif ".h5" in data_source:
data_frame = IOEngine().read_time_series_cache_from_disk(full_path, engine='hdf5')
elif ".parquet" in data_source or '.gzip' in data_source:
data_frame = IOEngine().read_time_series_cache_from_disk(full_path, engine='parquet')
else:
data_frame = IOEngine().read_time_series_cache_from_disk(full_path, engine=data_engine)
if data_frame is None or data_frame.index is []: return None
if data_frame is not None:
tickers = data_frame.columns
if data_frame is not None:
# Tidy up tickers into a format that is more easily translatable
# we can often get multiple fields returned (even if we don't ask for them!)
# convert to lower case
ticker_combined = []
for i in range(0, len(tickers)):
if "." in tickers[i]:
ticker_combined.append(tickers[i])
else:
ticker_combined.append(tickers[i] + ".close")
data_frame.columns = ticker_combined
data_frame.index.name = 'Date'
logger.info("Completed request from " + str(data_source) + " for " + str(ticker_combined))
return data_frame
if max_workers == 1:
for data_source in data_source_list:
data_frame_list.append(download_data_frame(data_source))
else:
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
data_frame_list = list(executor.map(download_data_frame, data_source_list))
if data_frame_list != []:
data_frame_list_filtered = []
for data_frame in data_frame_list:
if data_frame is not None:
data_frame_list_filtered.append(data_frame)
data_frame = pd.concat(data_frame_list)
return data_frame
########################################################################################################################
from alpha_vantage.timeseries import TimeSeries
class DataVendorAlphaVantage(DataVendor):
    """Reads in data from Alpha Vantage into findatapy library
    """

    def __init__(self):
        super(DataVendorAlphaVantage, self).__init__()

    # implement method in abstract superclass
    def load_ticker(self, market_data_request):
        """Download data for the request and rename columns to the
        'ticker.field' convention; returns None when nothing was retrieved.

        :param market_data_request: findatapy request describing tickers,
            fields, frequency and the Alpha Vantage API key.
        :return: pandas DataFrame indexed by 'Date', or None on failure.
        """
        logger = LoggerManager().getLogger(__name__)

        market_data_request_vendor = self.construct_vendor_market_data_request(market_data_request)

        logger.info("Request AlphaVantage data")

        # download() returns None after repeated failures; guard before
        # unpacking (previously 'data_frame, _ = ...' raised TypeError then).
        downloaded = self.download(market_data_request_vendor)

        if downloaded is None:
            return None

        data_frame, _ = downloaded

        # BUG FIX: 'data_frame.index is []' is always False ('is' compares
        # identity with a fresh list literal); test emptiness explicitly.
        if data_frame is None or len(data_frame.index) == 0: return None

        # convert from vendor to findatapy tickers/fields
        returned_tickers = data_frame.columns

        # tidy up tickers into a format that is more easily translatable
        # we can often get multiple fields returned (even if we don't ask for them!)
        # convert to lower case
        # Alpha Vantage columns look like '1. open' -> field name 'open'
        returned_fields = [(x.split('. ')[1]).lower() for x in returned_tickers]

        import numpy as np
        returned_tickers = np.repeat(market_data_request_vendor.tickers, len(returned_fields))

        try:
            fields = self.translate_from_vendor_field(returned_fields, market_data_request)
            tickers = self.translate_from_vendor_ticker(returned_tickers, market_data_request)
        except Exception:
            # narrowed from a bare 'except:' so KeyboardInterrupt still works
            logger.error("Could not convert tickers/fields from Alpha Vantage")

        ticker_combined = []

        for i in range(0, len(tickers)):
            try:
                ticker_combined.append(tickers[i] + "." + fields[i])
            except Exception:
                # no translated field available for this column
                ticker_combined.append(tickers[i] + ".close")

        data_frame.columns = ticker_combined
        data_frame.index.name = 'Date'

        logger.info("Completed request from Alpha Vantage for " + str(ticker_combined))

        return data_frame

    def download(self, market_data_request):
        """Fetch (data, metadata) from Alpha Vantage with up to 5 retries.

        :return: the tuple returned by alpha_vantage (DataFrame, metadata),
            or None when all attempts failed.
        """
        logger = LoggerManager().getLogger(__name__)

        trials = 0

        ts = TimeSeries(key=market_data_request.alpha_vantage_api_key, output_format='pandas', indexing_type='date')

        data_frame = None

        while trials < 5:
            try:
                if market_data_request.freq == 'intraday':
                    data_frame = ts.get_intraday(symbol=market_data_request.tickers, interval='1min', outputsize='full')
                else:
                    data_frame = ts.get_daily(symbol=market_data_request.tickers, outputsize='full')

                break
            except Exception as e:
                trials = trials + 1
                logger.info("Attempting... " + str(
                    trials) + " request to download from Alpha Vantage due to following error: " + str(e))

        if trials == 5:
            logger.error("Couldn't download from Alpha Vantage after several attempts!")

        return data_frame
########################################################################################################################
try:
import fxcmpy
except:
pass
class DataVendorFXCMPY(DataVendor):
    """Reads in data from FXCM data using fxcmpy into findatapy library. Can be used for minute or daily data. For
    tick data we should use DataVendorFXCM (but this data is delayed).

    NOTE: NOT TESTED YET
    """

    def __init__(self):
        # BUG FIX: previously called super(DataVendorFXCM, ...), which names
        # a different class and raises NameError when it is not imported.
        super(DataVendorFXCMPY, self).__init__()

    # implement method in abstract superclass
    def load_ticker(self, market_data_request):
        """Download FXCM candles for the request and rename columns to the
        'ticker.field' convention; returns None when nothing was retrieved."""
        logger = LoggerManager().getLogger(__name__)

        market_data_request_vendor = self.construct_vendor_market_data_request(market_data_request)

        logger.info("Request FXCM data")

        # download() returns None after repeated failures; guard before
        # unpacking.  NOTE(review): get_candles returns a single DataFrame,
        # so this tuple-unpack only works for a 2-column frame -- confirm.
        downloaded = self.download(market_data_request_vendor)

        if downloaded is None:
            return None

        data_frame, _ = downloaded

        # BUG FIX: 'data_frame.index is []' is always False ('is' compares
        # identity with a fresh list literal); test emptiness explicitly.
        if data_frame is None or len(data_frame.index) == 0: return None

        # convert from vendor to findatapy tickers/fields
        returned_tickers = data_frame.columns

        # tidy up tickers into a format that is more easily translatable
        # we can often get multiple fields returned (even if we don't ask for them!)
        # convert to lower case
        returned_fields = [(x.split('. ')[1]).lower() for x in returned_tickers]

        import numpy as np
        returned_tickers = np.repeat(market_data_request_vendor.tickers, len(returned_fields))

        try:
            fields = self.translate_from_vendor_field(returned_fields, market_data_request)
            tickers = self.translate_from_vendor_ticker(returned_tickers, market_data_request)
        except Exception:
            # log through the logger instead of the previous bare print('error')
            logger.error("Could not convert tickers/fields from FXCM")

        ticker_combined = []

        for i in range(0, len(tickers)):
            try:
                ticker_combined.append(tickers[i] + "." + fields[i])
            except Exception:
                ticker_combined.append(tickers[i] + ".close")

        data_frame.columns = ticker_combined
        data_frame.index.name = 'Date'

        logger.info("Completed request from FXCM for " + str(ticker_combined))

        return data_frame

    def download(self, market_data_request):
        """Fetch candles from FXCM with up to 5 retries; minute data for
        'intraday' requests, otherwise daily."""
        logger = LoggerManager().getLogger(__name__)

        trials = 0

        con = fxcmpy.fxcmpy(access_token=constants.fxcm_API, log_level='error')

        data_frame = None

        if market_data_request.freq == 'intraday':
            per = 'm1'
        else:
            per = 'D1'

        # FXCM expects 'EUR/USD' style symbols; requests use 'EURUSD'
        tickers = [t[0:4] + "/" + t[4:7] for t in market_data_request.tickers]

        while trials < 5:
            try:
                data_frame = con.get_candles(tickers, period=per,
                                             start=market_data_request.start_date, stop=market_data_request.finish_date)

                break
            except Exception as e:
                trials = trials + 1
                logger.info(
                    "Attempting... " + str(trials) + " request to download from FXCM due to following error: " + str(e))

        if trials == 5:
            logger.error("Couldn't download from FXCM after several attempts!")

        return data_frame
| apache-2.0 |
odoousers2014/addons-yelizariev | sugarcrm_migration/import_kashflow.py | 16 | 21779 | # -*- coding: utf-8 -*-
import logging
_logger = logging.getLogger(__name__)
from openerp.exceptions import except_orm
try:
import MySQLdb
import MySQLdb.cursors
from pandas import merge, DataFrame
except ImportError:
pass
from openerp.addons.import_framework.import_base import import_base
from openerp.addons.import_framework.mapper import *
import re
import time
import datetime as DT
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import csv
class fix_kashflow_date(mapper):
    """Convert a KashFlow date string '31/12/2012' into ISO '2012-12-31'."""

    def __init__(self, field_name):
        self.field_name = field_name

    def __call__(self, external_values):
        raw = external_values.get(self.field_name)
        if not raw:
            # missing or empty date maps to an empty string
            return ''
        day, month, year = str(raw).split('/')
        return '-'.join([year, month, day])
class date_to_period(fix_kashflow_date, dbmapper):
    """Map a KashFlow date column to the id of the matching account.period.

    Inherits the date conversion from fix_kashflow_date, then looks up the
    accounting period containing that date via the ORM.
    """

    def __init__(self, field_name, context):
        super(date_to_period, self).__init__(field_name)
        # context is a callable returning e.g. {'company_id': id}
        self.context = context()

    def __call__(self, external_values):
        s = super(date_to_period, self).__call__(external_values)
        dt = DT.datetime.strptime(s, tools.DEFAULT_SERVER_DATE_FORMAT)
        period_ids = self.parent.pool.get('account.period').find(self.parent.cr, self.parent.uid, dt=dt, context=self.context)
        if not period_ids:
            # log through the module logger instead of printing to stdout
            # (the previous Py2 'print' statement bypassed server logging)
            _logger.warning('period_ids not found for %s', s)
        return period_ids and str(period_ids[0]) or ''
class import_kashflow(import_base):
    """Import a KashFlow csv export into OpenERP accounting.

    Expects per-company files named COMPANY-customers.csv,
    COMPANY-suppliers.csv, COMPANY-nominal-codes.csv and
    COMPANY-transactions.csv.
    """

    # logical table names used to build xml ids and locate csv files
    TABLE_COMPANY = 'companies'
    TABLE_CUSTOMER = '-customers'
    TABLE_SUPPLIER = '-suppliers'
    TABLE_PARTNER = '_partners'
    TABLE_JOURNAL = '_journals'
    TABLE_NOMINAL_CODES = '-nominal-codes'
    TABLE_NOMINAL_CODES_ROOT = '-nominal-codes_root'
    TABLE_TRANSACTION = '-transactions'

    # synthetic columns added to each csv row during import
    COL_ID_CUSTOM = 'id'
    COL_LINE_NUM = 'line_num'

    # column headers of the nominal-codes csv
    COL_NOMINAL_CODE = 'Nominal Code'
    COL_NOMINAL_CODE_NAME = 'Name'

    # column headers of the transactions csv
    COL_TR_TYPE = 'Transaction Type'
    COL_TR_BANK = 'Bank'
    COL_TR_CODE = 'Code'
    COL_TR_DATE = 'Date'
    COL_TR_TRANSACTION = 'Account'
    COL_TR_COMMENT = 'Comment'
    COL_TR_AMOUNT = 'Amount'
    COL_TR_VAT_RATE = 'VAT Rate'
    COL_TR_VAT_AMOUNT = 'VAT Amount'
    COL_TR_DEPARTMENT = 'Department'

    # column headers of the customers/suppliers csv
    COL_P_CODE = 'Code'
    COL_P_NAME = 'Name'
    COL_P_ADDRESS = 'Address'
    COL_P_LINE_2 = 'Line 2'
    COL_P_LINE_3 = 'Line 3'
    COL_P_LINE_4 = 'Line 4'
    COL_P_POST_CODE = 'Post Code'
    COL_P_FULL_NAME = 'Full Name'
    COL_P_TELEPHONE = 'Telephone'
    COL_P_MOBILE = 'Mobile'
    COL_P_SOURCE = 'Source'
def initialize(self):
    """Collect the configured csv files and derive the company list from
    the '*-transactions.csv' file names."""
    # files:
    # COMPANY_NAME-customers.csv
    # COMPANY_NAME-suppliers.csv
    # COMPANY_NAME-nominal-codes.csv
    # COMPANY_NAME-transactions.csv
    self.csv_files = self.context.get('csv_files')
    self.import_options.update({'separator':',',
                                #'quoting':''
                                })
    companies = []
    for f in self.csv_files:
        if f.endswith('-transactions.csv'):
            # company name = file-name part before '-transactions.csv'
            c = re.match('.*?([^/]*)-transactions.csv$', f).group(1)
            companies.append(c)
    self.companies = [{'name':c} for c in companies]
def get_data(self, table):
    """Read '<table>.csv' from the configured files.

    Returns a list of row dicts; each row is tagged with its position
    under COL_LINE_NUM. Returns [] when the file is absent.
    """
    file_name = filter(lambda f: f.endswith('/%s.csv' % table), self.csv_files)
    if file_name:
        _logger.info('read file "%s"' % ( '%s.csv' % table))
        file_name = file_name[0]
    else:
        _logger.info('file not found %s' % ( '%s.csv' % table))
        return []
    with open(file_name, 'rb') as csvfile:
        # normalise Windows line endings before handing the stream to csv
        fixed_file = StringIO(csvfile.read() .replace('\r\n', '\n'))
        reader = csv.DictReader(fixed_file,
                                delimiter = self.import_options.get('separator'),
                                #quotechar = self.import_options.get('quoting'),
                                )
        res = list(reader)
    for line_num, line in enumerate(res):
        line[self.COL_LINE_NUM] = str(line_num)
    return res
def get_mapping(self):
    """Assemble the full mapping list: the company table first, then the
    partner, journal, nominal-code and transaction mappings per company."""
    res = [self.get_mapping_company()]
    for c in self.companies:
        company = c.get('name')
        res.extend(
            self.get_mapping_partners(company) +
            [
                self.get_mapping_journals(company),
                self.get_mapping_nominal_codes(company),
                self.get_mapping_transactions(company),
            ])
    return res
def table_company(self):
    """Return the companies list as a pandas DataFrame."""
    return DataFrame(self.companies)
def finalize_companies(self):
    """Create fiscal years 2012-2014 (April to March) with their periods
    for every imported company, skipping years that already exist."""
    for c in self.companies:
        context = self.get_context_company(c.get('name'))()
        company_id = context.get('company_id')
        for year in [2012,2013,2014]:
            existed = self.pool.get('account.fiscalyear').search(self.cr, self.uid, [('code','=',str(year)), ('company_id','=', company_id)])
            if existed:
                continue
            year_id = self.pool.get('account.fiscalyear').create(self.cr, self.uid, {
                'name':'%s (%s)' % (str(year), c.get('name')),
                'code':str(year),
                # UK-style fiscal year: 1 April to 31 March
                'date_start': time.strftime('%s-04-01' % year),
                'date_stop': time.strftime('%s-03-31' % (year+1)),
                'company_id': company_id
                })
            # generate the 3-month periods for the new fiscal year
            self.pool.get('account.fiscalyear').create_period3(self.cr, self.uid, [year_id])
def get_mapping_company(self):
    """Mapping that creates one res.company per csv set, plus the root
    (view-type) account that each company's chart hangs under."""
    return {
        'name': self.TABLE_COMPANY,
        'table': self.table_company,
        'dependencies' : [],
        'models':[
            {'model' : 'res.company',
             'finalize': self.finalize_companies,
             'fields': {
                    'id': xml_id(self.TABLE_COMPANY, 'name'),
                    'name': 'name',
                    }
             },
            {'model' : 'account.account',
             'hook': self.hook_account_account_root,
             'fields': {
                    'id': xml_id(self.TABLE_NOMINAL_CODES_ROOT, 'name'),
                    'company_id/id': xml_id(self.TABLE_COMPANY, 'name'),
                    'code': const('0'),
                    'type': const('view'),
                    'name': 'name',
                    'user_type/id': const('account.data_account_type_view'),
                    }
             }
            ]
        }
def get_table(self, company, table):
    """Return a zero-argument callable that loads the csv for
    *company*/*table* into a DataFrame when invoked."""
    def loader():
        return DataFrame(self.get_data(company + table))
    return loader
def get_partner_by_name(self, name):
    """Return the id of the single res.partner with this exact name, or
    None when zero or several partners match."""
    found = self.pool['res.partner'].search(self.cr, self.uid,
                                            [('name', '=', name)])
    if isinstance(found, list):
        if len(found) != 1:
            # ambiguous or missing -- caller will create a new partner
            return None
        found = found[0]
    return found
def get_hook_check_existed_partners(self, xml_id_mapper, field_name, another_hook=None):
    """Return a hook that avoids duplicating partners.

    When a res.partner with the row's name already exists, an
    ir.model.data reference pointing at it is created and None is
    returned (row skipped); otherwise the row passes through so a new
    partner is created.
    """
    def f(external_values):
        if another_hook:
            external_values = another_hook(external_values)
            if not external_values:
                return None

        name = external_values.get(field_name)
        if not name:
            return None

        id = self.get_partner_by_name(name)
        if id:
            # create new reference to existed record
            xml_id_mapper.set_parent(self)
            data_name = xml_id_mapper(external_values)

            if self.pool.get('ir.model.data').search(self.cr, self.uid, [('name', '=', data_name)]):
                # already created
                return None

            vals = {'name': data_name,
                    'model': 'res.partner',
                    #'module': self.module_name,
                    'module': '',
                    'res_id': id,
                    }
            self.pool.get('ir.model.data').create(self.cr, self.uid, vals, context=self.context)
            return None

        return external_values # create new partner
    return f
def get_mapping_partners(self, company):
    """Build the customer and supplier mappings for *company*.

    Each mapping creates a main partner per csv row plus an optional
    child contact (used when a mobile number / full name is present).
    """
    table = company + self.TABLE_PARTNER
    def f(customer=False, supplier=False):
        table_cus_or_sup = self.TABLE_CUSTOMER if customer else self.TABLE_SUPPLIER
        return {
            'name': company + table_cus_or_sup,
            'table': self.get_table(company, table_cus_or_sup),
            'dependencies' : [self.TABLE_COMPANY],
            'models':[
                {'model' : 'res.partner',
                 'hook': self.get_hook_check_existed_partners(xml_id(table, self.COL_P_CODE), self.COL_P_NAME),
                 'fields': {
                        'id': xml_id(table, self.COL_P_CODE),
                        'company_id/id': self.company_id(company),
                        'name': self.COL_P_NAME,
                        'ref': self.COL_P_CODE,
                        'customer': const('1') if customer else const('0'),
                        'supplier': const('1') if supplier else const('0'),
                        'phone': self.COL_P_TELEPHONE,
                        #'mobile': self.COL_P_MOBILE,
                        'zip': self.COL_P_POST_CODE,
                        'street': self.COL_P_ADDRESS,
                        'street2': concat(self.COL_P_LINE_2,self.COL_P_LINE_3,self.COL_P_LINE_4),
                        'comment': ppconcat(self.COL_P_SOURCE),
                        }
                 },
                # child contact record (skipped when mobile/full name empty)
                {'model' : 'res.partner',
                 'hook': self.get_hook_check_existed_partners(xml_id(table+'_child', self.COL_P_CODE), self.COL_P_FULL_NAME, self.get_hook_ignore_empty(self.COL_P_MOBILE, self.COL_P_FULL_NAME)),
                 'fields': {
                        'id': xml_id(table+'_child', self.COL_P_CODE),
                        'company_id/id': self.company_id(company),
                        'parent_id/id': xml_id(table, self.COL_P_CODE),
                        'name': value(self.COL_P_FULL_NAME, default='NONAME'),
                        'customer': const('1') if customer else const('0'),
                        'supplier': const('1') if supplier else const('0'),
                        #'phone': self.COL_P_TELEPHONE,
                        'mobile': self.COL_P_MOBILE,
                        }
                 }
                ]
            }
    return [f(customer=True), f(supplier=True)]
def company_id(self, company):
    """Return a const mapper holding the xml id of *company*."""
    xmlid = self.get_xml_id(self.TABLE_COMPANY, 'name', {'name': company})
    return const(xmlid)
def get_hook_account_account(self, company):
    """Return a hook that skips nominal-code rows whose account.account
    record already exists and tags new rows with the company name (used
    later to link the row to the company's root account)."""
    def f(external_values):
        id = self.get_xml_id(company + self.TABLE_NOMINAL_CODES, self.COL_NOMINAL_CODE, external_values)
        res_id = self.pool.get('ir.model.data').xmlid_to_res_id(
            self.cr,
            self.uid,
            '.'+id
            )
        if res_id:
            # account already created
            return None
        external_values['company_name'] = company
        return external_values
    return f
def hook_account_account_root(self, external_values):
    """Skip creating the company's root account when it already exists."""
    id = self.get_xml_id(self.TABLE_NOMINAL_CODES_ROOT, 'name', external_values)
    res_id = self.pool.get('ir.model.data').xmlid_to_res_id(
        self.cr,
        self.uid,
        '.'+id
        )
    if res_id:
        # account already created
        return None
    return external_values
def get_mapping_nominal_codes(self, company):
    """Mapping that creates one account.account per nominal-code row,
    parented under the company's root account."""
    table = company + self.TABLE_NOMINAL_CODES
    return {
        'name': table,
        'table': self.get_table(company, self.TABLE_NOMINAL_CODES),
        'dependencies' : [self.TABLE_COMPANY],
        'models':[{
                'model' : 'account.account',
                'context': self.get_context_company(company),
                'hook': self.get_hook_account_account(company),
                'fields': {
                    'id': xml_id(table, self.COL_NOMINAL_CODE),
                    'company_id/id': self.company_id(company),
                    'code': self.COL_NOMINAL_CODE,
                    'name': self.COL_NOMINAL_CODE_NAME,
                    'user_type/id': const('account.data_account_type_view'),
                    # 'company_name' is injected by get_hook_account_account
                    'parent_id/id': xml_id(self.TABLE_NOMINAL_CODES_ROOT, 'company_name'),
                    }
                }]
        }
def get_xml_id(self, table, col, external_values):
    """Evaluate an xml_id mapper for *table*/*col* against the given row."""
    id_mapper = xml_id(table, col)
    id_mapper.set_parent(self)
    return id_mapper(external_values)
# KashFlow two-letter transaction codes -> OpenERP journal types
map_journal_type = {
    'SI':'sale',# Sales Invoice
    'SC':'sale',# Sales Credit
    'PC':'purchase',# Purchase Credit
    'PI':'purchase',# Purchase Invoice
    'JC':'general',# Journal Credit
    'JD':'general',# Journal Debit
    'BP':'bank',# Bank Payment
    'BR':'bank',# Bank Receipt
    }
def table_journal(self):
    """Build a one-column DataFrame listing every journal type code."""
    rows = [{self.COL_TR_TYPE: code} for code in self.map_journal_type]
    return DataFrame(rows)
def get_mapping_journals(self, company):
    """Mapping that creates one account.journal per transaction-type code
    for *company* (name and code are the two-letter KashFlow code)."""
    journal = company + self.TABLE_JOURNAL
    return {
        'name': journal,
        'table': self.table_journal,
        'dependencies' : [self.TABLE_COMPANY],
        'models':[
            {'model' : 'account.journal',
             'context': self.get_context_company(company),
             'fields': {
                    'id': xml_id(journal, self.COL_TR_TYPE),
                    'company_id/id': self.company_id(company),
                    'name': self.COL_TR_TYPE,
                    'code': self.COL_TR_TYPE,
                    'type': map_val(self.COL_TR_TYPE, self.map_journal_type),
                    }
             },
            ]
        }
def get_context_company(self, company):
    """Return a callable resolving *company* to {'company_id': db id}.

    The lookup is deferred (callable) because the company record may not
    exist yet when the mapping is constructed.
    """
    def f():
        company_id = self.pool.get('ir.model.data').xmlid_to_res_id(
            self.cr,
            self.uid,
            '.'+self.company_id(company)({})
            )
        return {'company_id':company_id}
    return f
def hook_bank_entries_move(self, external_values):
    """Pass through bank receipt/payment rows only; others map to None."""
    if external_values.get(self.COL_TR_TYPE) in ('BR', 'BP'):
        return external_values
    return None
def hook_bank_entries_move_line(self, external_values):
    """Split a bank payment/receipt row into a balanced pair of move
    lines (one debit, one credit); returns None for non-bank rows."""
    external_values = self.hook_bank_entries_move(external_values)
    if external_values is None:
        return None
    journal_type = external_values.get(self.COL_TR_TYPE)
    external_values['debit'] = '0'
    external_values['credit'] = '0'
    external_values[self.COL_ID_CUSTOM] = external_values[self.COL_LINE_NUM]
    bank = external_values.get(self.COL_TR_BANK)
    debit = external_values.copy()
    credit = external_values.copy()
    debit[self.COL_ID_CUSTOM] += '_debit'
    credit[self.COL_ID_CUSTOM] += '_credit'
    amount = float(external_values.get(self.COL_TR_AMOUNT))
    debit['debit'] = amount
    credit['credit'] = amount
    if journal_type == 'BP':
        # expense: debit the expense code, credit the bank account
        debit['account_id'] = external_values.get(self.COL_TR_CODE)
        credit['account_id'] = external_values.get(self.COL_TR_BANK)
    else:
        # income: debit the bank account, credit the income code
        debit['account_id'] = external_values.get(self.COL_TR_BANK)
        credit['account_id'] = external_values.get(self.COL_TR_CODE)
    return [debit, credit]
def hook_journal_entries_move(self, external_values):
    """Keep journal/sales/purchase rows only; synthesize a transaction
    reference from date and amount when the row has none."""
    journal_type = external_values.get(self.COL_TR_TYPE)
    if journal_type not in ['JC', 'JD', 'SI', 'SC', 'PI', 'PC']:
        return None
    if not external_values.get(self.COL_TR_TRANSACTION):
        tr = 'journal-entry-%s-%s' % (
            external_values.get(self.COL_TR_DATE),
            external_values.get(self.COL_TR_AMOUNT),
            )
        external_values[self.COL_TR_TRANSACTION] = tr
    return external_values
def hook_journal_entries_move_line(self, external_values):
    """Turn a journal/invoice row into move-line values; for invoice
    rows also emit the balancing counterpart on account code 1200."""
    external_values = self.hook_journal_entries_move(external_values)
    if external_values is None:
        return None
    journal_type = external_values.get(self.COL_TR_TYPE)
    amount = external_values.get(self.COL_TR_AMOUNT)
    if journal_type in ['JC', 'SC', 'PC']:
        # credit-type transactions
        external_values['debit']='0'
        external_values['credit']=amount
    else:
        external_values['debit']=amount
        external_values['credit']='0'
    bank = external_values.get(self.COL_TR_BANK)
    partner_id = ''
    # a non-numeric "Bank" value is actually a partner name in the export
    if bank and not bank.isdigit():
        partner_id = bank
    external_values['partner_id'] = partner_id
    external_values[self.COL_ID_CUSTOM] = '%s-%s-%s'%(
        external_values[self.COL_TR_TRANSACTION],
        external_values[self.COL_TR_CODE],
        external_values.get(self.COL_LINE_NUM)
        )
    res = [external_values]
    if journal_type not in ['JC', 'JD']:
        # invoices get a mirrored balancing line on the default
        # receivable/payable code 1200
        bank_line = external_values.copy()
        bank_line['debit'] = external_values['credit']
        bank_line['credit'] = external_values['debit']
        bank_line[self.COL_TR_CODE] = '1200'
        bank_line[self.COL_ID_CUSTOM] += '_extra'
        res.append(bank_line)
    return res
def get_mapping_transactions(self, company):
    """Mapping that turns transaction rows into account.move and
    account.move.line records: one move/line pair for journal and
    invoice entries, one for bank payments/receipts."""
    table = company + self.TABLE_TRANSACTION
    move = table + '_move'
    move_line = move + '_line'
    journal = company + self.TABLE_JOURNAL
    account = company + self.TABLE_NOMINAL_CODES
    partner = company + self.TABLE_PARTNER
    return {
        'name': table,
        'table': self.get_table(company, self.TABLE_TRANSACTION),
        'dependencies' : [company + self.TABLE_JOURNAL,
                          company + self.TABLE_NOMINAL_CODES,
                          company + self.TABLE_CUSTOMER,
                          company + self.TABLE_SUPPLIER,
                          ],
        'models':[
            # TODO VAT
            # JC,JD, SI,SC, PC,PI
            {'model' : 'account.move',
             'hook': self.hook_journal_entries_move,
             'context': self.get_context_company(company),
             'fields': {
                    'id': xml_id(move, self.COL_TR_TRANSACTION),
                    'company_id/id': self.company_id(company),
                    'ref': self.COL_TR_TRANSACTION,
                    'journal_id/id': xml_id(journal, self.COL_TR_TYPE),
                    'period_id/.id': date_to_period(self.COL_TR_DATE, self.get_context_company(company)),
                    'date': fix_kashflow_date(self.COL_TR_DATE),
                    'narration': self.COL_TR_COMMENT,
                    }
             },
            {'model' : 'account.move.line',
             'hook': self.hook_journal_entries_move_line,
             'context': self.get_context_company(company),
             'fields': {
                    'id': xml_id(move_line, self.COL_ID_CUSTOM),
                    'company_id/id': self.company_id(company),
                    'name': value(self.COL_TR_COMMENT, fallback=self.COL_TR_DATE, default='NONAME'),
                    'ref': self.COL_TR_TRANSACTION,
                    'date': fix_kashflow_date(self.COL_TR_DATE),
                    'move_id/id': xml_id(move, self.COL_TR_TRANSACTION),
                    # 'partner_id' is injected by hook_journal_entries_move_line
                    'partner_id/.id': res_id(const(partner), 'partner_id', default=None),
                    'account_id/id': xml_id(account, self.COL_TR_CODE),
                    'debit':'debit',
                    'credit':'credit',
                    }
             },
            # BP,BR
            {'model' : 'account.move',
             'context': self.get_context_company(company),
             'hook': self.hook_bank_entries_move,
             'fields': {
                    'id': xml_id(move, self.COL_LINE_NUM),
                    'company_id/id': self.company_id(company),
                    'ref': self.COL_TR_TRANSACTION,
                    'journal_id/id': xml_id(journal, self.COL_TR_TYPE),
                    'period_id/.id': date_to_period(self.COL_TR_DATE, self.get_context_company(company)),
                    'date': fix_kashflow_date(self.COL_TR_DATE),
                    'narration': self.COL_TR_COMMENT,
                    }
             },
            {'model' : 'account.move.line',
             'hook': self.hook_bank_entries_move_line,
             'context': self.get_context_company(company),
             'fields': {
                    'id': xml_id(move_line, self.COL_ID_CUSTOM),
                    'company_id/id': self.company_id(company),
                    'name': value(self.COL_TR_COMMENT, fallback=self.COL_TR_DATE, default='NONAME'),
                    'ref': self.COL_TR_TRANSACTION,
                    'date': fix_kashflow_date(self.COL_TR_DATE),
                    'move_id/id': xml_id(move, self.COL_LINE_NUM),
                    # 'account_id' is injected by hook_bank_entries_move_line
                    'account_id/id': xml_id(account, 'account_id'),
                    'debit':'debit',
                    'credit':'credit',
                    }
             },
            ]
        }
| lgpl-3.0 |
xzh86/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt

from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering

###############################################################################
# Generate data
# NOTE(review): sp.misc.lena() was removed in SciPy >= 0.17 -- confirm the
# pinned SciPy version or switch to another sample image.
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# one feature (grey level) per pixel
X = np.reshape(lena, (-1, 1))

###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)

###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15  # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
        linkage='ward', connectivity=connectivity).fit(X)
# reshape the flat label vector back to image shape
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)

###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
    # NOTE(review): 'contours' is not a documented plt.contour keyword
    # (levels are passed positionally or via 'levels') -- verify against
    # the targeted matplotlib version.
    plt.contour(label == l, contours=1,
                colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
eg-zhang/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with decision tree.
The :ref:`decision trees <tree>` is
used to fit a sine curve with addition noisy observation. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)

# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt

# Create a random dataset: noisy sine curve over [0, 5)
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
# perturb every 5th target to illustrate overfitting at high depth
y[::5] += 3 * (0.5 - rng.rand(16))

# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)

# Predict on a dense grid to visualise the piecewise-constant fits
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)

# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
sunpy/solarbextrapolation | solarbextrapolation/analyticalmodels/base.py | 1 | 8497 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 19:30:22 2015
@author: alex_
"""
# General Imports
import matplotlib as mpl
mpl.use('TkAgg') # Force mpl backend not to use qt. Else we have a conflict.
import numpy as np
#import pickle
import time
from datetime import datetime
#from collections import namedtuple
import warnings
import inspect
#from sunpy.sun._constants import physical_constants as con
# SunPy imports
import sunpy.map
from sunpy.sun import constants, sun
from sunpy.time import parse_time, is_time
from astropy.table import Table
import astropy.units as u
# from mayavi import mlab
# Internal imports
#from solarbextrapolation.utilities import si_this_map
from solarbextrapolation.map3dclasses import Map3D
class AnalyticalModel(object):
    """
    Common class for the development of analytical models of magnetic fields.
    Use the models to evaluate the accuracy of an extrapolation routine with
    the figures of merit.
    """
    def __init__(self, **kwargs):
        # Default grid shape and physical ranges for the volume the model covers.
        self.shape = kwargs.get('shape', u.Quantity([5, 5, 5] * u.pixel))  # (x,y,z)
        self.xrange = kwargs.get('xrange', u.Quantity([-10, 10] * u.Mm))
        self.yrange = kwargs.get('yrange', u.Quantity([-10, 10] * u.Mm))
        # BUG FIX: this line previously assigned to self.yrange (overwriting
        # the value above), so the 'zrange' kwarg was lost and self.zrange
        # was never set, breaking _generate_field().
        self.zrange = kwargs.get('zrange', u.Quantity([0, 20] * u.Mm))

        # Metadata.
        # BUG FIX: 'ZNAXIS2' was misspelled 'ZNAxXIS2', and axes 2/3 reused
        # the axis-1 extent; use the per-axis shape values instead.
        self.meta = {'ZNAXIS': 3,
                     'ZNAXIS1': self.shape[0].value,
                     'ZNAXIS2': self.shape[1].value,
                     'ZNAXIS3': self.shape[2].value}
        self.meta['analytical_model_notes'] = kwargs.get('notes', '')
        self.meta['BUNIT'] = kwargs.get('bunit', u.T)
        # CRVALn, CDELTn and NAXIS (already in meta) used for storing range in 2D fits files.
        self.filepath = kwargs.get('filepath', None)
        self.routine = kwargs.get('analytical_model_routine', type(self))

        # Default 3D magnetic field: one 3-vector per grid cell.
        # BUG FIX: '[3] + self.shape.value' broadcast-adds 3 to every entry
        # of the shape array instead of prepending an axis; build an
        # explicit integer shape list.
        grid_shape = [int(v) for v in self.shape.value]
        npField = np.zeros([3] + grid_shape)
        self.field = Map3D(npField, self.meta)

        # Default magnetic field on the lower (2D) boundary.
        magnetogram = np.zeros(grid_shape[0:2])
        magnetogram_header = {'ZNAXIS': 2,
                              'ZNAXIS1': self.shape[0].value,
                              'ZNAXIS2': self.shape[1].value}
        self.magnetogram = sunpy.map.Map((magnetogram, magnetogram_header))

    def _generate_field(self, **kwargs):
        """
        The method for running a model to generate the field.

        This is the primary method to be edited in subclasses for specific
        model implementations.
        """
        # Model code goes here.
        # NOTE(review): assumes self.map_boundary_data has been provided by
        # the subclass/caller; it is not initialised in __init__ -- confirm.
        arr_4d = np.zeros([self.map_boundary_data.data.shape[0], self.map_boundary_data.data.shape[1], 1, 3])

        # Turn the 4D array into a Map3D object.
        map_output = Map3D(arr_4d, self.meta, xrange=self.xrange, yrange=self.yrange, zrange=self.zrange,
                           xobsrange=self.xrange, yobsrange=self.yrange)

        return map_output

    def generate(self, **kwargs):
        """
        Method to be called to calculate the vector field and return as a Map3D object.
        Times and saves the extrapolation where applicable.
        """
        # Record the time and duration of the extrapolation.
        dt_start = datetime.now()
        tim_start = time.time()
        arr_output = self._generate_field(**kwargs)
        tim_duration = time.time() - tim_start

        # Add the duration and time to the meta/header data.
        arr_output.meta['extrapolator_start_time'] = dt_start.isoformat()
        arr_output.meta['extrapolator_duration'] = tim_duration
        arr_output.meta['extrapolator_duration_unit'] = u.s

        # Save the Map3D if a filepath has been set. (to avoid losing work)
        if self.filepath:
            arr_output.save(self.filepath)

        # Add the output map to the object and return.
        self.map = arr_output
        return arr_output

    def to_los_magnetogram(self, **kwargs):
        """
        Calculate the LoS vector field as a SunPy map and return.

        Generally this will require that you have run generate(self, ``**kwargs``)
        first, so in the base class this is checked, but it is not always the
        case as some models may allow this to be determined without calculating
        the full field.

        .. I'm not sure if this is a good default.
        """
        return self.magnetogram

    def to_vec_magnetogram(self, **kwargs):
        """
        Calculate the vector field as a SunPy map and return.

        Generally this will require that you have run ``generate(self, **kwargs)``
        first, so in the base class this is checked, but it is not always the
        case as some models may allow this to be determined without calculating
        the full field. ######### I'm not sure if this is a good default.
        """
        return self.magnetogram
'''if __name__ == '__main__':
# User-specified parameters
tup_shape = ( 20, 20, 20 )
x_range = ( -80.0, 80 ) * u.Mm
y_range = ( -80.0, 80 ) * u.Mm
z_range = ( 0.0, 120 ) * u.Mm
# Derived parameters (make SI where applicable)
x_0 = x_range[0].to(u.m).value
Dx = (( x_range[1] - x_range[0] ) / ( tup_shape[0] * 1.0 )).to(u.m).value
x_size = Dx * tup_shape[0]
y_0 = y_range[0].to(u.m).value
Dy = (( y_range[1] - y_range[0] ) / ( tup_shape[1] * 1.0 )).to(u.m).value
y_size = Dy * tup_shape[1]
z_0 = z_range[0].to(u.m).value
Dz = (( z_range[1] - z_range[0] ) / ( tup_shape[2] * 1.0 )).to(u.m).value
z_size = Dy * tup_shape[2]
# Define the extrapolator as a child of the Extrapolators class
class AnaOnes(AnalyticalModel):
def __init__(self, **kwargs):
super(AnaOnes, self).__init__(**kwargs)
def _generate_field(self, **kwargs):
# Adding in custom parameters to the metadata
self.meta['analytical_model_routine'] = 'Ones Model'
# Generate a trivial field and return (X,Y,Z,Vec)
arr_4d = np.ones(self.shape.value.tolist() + [3])
return Map3D( arr_4d, self.meta )
# Setup an anylitical model
xrange = u.Quantity([ 50, 300] * u.arcsec)
yrange = u.Quantity([-350, -100] * u.arcsec)
zrange = u.Quantity([ 0, 250] * u.arcsec)
aAnaMod = AnaOnes()
aMap3D = aAnaMod.generate()
# Visualise the 3D vector field
from solarbextrapolation.visualisation_functions import visualise
"""
fig = visualise(aMap3D,
show_boundary_axes=False,
boundary_units=[1.0*u.arcsec, 1.0*u.arcsec],
show_volume_axes=True,
debug=False)
"""
fig = visualise(aMap3D,
show_boundary_axes=False,
show_volume_axes=False,
debug=False)
mlab.show()
"""
# For B_I field only, to save re-creating this interpolator for every cell.
A_I_r_perp_interpolator = interpolate_A_I_from_r_perp(flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, (x_size**2 + y_size**2 + z_size**2)**(0.5)*1.2, 1000`0)
field = np.zeros( ( tup_shape[0], tup_shape[1], tup_shape[2], 3 ) )
for i in range(0, tup_shape[0]):
for j in range(0, tup_shape[1]):
for k in range(0, tup_shape[2]):
# Position of this point in space
x_pos = x_0 + ( i + 0.5 ) * Dx
y_pos = y_0 + ( j + 0.5 ) * Dy
z_pos = z_0 + ( k + 0.5 ) * Dz
#field[i,j,k] = B_theta(x_pos, y_pos, z_pos, flo_TD_a, flo_TD_d, flo_TD_R, flo_TD_I, flo_TD_I_0)
#field[i,j,k] = B_q(x_pos, y_pos, z_pos, flo_TD_L, flo_TD_d, flo_TD_q)
#field[i,j,k] = B_I(x_pos, y_pos, z_pos, flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, Dx, A_I_r_perp_interpolator)
field[i,j,k] = B_theta(x_pos, y_pos, z_pos, flo_TD_a, flo_TD_d, flo_TD_R, flo_TD_I, flo_TD_I_0) + B_q(x_pos, y_pos, z_pos, flo_TD_L, flo_TD_d, flo_TD_q) + B_I(x_pos, y_pos, z_pos, flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, Dx, A_I_r_perp_interpolator)
map_field = Map3D( field, {}, xrange=x_range, yrange=y_range, zrange=z_range )
np_boundary_data = field[:,:,0,2].T
dummyDataToMap(np_boundary_data, x_range, y_range)
#dic_boundary_data = { 'datavals': np_boundary_data.data.shape[0]**2, 'dsun_obs': 147065396219.34, }
visualise(map_field, scale=1.0*u.Mm, show_volume_axes=True, debug=True)
"""'''
| mit |
DanielJMaher/compliance-checker | compliance_checker/tests/test_acdd.py | 2 | 15456 | from compliance_checker.acdd import ACDD1_1Check, ACDD1_3Check
from compliance_checker.tests.resources import STATIC_FILES
from compliance_checker.tests import BaseTestCase
from netCDF4 import Dataset
import os
def to_singleton_var(l):
    '''
    For each element of *l*, take its first item when the element is a
    non-string iterable; otherwise keep the element unchanged.
    '''
    singletons = []
    for elem in l:
        if hasattr(elem, '__iter__') and not isinstance(elem, str):
            singletons.append(elem[0])
        else:
            singletons.append(elem)
    return singletons
def check_varset_nonintersect(group0, group1):
    '''
    Return True when *group0* and *group1* contain exactly the same
    elements, regardless of order (and ignoring duplicates).

    :param list group0: A list of strings to compare
    :param list group1: A list of strings to compare
    '''
    # Two sets are equal iff their symmetric difference is empty.
    return set(group0) == set(group1)
class TestACDD1_1(BaseTestCase):
    """Checks the ACDD 1.1 checker against the NCEI Gold Standard Point
    dataset (all attributes present) and an empty in-memory dataset
    (all attributes absent)."""

    # Adapted using `pandas.read_html` from URL
    # http://wiki.esipfed.org/index.php/Attribute_Convention_for_Data_Discovery_1-1
    expected = {
        "Highly Recommended": [
            "title",
            "summary",
            "keywords"
        ],
        "Highly Recommended Variable Attributes": [
            "long_name",
            "standard_name",
            "units",
            "coverage_content_type"
        ],
        "Recommended": [
            "id",
            "naming_authority",
            "keywords_vocabulary",
            "history",
            "comment",
            "date_created",
            "creator_name",
            "creator_url",
            "creator_email",
            "institution",
            "project",
            "processing_level",
            "geospatial_bounds",
            "geospatial_lat_min",
            "geospatial_lat_max",
            "geospatial_lon_min",
            "geospatial_lon_max",
            "geospatial_vertical_min",
            "geospatial_vertical_max",
            "time_coverage_start",
            "time_coverage_end",
            "time_coverage_duration",
            "time_coverage_resolution",
            "standard_name_vocabulary",
            "license"
        ],
        "Suggested": [
            "contributor_name",
            "contributor_role",
            "publisher_name",
            "publisher_url",
            "publisher_email",
            "date_modified",
            "date_issued",
            "geospatial_lat_units",
            "geospatial_lat_resolution",
            "geospatial_lon_units",
            "geospatial_lon_resolution",
            "geospatial_vertical_units",
            "geospatial_vertical_resolution",
            "geospatial_vertical_positive"
        ]
    }

    def setUp(self):
        # Use the NCEI Gold Standard Point dataset for ACDD checks
        self.ds = self.load_dataset(STATIC_FILES['ncei_gold_point_1'])

        self.acdd = ACDD1_1Check()
        self.acdd_highly_recommended = to_singleton_var(self.acdd.high_rec_atts)
        self.acdd_recommended = to_singleton_var(self.acdd.rec_atts)
        self.acdd_suggested = to_singleton_var(self.acdd.sug_atts)

    def test_cc_meta(self):
        """The checker advertises the expected spec name and version."""
        assert self.acdd._cc_spec == 'acdd'
        assert self.acdd._cc_spec_version == '1.1'

    def test_highly_recommended(self):
        '''
        Checks that all highly recommended attributes are present
        '''
        assert check_varset_nonintersect(self.expected['Highly Recommended'],
                                         self.acdd_highly_recommended)

        # Check the reference dataset, NCEI 1.1 Gold Standard Point
        results = self.acdd.check_high(self.ds)
        for result in results:
            self.assert_result_is_good(result)

        # Create an empty dataset that writes to /dev/null This acts as a
        # temporary netCDF file in-memory that never gets written to disk.
        empty_ds = Dataset(os.devnull, 'w', diskless=True)
        self.addCleanup(empty_ds.close)

        results = self.acdd.check_high(empty_ds)
        for result in results:
            self.assert_result_is_bad(result)

    def test_recommended(self):
        '''
        Checks that all recommended attributes are present
        '''
        # 'geospatial_bounds' attribute currently has its own separate check
        # from the list of required atts
        assert check_varset_nonintersect(self.expected['Recommended'],
                                         self.acdd_recommended)

        ncei_exceptions = [
            'geospatial_bounds',
            'time_coverage_duration'
        ]
        results = self.acdd.check_recommended(self.ds)
        for result in results:
            # NODC 1.1 doesn't have some ACDD attributes
            if result.name in ncei_exceptions:
                continue
            # The NCEI Gold Standard Point is missing time_coverage_resolution...
            if result.name == 'time_coverage_resolution':
                self.assert_result_is_bad(result)
                continue
            # BUG FIX: the original loop body asserted nothing for the
            # remaining (expected-good) results, so regressions in those
            # attributes would have passed silently.  Mirrors the
            # equivalent loop in TestACDD1_3.test_recommended.
            self.assert_result_is_good(result)

        # Create an empty dataset that writes to /dev/null This acts as a
        # temporary netCDF file in-memory that never gets written to disk.
        empty_ds = Dataset(os.devnull, 'w', diskless=True)
        self.addCleanup(empty_ds.close)

        results = self.acdd.check_recommended(empty_ds)
        for result in results:
            self.assert_result_is_bad(result)

    def test_suggested(self):
        '''
        Checks that all suggested attributes are present
        '''
        assert check_varset_nonintersect(self.expected['Suggested'],
                                         self.acdd_suggested)

        # Attributes that are missing from NCEI but should be there
        missing = [
            'geospatial_lat_resolution',
            'geospatial_lon_resolution',
            'geospatial_vertical_resolution'
        ]
        results = self.acdd.check_suggested(self.ds)
        for result in results:
            if result.name in missing:
                self.assert_result_is_bad(result)
                continue
            self.assert_result_is_good(result)

        # Create an empty dataset that writes to /dev/null This acts as a
        # temporary netCDF file in-memory that never gets written to disk.
        empty_ds = Dataset(os.devnull, 'w', diskless=True)
        self.addCleanup(empty_ds.close)

        # BUG FIX: this test is about the *suggested* attributes, but the
        # empty dataset was being run through check_recommended (copy-paste
        # from test_recommended).
        results = self.acdd.check_suggested(empty_ds)
        for result in results:
            self.assert_result_is_bad(result)

    def test_acknowldegement_check(self):
        # (sic: method name typo kept so the test id stays stable)
        # Check British spelling ("acknowledgement")
        try:
            empty0 = Dataset(os.devnull, 'w', diskless=True)
            result = self.acdd.check_acknowledgment(empty0)
            self.assert_result_is_bad(result)
            empty0.acknowledgement = "Attribution goes here"
            result = self.acdd.check_acknowledgment(empty0)
            self.assert_result_is_good(result)
        finally:
            empty0.close()

        try:
            # Check American spelling ("acknowledgment") -- the original
            # comment said "British" here, but this branch exercises the
            # spelling without the extra 'e'.
            empty1 = Dataset(os.devnull, 'w', diskless=True)
            result = self.acdd.check_acknowledgment(empty1)
            self.assert_result_is_bad(result)
            empty1.acknowledgment = "Attribution goes here"
            result = self.acdd.check_acknowledgment(empty1)
            self.assert_result_is_good(result)
        finally:
            empty1.close()
class TestACDD1_3(BaseTestCase):
    """Checks the ACDD 1.3 checker against the NCEI Gold Standard Point
    (version 2) dataset and against empty in-memory datasets."""
    # Adapted using `pandas.read_html` from URL
    # http://wiki.esipfed.org/index.php/Attribute_Convention_for_Data_Discovery_1-3
    expected = {
        "Suggested": [
            "creator_type",
            "creator_institution",
            "publisher_type",
            "publisher_institution",
            "program",
            "contributor_name",
            "contributor_role",
            "geospatial_lat_units",
            "geospatial_lat_resolution",
            "geospatial_lon_units",
            "geospatial_lon_resolution",
            "geospatial_vertical_units",
            "geospatial_vertical_resolution",
            "date_modified",
            "date_issued",
            "date_metadata_modified",
            "product_version",
            "keywords_vocabulary",
            "platform",
            "platform_vocabulary",
            "instrument",
            "instrument_vocabulary",
            "metadata_link",
            "references"
        ],
        "Highly Recommended": [
            "title",
            "summary",
            "keywords",
            "Conventions"
        ],
        "Recommended": [
            "id",
            "naming_authority",
            "history",
            "source",
            "processing_level",
            "comment",
            "license",
            "standard_name_vocabulary",
            "date_created",
            "creator_name",
            "creator_email",
            "creator_url",
            "institution",
            "project",
            "publisher_name",
            "publisher_email",
            "publisher_url",
            "geospatial_bounds",
            "geospatial_bounds_crs",
            "geospatial_bounds_vertical_crs",
            "geospatial_lat_min",
            "geospatial_lat_max",
            "geospatial_lon_min",
            "geospatial_lon_max",
            "geospatial_vertical_min",
            "geospatial_vertical_max",
            "geospatial_vertical_positive",
            "time_coverage_start",
            "time_coverage_end",
            "time_coverage_duration",
            "time_coverage_resolution"
        ],
        "Highly Recommended Variable Attributes": [
            "long_name",
            "standard_name",
            "units",
            "coverage_content_type"
        ]
    }
    def setUp(self):
        # Use the NCEI Gold Standard Point dataset for ACDD checks
        self.ds = self.load_dataset(STATIC_FILES['ncei_gold_point_2'])
        self.acdd = ACDD1_3Check()
        # Flatten the checker's attribute tables so they can be compared
        # against the `expected` lists above.
        self.acdd_highly_recommended = to_singleton_var(self.acdd.high_rec_atts)
        self.acdd_recommended = to_singleton_var(self.acdd.rec_atts)
        self.acdd_suggested = to_singleton_var(self.acdd.sug_atts)
    def test_cc_meta(self):
        # The checker must advertise the correct spec name and version.
        assert self.acdd._cc_spec == 'acdd'
        assert self.acdd._cc_spec_version == '1.3'
    def test_highly_recommended(self):
        '''
        Checks that all highly recommended attributes are present
        '''
        assert check_varset_nonintersect(self.expected['Highly Recommended'],
                                         self.acdd_highly_recommended)
        results = self.acdd.check_high(self.ds)
        for result in results:
            # NODC 2.0 has a different value in the conventions field
            self.assert_result_is_good(result)
    def test_recommended(self):
        '''
        Checks that all recommended attributes are present
        '''
        assert check_varset_nonintersect(self.expected['Recommended'],
                                         self.acdd_recommended)
        results = self.acdd.check_recommended(self.ds)
        # Attributes the NCEI reference dataset legitimately lacks.
        ncei_exceptions = [
            'time_coverage_duration',
            'time_coverage_resolution'
        ]
        for result in results:
            if result.name in ncei_exceptions:
                self.assert_result_is_bad(result)
                continue
            self.assert_result_is_good(result)
    def test_suggested(self):
        '''
        Checks that all suggested attributes are present
        '''
        assert check_varset_nonintersect(self.expected['Suggested'],
                                         self.acdd_suggested)
        results = self.acdd.check_suggested(self.ds)
        # NCEI does not require or suggest resolution attributes
        ncei_exceptions = [
            'geospatial_lat_resolution',
            'geospatial_lon_resolution',
            'geospatial_vertical_resolution'
        ]
        for result in results:
            if result.name in ncei_exceptions:
                self.assert_result_is_bad(result)
                continue
            self.assert_result_is_good(result)
    def test_variables(self):
        '''
        Test that variables are checked for required attributes
        '''
        # Create an empty dataset that writes to /dev/null This acts as a
        # temporary netCDF file in-memory that never gets written to disk.
        empty_ds = Dataset(os.devnull, 'w', diskless=True)
        self.addCleanup(empty_ds.close)
        # The dataset needs at least one variable to check that it's missing
        # all the required attributes.
        empty_ds.createDimension('time', 1)
        empty_ds.createVariable('fake', 'float32', ('time',))
        # long_name: present on every variable of the reference dataset,
        # absent on the empty dataset's single variable.
        results = self.acdd.check_var_long_name(self.ds)
        for result in results:
            self.assert_result_is_good(result)
        results = self.acdd.check_var_long_name(empty_ds)
        assert len(results) == 1
        for result in results:
            self.assert_result_is_bad(result)
        # standard_name
        results = self.acdd.check_var_standard_name(self.ds)
        for result in results:
            self.assert_result_is_good(result)
        results = self.acdd.check_var_standard_name(empty_ds)
        assert len(results) == 1
        for result in results:
            self.assert_result_is_bad(result)
        # units
        results = self.acdd.check_var_units(self.ds)
        for result in results:
            self.assert_result_is_good(result)
        results = self.acdd.check_var_units(empty_ds)
        assert len(results) == 1
        for result in results:
            self.assert_result_is_bad(result)
    def test_acknowledgement(self):
        '''
        Test acknowledgement attribute is being checked
        '''
        empty_ds = Dataset(os.devnull, 'w', diskless=True)
        self.addCleanup(empty_ds.close)
        result = self.acdd.check_acknowledgment(self.ds)
        self.assert_result_is_good(result)
        result = self.acdd.check_acknowledgment(empty_ds)
        self.assert_result_is_bad(result)
    def test_vertical_extents(self):
        '''
        Test vertical extents are being checked
        '''
        result = self.acdd.check_vertical_extents(self.ds)
        self.assert_result_is_good(result)
    def test_time_extents(self):
        '''
        Test that the time extents are being checked
        '''
        result = self.acdd.check_time_extents(self.ds)
        self.assert_result_is_good(result)
        empty_ds = Dataset(os.devnull, 'w', diskless=True)
        self.addCleanup(empty_ds.close)
        # The dataset needs at least one variable to check that it's missing
        # all the required attributes.
        empty_ds.createDimension('time', 1)
        time_var = empty_ds.createVariable('time', 'float32', ('time',))
        time_var.units = 'seconds since 1970-01-01 00:00:00 UTC'
        time_var[:] = [1451692800]  # 20160102T000000Z in seconds since epoch
        empty_ds.time_coverage_start = '20160102T000000Z'
        empty_ds.time_coverage_end = '20160102T000000Z'
        result = self.acdd.check_time_extents(empty_ds)
        self.assert_result_is_good(result)
        # try the same thing with time offsets
        time_var.units = 'seconds since 1970-01-01 00:00:00-10:00'
        empty_ds.time_coverage_start = '20160102T000000-1000'
        empty_ds.time_coverage_end = '20160102T000000-1000'
        result = self.acdd.check_time_extents(empty_ds)
        self.assert_result_is_good(result)
| apache-2.0 |
apdavison/IzhikevichModel | PyNN/old/test_IF_cond_exp.py | 2 | 2394 |
from pyNN.random import RandomDistribution, NumpyRNG
from pyNN.neuron import *
from pyNN.utility import get_script_args, Timer, ProgressBar, init_logging, normalized_filename
import matplotlib.pyplot as plt
import numpy as np
timeStep = 0.01
setup(timestep=timeStep, min_delay=0.5)

# IF_cond_exp (conductance-based, exponential-synapse) neuron parameters.
tau_m = 20.0     # [ms]
cm = 0.2         # [nF]
v_rest = -60.0   # [mV]
v_thresh = -50.0 # [mV]
tau_syn_E = 5.0  # [ms]
tau_syn_I = 10.0 # [ms]
e_rev_E = 0.0    # [mV]
e_rev_I = -80.0  # [mV]
v_reset = -60.0  # [mV]
tau_refrac = 5.0 # [ms]
i_offset = 0.0   # [nA]

neuronParameters = {
    'tau_m': tau_m,
    'cm': cm,
    'v_rest': v_rest,
    'v_thresh': v_thresh,
    'tau_syn_E': tau_syn_E,
    'tau_syn_I': tau_syn_I,
    'e_rev_E': e_rev_E,
    'e_rev_I': e_rev_I,
    'v_reset': v_reset,
    'tau_refrac': tau_refrac,
    'i_offset': i_offset
}

cell_type = IF_cond_exp(**neuronParameters)
rand_distr = RandomDistribution('uniform', (v_reset, v_thresh), rng=NumpyRNG(seed=85524))
neuron = create(cell_type)
# NOTE(review): PyNN's initialize() normally takes state-variable names
# (e.g. v=rand_distr); the keyword 'neuron=' looks suspect -- confirm
# against the PyNN version this script targets.
neuron.initialize(neuron=rand_distr)
neuron.record('v')

# Build the injected current: zero for the first 30 ms, then a linear ramp
# (slope 0.005 per ms, per the linspace endpoints) from 30 ms to 300 ms.
totalTimes = np.zeros(0)
totalAmps = np.zeros(0)
times = np.linspace(0.0, 30.0, int(1 + (30.0 - 0.0) / timeStep))
amps = np.linspace(0.0, 0.0, int(1 + (30.0 - 0.0) / timeStep))
totalTimes = np.append(totalTimes, times)
totalAmps = np.append(totalAmps, amps)
times = np.linspace(30 + timeStep, 300, int((300 - 30) / timeStep))
amps = np.linspace(0.005 * timeStep, 0.005 * (300 - 30), int((300 - 30) / timeStep))
totalTimes = np.append(totalTimes, times)
totalAmps = np.append(totalAmps, amps)

injectedCurrent = StepCurrentSource(times=totalTimes, amplitudes=totalAmps)
injectedCurrent.inject_into(neuron)

run(300)

data = neuron.get_data().segments[0]

# Plot the membrane potential in panel (G) of a 5x4 grid; the extra
# polyline [0, 30, 300, 300] / [-90, -90, -70, -90] sketches the current ramp.
plt.ion()
fig = plt.figure(1, facecolor='white')
ax1 = fig.add_subplot(5, 4, 7)
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
ax1.spines['left'].set_color('None')
ax1.spines['right'].set_color('None')
ax1.spines['bottom'].set_color('None')
ax1.spines['top'].set_color('None')
ax1.set_title('(G) Class 1 excitable')
vm = data.filter(name='v')[0]
plt.plot(vm.times, vm, [0, 30, 300, 300], [-90, -90, -70, -90])
plt.show(block=False)
fig.canvas.draw()

# BUG FIX: raw_input() does not exist on Python 3; fall back to input() so
# the "wait for enter" pause works on both interpreter versions.
try:
    _pause = raw_input  # noqa: F821 -- Python 2 only
except NameError:
    _pause = input
_pause("Simulation finished... Press enter to exit...")
| bsd-3-clause |
stuart-knock/tvb-library | contrib/from_articles/region_deterministic_bnm_sj3d_a.py | 5 | 6407 | # -*- coding: utf-8 -*-
"""
What:
Reproduces Figures 23 and 24of Sanz-Leon P., Knock, S. A., Spiegler, A. and Jirsa V.
Mathematical framework for large-scale brain network modelling in The Virtual Brain.
Neuroimage, 2014, (in review)
Needs:
A working installation of tvb
Run:
python region_deterministic_bnm_sjd3d_a.py -s True -f True
#Subsequent calls can be made with:
python region_deterministic_bnm_sj3d_a.py -f True
.. author:: Paula Sanz-Leon
"""
import numpy
import argparse
from tvb.simulator.lab import *
import matplotlib.pylab as pylab
pylab.rcParams['figure.figsize'] = 20, 15  # that's default image size for this interactive session
pylab.rcParams.update({'font.size': 22})
pylab.rcParams.update({'lines.linewidth': 3})
pylab.rcParams.update({'axes.linewidth': 3})
# Command-line switches: -s/--sim re-runs the simulations; -f/--fig replots
# from the .npy files saved by a previous -s run.
parser = argparse.ArgumentParser(description='Reproduce results of Figure XX presented in Sanz-Leon et al 2014')
parser.add_argument('-s','--sim', help='Run the simulations', default=False)
parser.add_argument('-f','--fig', help='Plot the figures', default=False)
args = vars(parser.parse_args())
# One simulation per long-range coupling strength in `gcs`; `idx` tags the
# corresponding output files.
idx = ['a0', 'a1', 'a2']
gcs = [0.0, 0.5, 1.0]
simulation_length = 2e3
speed = 10.
if args['sim']:
    for i in range(3):
        # Reduced-set Hindmarsh-Rose model on the 66-region connectome,
        # with linear long-range coupling of strength gcs[i].
        oscilator = models.ReducedSetHindmarshRose()
        oscilator.variables_of_interest = ["xi", "eta", "tau", "alpha", "beta", "gamma"]
        white_matter = connectivity.Connectivity.from_file("connectivity_66.zip")
        white_matter.speed = numpy.array([speed])
        white_matter_coupling = coupling.Linear(a=gcs[i])
        #Initialise an Integrator
        heunint = integrators.HeunDeterministic(dt=0.1)
        #Initialise some Monitors with period in physical time
        momo = monitors.Raw()
        mama = monitors.TemporalAverage(period=1.)
        #Bundle them
        what_to_watch = (momo, mama)
        #Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors.
        sim = simulator.Simulator(model = oscilator, connectivity = white_matter,
                              coupling = white_matter_coupling,
                              integrator = heunint, monitors = what_to_watch)
        sim.configure()
        # LOG.info("Starting simulation...")
        # #Perform the simulation
        raw_data = []
        raw_time = []
        tavg_data = []
        tavg_time = []
        # Each monitor yields (time, data) pairs, or None when it has no
        # sample for this integration step.
        for raw, tavg in sim(simulation_length=simulation_length):
            if not raw is None:
                raw_time.append(raw[0])
                raw_data.append(raw[1])
            if not tavg is None:
                tavg_time.append(tavg[0])
                tavg_data.append(tavg[1])
        LOG.info("Finished simulation.")
        #Make the lists numpy.arrays for easier use.
        TAVG = numpy.asarray(tavg_data)
        RAW = numpy.asarray(raw_data)
        LOG.info("Saving simulated data ...")
        numpy.save('region_deterministic_bnm_sj3d_raw_' + idx[i] + '.npy', RAW)
        numpy.save('region_deterministic_bnm_sj3d_tavg_' + idx[i] + '.npy', TAVG)
        numpy.save('region_deterministic_bnm_sj3d_rawtime_' + idx[i] + '.npy', raw_time)
        numpy.save('region_deterministic_bnm_sj3d_tavgtime_' + idx[i] + '.npy', tavg_time)
if args['fig']:
    for i in range(3):
        # Discard the first quarter of the run as transient.
        # NOTE(review): 2e3 // 4 is a float (500.0); using it as a slice
        # index fails on Python 3 / recent numpy -- cast to int if porting.
        start_point = simulation_length // 4
        LOG.info("Generating pretty pictures ...")
        TAVG = numpy.load('region_deterministic_bnm_sj3d_tavg_' + idx[i] + '.npy')
        tavg_time = numpy.load('region_deterministic_bnm_sj3d_tavgtime_' + idx[i] + '.npy')[start_point:]
        fig= figure(1)
        clf()
        for k in range(3):
            # load data
            # compute time and use sim_length
            # Middle row: time series of state variables 0 and 1 (mode m=1).
            ax=subplot(3, 3, 4+k)
            plot(tavg_time, TAVG[start_point:, 0, :, k],'k', alpha=0.013, linewidth=3)
            plot(tavg_time, TAVG[start_point:, 1, :, k],'r', alpha=0.013, linewidth=3)
            plot(tavg_time, TAVG[start_point:, 0, :, k].mean(axis=1), 'k')
            plot(tavg_time, TAVG[start_point:, 1, :, k].mean(axis=1), 'r')
            ylim([-10, 3])
            xlim([start_point, int(simulation_length)])
            for label in ax.get_yticklabels():
                label.set_fontsize(20)
            ax.get_xaxis().set_ticks([])
            ax.get_yaxis().set_ticks([])
            if k==0:
                ylabel('[au]')
                yticks((-8, 0, 2), ('-8', '0', '2'))
                title(r'TS ($m=1$)')
            # Bottom row: time series of state variables 3 and 4 (mode m=2).
            ax=subplot(3, 3, 7+k)
            plot(tavg_time, TAVG[start_point:, 3, :, k],'k', alpha=0.013, linewidth=3)
            plot(tavg_time, TAVG[start_point:, 4, :, k],'r', alpha=0.013, linewidth=3)
            plot(tavg_time, TAVG[start_point:, 3, :, k].mean(axis=1), 'k')
            plot(tavg_time, TAVG[start_point:, 4, :, k].mean(axis=1), 'r')
            ylim([-10, 3])
            xlim([start_point, int(simulation_length)])
            ax.get_xaxis().set_ticks([])
            ax.get_yaxis().set_ticks([])
            xticks((start_point, simulation_length /2. , simulation_length),
                   (str(int(start_point)), str(int(simulation_length //2)), str(int(simulation_length))))
            xlabel('time[ms]')
            if k==0:
                ylabel('[au]')
                yticks((-8, 0, 2), ('-8', '0', '2'))
                title(r'TS ($m=2$)')
            # Top row: phase-plane projection (variable 0 vs variable 1).
            ax=subplot(3, 3, 1+k)
            plot(TAVG[start_point:, 0, :, k], TAVG[start_point:, 1, :, k],'b', alpha=0.013)
            plot(TAVG[start_point:, 0, :, k].mean(axis=1), TAVG[start_point:, 1, :, k].mean(axis=1), 'b')
            title(r'PP ($o=%s$)' % str(k))
            # plot(TAVG[:, 3, :, k], TAVG[:, 4, :, k],'b', alpha=0.042)
            # plot(TAVG[:, 3, :, k].mean(axis=1), TAVG[:, 4, :, k].mean(axis=1), 'b')
            ax.yaxis.set_label_position("right")
            ylim([-10, 3])
            xlim([-10, 3])
            ax.get_xaxis().set_ticks([])
            ax.get_yaxis().set_ticks([])
            if k==1:
                xticks((-8, 0, 2), ('-8', '0', '2'))
                ax.xaxis.labelpad = -10
                xlabel(r'$\xi$')
                yticks((-8, 0, 2), ('-8', '0', '2'))
                ylabel(r'$\eta$')
        fig_name = 'SJ3D_default_speed_' + str(int(speed)) + '-config_gcs-' + idx[i] + '.pdf'
        savefig(fig_name)
###EoF###
| gpl-2.0 |
CFIS-Octarine/octarine | src/ossos_tests/test_ossos/test_fitsviewer/test_interaction.py | 2 | 7159 | __author__ = "David Rusk <drusk@uvic.ca>"
import unittest
from hamcrest import assert_that, instance_of, equal_to, none
from matplotlib.backend_bases import MouseEvent as MPLMouseEvent
from mock import Mock, patch
from ossos.fitsviewer.displayable import ImageSinglet
from ossos.fitsviewer.interaction import InteractionContext, MoveMarkerState, CreateMarkerState, AdjustColormapState
class InteractionTest(unittest.TestCase):
    """Exercises InteractionContext state transitions and marker editing by
    firing synthetic matplotlib mouse events at an ImageSinglet."""

    def setUp(self):
        mainhdu = Mock()
        mainhdu.data.shape = (100, 100)
        self.hdulist = [mainhdu]
        self.figure = Mock()
        with patch.object(ImageSinglet, "_create_axes"):
            self.image = ImageSinglet(self.hdulist, self.figure, [0, 0, 1, 1])
        self.image.axes = Mock()
        self.interaction_context = InteractionContext(self.image)

    def _create_mouse_event(self, x, y, button, inaxes=True):
        """Build a mock matplotlib MouseEvent at (x, y) with the given button;
        `inaxes` controls whether the event lands on the image's axes."""
        event = Mock(spec=MPLMouseEvent)
        event.x = x
        event.xdata = x
        event.y = y
        event.ydata = y
        event.button = button
        if inaxes:
            event.inaxes = self.image.axes
        else:
            event.inaxes = Mock()  # a new, different axes
        return event

    def fire_press_event(self, x, y, button=InteractionContext.MOUSE_BUTTON_LEFT,
                         inaxes=True):
        self.interaction_context.on_press(
            self._create_mouse_event(x, y, button, inaxes))

    def fire_release_event(self, button=InteractionContext.MOUSE_BUTTON_LEFT):
        event = Mock(spec=MPLMouseEvent)
        event.button = button
        self.interaction_context.on_release(event)

    def fire_motion_event(self, x, y, inaxes=True):
        # BUG FIX: `inaxes` was previously passed positionally into the
        # `button` parameter of _create_mouse_event, so motion events got
        # button=<inaxes flag> and inaxes was always the default True.
        # Motion events carry no meaningful button here, so pass None.
        self.interaction_context.on_motion(
            self._create_mouse_event(x, y, None, inaxes))

    def test_state_click_in_circle(self):
        x = 10
        y = 10
        radius = 5
        self.image.place_marker(x, y, radius)
        self.fire_press_event(x + 2, y + 2)
        assert_that(self.interaction_context.state, instance_of(MoveMarkerState))

    def test_press_release(self):
        x = 10
        y = 10
        radius = 5
        self.image.place_marker(x, y, radius)
        assert_that(not self.interaction_context.state.pressed)
        self.fire_press_event(x + 2, y + 2)
        assert_that(self.interaction_context.state.pressed)
        self.fire_release_event()
        assert_that(not self.interaction_context.state.pressed)

    def test_state_click_outside_marker(self):
        x = 10
        y = 10
        radius = 5
        self.image.place_marker(x, y, radius)
        # Clicking inside the marker selects the move state...
        self.fire_press_event(x + 2, y + 2)
        assert_that(self.interaction_context.state, instance_of(MoveMarkerState))
        self.fire_release_event()
        assert_that(self.interaction_context.state, instance_of(MoveMarkerState))
        # ...while clicking outside it starts creating a new marker.
        self.fire_press_event(x + 6, y + 6)
        assert_that(self.interaction_context.state, instance_of(CreateMarkerState))

    def test_state_right_click(self):
        x = 10
        y = 10
        # Left-click creates markers; right-click adjusts the colormap.
        self.fire_press_event(x, y, button=InteractionContext.MOUSE_BUTTON_LEFT)
        assert_that(self.interaction_context.state, instance_of(CreateMarkerState))
        self.fire_release_event(button=InteractionContext.MOUSE_BUTTON_LEFT)
        self.fire_press_event(x, y, button=InteractionContext.MOUSE_BUTTON_RIGHT)
        assert_that(self.interaction_context.state, instance_of(AdjustColormapState))
        self.fire_release_event(button=InteractionContext.MOUSE_BUTTON_RIGHT)
        self.fire_press_event(x, y, button=InteractionContext.MOUSE_BUTTON_LEFT)
        assert_that(self.interaction_context.state, instance_of(CreateMarkerState))
        self.fire_release_event(button=InteractionContext.MOUSE_BUTTON_LEFT)

    def test_drag_marker(self):
        x0 = 10
        y0 = 10
        radius = 5
        xclick = x0 + 2
        yclick = y0 + 2
        dx = 10
        dy = 5
        self.image.place_marker(x0, y0, radius)
        assert_that(self.interaction_context.get_marker().center, equal_to((x0, y0)))
        # Dragging moves the marker by the drag delta; radius is unchanged.
        self.fire_press_event(xclick, yclick)
        self.fire_motion_event(xclick + dx, yclick + dy)
        assert_that(self.interaction_context.get_marker().center,
                    equal_to((x0 + dx, y0 + dy)))
        assert_that(self.interaction_context.get_marker().radius, equal_to(radius))

    def test_create_marker(self):
        x0 = 10
        y0 = 10
        dx = 10
        dy = 30
        assert_that(self.interaction_context.get_marker(), none())
        # Press-drag with no existing marker creates one centred on the drag
        # midpoint with radius = half the larger drag extent.
        self.fire_press_event(x0, y0)
        self.fire_motion_event(x0 + dx, y0 + dy)
        assert_that(self.interaction_context.get_marker().center,
                    equal_to((15, 25)))
        assert_that(self.interaction_context.get_marker().radius, equal_to(15))

    def test_motion_not_pressed(self):
        x = 10
        y = 10
        radius = 5
        self.image.place_marker(x, y, radius)
        # Motion without a prior press must not disturb the marker,
        # whichever state is active.
        self.interaction_context.state = CreateMarkerState(self.interaction_context)
        self.fire_motion_event(x + 2, y + 2)
        assert_that(self.interaction_context.get_marker().center, equal_to((x, y)))
        assert_that(self.interaction_context.get_marker().radius, equal_to(radius))
        self.interaction_context.state = MoveMarkerState(self.interaction_context)
        self.fire_motion_event(x + 2, y + 2)
        assert_that(self.interaction_context.get_marker().center, equal_to((x, y)))
        assert_that(self.interaction_context.get_marker().radius, equal_to(radius))

    def test_click_no_drag_inside_marker(self):
        x = 10
        y = 10
        radius = 5
        self.image.place_marker(x, y, radius)
        click_x = 12
        click_y = 13
        # A click without dragging recentres the marker on the click point.
        self.fire_press_event(click_x, click_y)
        self.fire_release_event()
        assert_that(self.interaction_context.get_marker().center,
                    equal_to((click_x, click_y)))

    def test_click_no_drag_outside_marker(self):
        x = 10
        y = 10
        radius = 5
        self.image.place_marker(x, y, radius)
        click_x = 20
        click_y = 21
        self.fire_press_event(click_x, click_y)
        self.fire_release_event()
        assert_that(self.interaction_context.get_marker().center,
                    equal_to((click_x, click_y)))

    def test_xy_changed_event_on_click(self):
        handler = Mock()
        self.image.xy_changed.connect(handler)
        self.image.place_marker(10, 10, 5)
        x_click = 20
        y_click = 30
        self.fire_press_event(x_click, y_click)
        self.fire_release_event()
        handler.assert_called_once_with(x_click, y_click)

    def test_xy_changed_event_on_drag(self):
        handler = Mock()
        self.image.xy_changed.connect(handler)
        x0 = 10
        y0 = 10
        radius = 5
        self.image.place_marker(x0, y0, radius)
        xclick = x0 + 2
        yclick = y0 + 2
        dx = 10
        dy = 20
        self.fire_press_event(xclick, yclick)
        self.fire_motion_event(xclick + dx, yclick + dy)
        handler.assert_called_once_with(x0 + dx, y0 + dy)
# Allow running this test module directly: python test_interaction.py
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
ericmjl/bokeh | examples/reference/models/radio_button_group_server.py | 1 | 1317 | ## Bokeh server for Radio Button Group
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import row
from bokeh.models import ColumnDataSource, RadioButtonGroup
from bokeh.plotting import figure
x=[3,4,6,12,10,1,5,6,3,8]
y=[7,1,3,4,1,6,10,4,10,3]
label=['Red', 'Orange', 'Red', 'Orange','Red', 'Orange','Red', 'Orange','Red', 'Orange',]
df=pd.DataFrame({'x':x,'y':y,'label':label})
source = ColumnDataSource(data=dict(x=df.x, y=df.y,label=df.label))
plot_figure = figure(title='Radio Button Group',plot_height=450, plot_width=600,
tools="save,reset", toolbar_location="below")
plot_figure.scatter('x', 'y',color='label', source=source, size=10)
radio_button_group = RadioButtonGroup(labels=["Red", "Orange"])
def radiogroup_click(attr, old, new):
    """Bokeh callback: filter the data source to the colour selected in the
    radio button group.

    :param attr: name of the changed attribute (always 'active' here)
    :param old: previous active button index
    :param new: new active button index
    """
    active_radio = radio_button_group.active  # Getting radio button value
    # BUG FIX / generalization: look the label up from the widget instead of
    # one hard-coded branch per colour.  The original left `selected_df`
    # unbound (NameError) for any index other than 0 or 1, and silently
    # broke if the labels list grew.
    selected_label = radio_button_group.labels[active_radio]
    selected_df = df[df['label'] == selected_label]
    source.data = dict(x=selected_df.x, y=selected_df.y, label=selected_df.label)
# Re-filter the data source whenever the active radio button changes.
radio_button_group.on_change('active',radiogroup_click)
layout=row(radio_button_group, plot_figure)
# Register the layout with the current document so `bokeh serve` renders it.
curdoc().add_root(layout)
curdoc().title = "Radio Button Group Bokeh Server"
| bsd-3-clause |
sgiavasis/nipype | build_docs.py | 10 | 6995 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Code to build the documentation in the setup.py
To use this code, run::
python setup.py build_sphinx
"""
from __future__ import print_function
# Standard library imports
import sys
import os
from os.path import join as pjoin
import zipfile
import warnings
import shutil
from distutils.cmd import Command
from distutils.command.clean import clean
# Execute nipype/info.py and capture its module-level names (version,
# metadata, ...) into INFO_VARS without importing the package itself.
_info_fname = pjoin(os.path.dirname(__file__), 'nipype', 'info.py')
INFO_VARS = {}
exec(open(_info_fname, 'rt').read(), {}, INFO_VARS)
# Output locations used by the sphinx build and the Clean command below.
DOC_BUILD_DIR = os.path.join('doc', '_build', 'html')
DOC_DOCTREES_DIR = os.path.join('doc', '_build', 'doctrees')
################################################################################
# Distutils Command class for installing nipype to a temporary location.
class TempInstall(Command):
    """distutils command: build and install nipype into build/install so the
    freshly built package can be imported for doc generation."""
    temp_install_dir = os.path.join('build', 'install')
    def run(self):
        """ build and install nipype in a temporary location. """
        install = self.distribution.get_command_obj('install')
        # Redirect every install target into the temporary directory.
        install.install_scripts = self.temp_install_dir
        install.install_base = self.temp_install_dir
        install.install_platlib = self.temp_install_dir
        install.install_purelib = self.temp_install_dir
        install.install_data = self.temp_install_dir
        install.install_lib = self.temp_install_dir
        install.install_headers = self.temp_install_dir
        install.run()
        # Horrible trick to reload nipype with our temporary install:
        # drop any already-imported nipype modules first.
        for key in list(sys.modules.keys()):
            if key.startswith('nipype'):
                sys.modules.pop(key, None)
        sys.path.append(os.path.abspath(self.temp_install_dir))
        # Pop the cwd
        sys.path.pop(0)
        # Import side effect only -- presumably verifies the temporary
        # install is importable (TODO confirm intent).
        import nipype
    def initialize_options(self):
        # This command takes no options.
        pass
    def finalize_options(self):
        pass
################################################################################
# Distutils Command class for API generation
class APIDocs(TempInstall):
    """distutils command: install nipype to a temporary location, then run
    the API-doc generation scripts in tools/ against that install."""
    description = \
        """generate API docs """
    user_options = [
        ('None', None, 'this command has no options'),
    ]

    def run(self):
        # First build the project and install it to a temporary location.
        TempInstall.run(self)
        os.chdir('doc')
        try:
            # We are running the API-building script via a
            # system call, but overriding the import path.
            toolsdir = os.path.abspath(pjoin('..', 'tools'))
            for docbuilder in ['build_interface_docs.py']:
                build_templates = pjoin(toolsdir, docbuilder)
                # BUG FIX: execfile() was removed in Python 3; use the
                # equivalent exec(open(...).read(), globals) form so the
                # spawned interpreter works on both Python 2 and 3.
                cmd = """%s -c 'import sys; sys.path.append("%s"); sys.path.append("%s"); exec(open("%s").read(), dict(__name__="__main__"))'""" \
                        % (sys.executable,
                           toolsdir,
                           self.temp_install_dir,
                           build_templates)
                os.system(cmd)
        finally:
            # Always return to the project root, even if doc building fails.
            os.chdir('..')
################################################################################
# Code to copy the sphinx-generated html docs in the distribution.
def relative_path(filename):
    """ Return the relative path to the file, assuming the file is
        in the DOC_BUILD_DIR directory.
    """
    # Strip the absolute DOC_BUILD_DIR prefix plus its trailing separator.
    prefix_len = len(os.path.abspath(DOC_BUILD_DIR)) + 1
    return os.path.abspath(filename)[prefix_len:]
################################################################################
# Distutils Command class build the docs
# Sphinx import.
try:
    from sphinx.setup_command import BuildDoc
except:
    # NOTE(review): bare except -- any import-time error (not just a missing
    # sphinx) silently disables the build_sphinx command.
    MyBuildDoc = None
else:
    class MyBuildDoc(BuildDoc):
        """ Sub-class the standard sphinx documentation building system, to
        add logics for API generation and matplotlib's plot directive.
        """
        def run(self):
            # Generate the API docs before invoking sphinx proper.
            self.run_command('api_docs')
            # We need to be in the doc directory for the plot_directive
            # and API generation to work
            """
            os.chdir('doc')
            try:
                BuildDoc.run(self)
            finally:
                os.chdir('..')
            """
            # It put's the build in a doc/doc/_build directory with the
            # above?!?! I'm leaving the code above here but commented out
            # in case I'm missing something?
            BuildDoc.run(self)
            self.zip_docs()
        def zip_docs(self):
            # Bundle the rendered HTML into doc/documentation.zip.
            if not os.path.exists(DOC_BUILD_DIR):
                raise OSError('Doc directory does not exist.')
            target_file = os.path.join('doc', 'documentation.zip')
            # ZIP_DEFLATED actually compresses the archive. However, there
            # will be a RuntimeError if zlib is not installed, so we check
            # for it. ZIP_STORED produces an uncompressed zip, but does not
            # require zlib.
            try:
                zf = zipfile.ZipFile(target_file, 'w',
                                     compression=zipfile.ZIP_DEFLATED)
            except RuntimeError:
                warnings.warn('zlib not installed, storing the docs '
                              'without compression')
                zf = zipfile.ZipFile(target_file, 'w',
                                     compression=zipfile.ZIP_STORED)
            # Walk the HTML tree, skipping sphinx's pickled environment.
            for root, dirs, files in os.walk(DOC_BUILD_DIR):
                relative = relative_path(root)
                if not relative.startswith('.doctrees'):
                    for f in files:
                        zf.write(os.path.join(root, f),
                                 os.path.join(relative, 'html_docs', f))
            zf.close()
        def finalize_options(self):
            """ Override the default for the documentation build
            directory.
            """
            # Build under doc/_build (DOC_BUILD_DIR minus its last component).
            self.build_dir = os.path.join(*DOC_BUILD_DIR.split(os.sep)[:-1])
            BuildDoc.finalize_options(self)
################################################################################
# Distutils Command class to clean
class Clean(clean):
    """Extends the standard clean command to also remove the generated
    documentation directories."""

    def run(self):
        clean.run(self)
        # Same removal order as before: API docs, interface docs, then the
        # sphinx html and doctrees output.
        doomed = [
            os.path.join('doc', 'api', 'generated'),
            os.path.join('doc', 'interfaces', 'generated'),
            DOC_BUILD_DIR,
            DOC_DOCTREES_DIR,
        ]
        for path in doomed:
            if os.path.exists(path):
                print("Removing %s" % path)
                shutil.rmtree(path)
# The command classes for distutils, used by the setup.py
# NOTE: 'build_sphinx' maps to None when sphinx failed to import (see the
# try/except guard around MyBuildDoc above).
cmdclass = {'build_sphinx': MyBuildDoc,
            'api_docs': APIDocs,
            'clean': Clean,
            }
| bsd-3-clause |
ClementPhil/deep-learning | image-classification/helper.py | 155 | 5631 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
    """
    Load one CIFAR-10 training batch.
    Returns (features, labels) where features has shape (N, 32, 32, 3)
    (channels last) and labels is the raw label list from the pickle.
    """
    batch_path = '{0}/data_batch_{1}'.format(cifar10_dataset_folder_path, batch_id)
    with open(batch_path, mode='rb') as batch_file:
        raw = pickle.load(batch_file, encoding='latin1')
    # Raw rows are 3072 bytes in planar RGB order; reorder to HWC images.
    images = raw['data'].reshape((len(raw['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
    return images, raw['labels']
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
    """
    Print summary statistics for one CIFAR-10 training batch and display one
    sample image with matplotlib.
    :param cifar10_dataset_folder_path: folder containing the data_batch_* files
    :param batch_id: training batch number; must be in 1..5
    :param sample_id: index of the sample image within the batch
    :return: None; prints an error and returns early on out-of-range ids
    """
    batch_ids = list(range(1, 6))
    if batch_id not in batch_ids:
        print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
        return None
    features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
    if not (0 <= sample_id < len(features)):
        print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
        return None
    print('\nStats of batch {}:'.format(batch_id))
    print('Samples: {}'.format(len(features)))
    # zip(*np.unique(...)) pairs each distinct label with its count
    print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
    print('First 20 Labels: {}'.format(labels[:20]))
    sample_image = features[sample_id]
    sample_label = labels[sample_id]
    label_names = _load_label_names()
    print('\nExample of Image {}:'.format(sample_id))
    print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
    print('Image - Shape: {}'.format(sample_image.shape))
    print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
    plt.axis('off')
    plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
    """
    Preprocess all five CIFAR-10 training batches plus the test batch and
    pickle the results to the working directory.
    The last 10% of every training batch is pooled into one validation set
    ('preprocess_validation.p'); the remainder of each batch is saved as
    'preprocess_batch_<i>.p' and the test batch as 'preprocess_test.p'.
    :param cifar10_dataset_folder_path: folder containing the raw batch files
    :param normalize: callable applied to the image arrays
    :param one_hot_encode: callable applied to the label lists
    """
    n_batches = 5
    valid_features = []
    valid_labels = []
    for batch_i in range(1, n_batches + 1):
        features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
        validation_count = int(len(features) * 0.1)
        # Preprocess and save a batch of training data (without the tail
        # reserved for validation)
        _preprocess_and_save(
            normalize,
            one_hot_encode,
            features[:-validation_count],
            labels[:-validation_count],
            'preprocess_batch_' + str(batch_i) + '.p')
        # Use a portion of training batch for validation
        valid_features.extend(features[-validation_count:])
        valid_labels.extend(labels[-validation_count:])
    # Preprocess and Save all validation data
    _preprocess_and_save(
        normalize,
        one_hot_encode,
        np.array(valid_features),
        np.array(valid_labels),
        'preprocess_validation.p')
    with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
        batch = pickle.load(file, encoding='latin1')
    # load the test data (planar RGB rows -> HWC images)
    test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
    test_labels = batch['labels']
    # Preprocess and Save all test data
    _preprocess_and_save(
        normalize,
        one_hot_encode,
        np.array(test_features),
        np.array(test_labels),
        'preprocess_test.p')
def batch_features_labels(features, labels, batch_size):
    """Yield (features, labels) slice pairs of at most ``batch_size`` items."""
    total = len(features)
    begin = 0
    while begin < total:
        stop = min(begin + batch_size, total)
        yield features[begin:stop], labels[begin:stop]
        begin += batch_size
def load_preprocess_training_batch(batch_id, batch_size):
    """
    Load the preprocessed training data for ``batch_id`` and return it in
    batches of <batch_size> or less.
    :param batch_id: training batch number (matching preprocess_batch_<i>.p)
    :param batch_size: maximum number of samples per yielded batch
    :return: generator of (features, labels) pairs
    """
    filename = 'preprocess_batch_' + str(batch_id) + '.p'
    # BUGFIX: the original called pickle.load(open(filename, 'rb')) and never
    # closed the handle; the context manager releases it deterministically.
    with open(filename, mode='rb') as batch_file:
        features, labels = pickle.load(batch_file)
    # Return the training data in batches of size <batch_size> or less
    return batch_features_labels(features, labels, batch_size)
def display_image_predictions(features, labels, predictions):
    """
    Plot each sample image next to a horizontal bar chart of its top softmax
    predictions.
    :param features: image arrays to display
    :param labels: one-hot encoded true labels (decoded via LabelBinarizer)
    :param predictions: object exposing ``.indices`` and ``.values`` per
        sample -- presumably the output of tf.nn.top_k with k == 3; verify
        against the caller.
    """
    n_classes = 10
    label_names = _load_label_names()
    # Invert the one-hot encoding back to integer label ids
    label_binarizer = LabelBinarizer()
    label_binarizer.fit(range(n_classes))
    label_ids = label_binarizer.inverse_transform(np.array(labels))
    fig, axies = plt.subplots(nrows=4, ncols=2)
    fig.tight_layout()
    fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
    n_predictions = 3
    margin = 0.05
    ind = np.arange(n_predictions)
    width = (1. - 2. * margin) / n_predictions
    for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
        pred_names = [label_names[pred_i] for pred_i in pred_indicies]
        correct_name = label_names[label_id]
        # Left column: the image titled with its true class name
        axies[image_i][0].imshow(feature)
        axies[image_i][0].set_title(correct_name)
        axies[image_i][0].set_axis_off()
        # Right column: bar chart, highest-probability prediction on top
        axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
        axies[image_i][1].set_yticks(ind + margin)
        axies[image_i][1].set_yticklabels(pred_names[::-1])
        axies[image_i][1].set_xticks([0, 0.5, 1.0])
| mit |
FHannes/intellij-community | python/testData/debug/test_dataframe.py | 23 | 1309 | import pandas as pd
import numpy as np
# Debugger fixture script: builds several DataFrames so an IDE debugger test
# can break on the print() calls and inspect the frames in the variables view.
# NOTE(review): the "###line N" / "##line N" comments mark breakpoint lines in
# the original test file and appear to be load-bearing for the test harness;
# do not renumber or move them.
df1 = pd.DataFrame({'row': [0, 1, 2],
                    'One_X': [1.1, 1.1, 1.1],
                    'One_Y': [1.2, 1.2, 1.2],
                    'Two_X': [1.11, 1.11, 1.11],
                    'Two_Y': [1.22, 1.22, 1.22]})
print(df1) ###line 8
df2 = pd.DataFrame({'row': [0, 1, 2],
                    'One_X': [1.1, 1.1, 1.1],
                    'One_Y': [1.2, 1.2, 1.2],
                    'Two_X': [1.11, 1.11, 1.11],
                    'Two_Y': [1.22, 1.22, 1.22],
                    'LABELS': ['A', 'B', 'C']})
print(df2) ##line 16
df3 = pd.DataFrame(data={'Province' : ['ON','QC','BC','AL','AL','MN','ON'],
                         'City' : ['Toronto','Montreal','Vancouver','Calgary','Edmonton','Winnipeg','Windsor'],
                         'Sales' : [13,6,16,8,4,3,1]})
# Pivot of Sales by Province x City (with margins); the stack() result is
# unused -- it exists only to exercise the debugger on a MultiIndex frame.
table = pd.pivot_table(df3,values=['Sales'],index=['Province'],columns=['City'],aggfunc=np.sum,margins=True)
table.stack('City')
print(df3)
df4 = pd.DataFrame({'row': np.random.random(10000),
                    'One_X': np.random.random(10000),
                    'One_Y': np.random.random(10000),
                    'Two_X': np.random.random(10000),
                    'Two_Y': np.random.random(10000),
                    'LABELS': ['A'] * 10000})
print(df4) ##line 31
| apache-2.0 |
low-sky/GAS | ah_bootstrap.py | 31 | 36163 | """
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]``; the presence of
that section, and options therein, determines the next step taken: If it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module called
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken (however,
``ah_bootstrap.use_astropy_helpers`` may be called manually from within the
setup.py script).
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import imp
import io
import locale
import os
import re
import subprocess as sp
import sys
try:
from ConfigParser import ConfigParser, RawConfigParser
except ImportError:
from configparser import ConfigParser, RawConfigParser
if sys.version_info[0] < 3:
_str_types = (str, unicode)
_text_type = unicode
PY3 = False
else:
_str_types = (str, bytes)
_text_type = str
PY3 = True
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):
# Some pre-setuptools checks to ensure that either distribute or setuptools >=
# 0.7 is used (over pre-distribute setuptools) if it is available on the path;
# otherwise the latest setuptools will be downloaded and bootstrapped with
# ``ez_setup.py``. This used to be included in a separate file called
# setuptools_bootstrap.py; but it was combined into ah_bootstrap.py
try:
import pkg_resources
_setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7')
# This may raise a DistributionNotFound in which case no version of
# setuptools or distribute is properly installed
_setuptools = pkg_resources.get_distribution('setuptools')
if _setuptools not in _setuptools_req:
# Older version of setuptools; check if we have distribute; again if
# this results in DistributionNotFound we want to give up
_distribute = pkg_resources.get_distribution('distribute')
if _setuptools != _distribute:
# It's possible on some pathological systems to have an old version
# of setuptools and distribute on sys.path simultaneously; make
# sure distribute is the one that's used
sys.path.insert(1, _distribute.location)
_distribute.activate()
imp.reload(pkg_resources)
except:
# There are several types of exceptions that can occur here; if all else
# fails bootstrap and use the bootstrapped version
from ez_setup import use_setuptools
use_setuptools()
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
import setuptools.py31compat
except ImportError:
pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
except:
# Ignore if this fails for *any* reason*
pass
# End compatibility imports...
# In case it didn't successfully import before the ez_setup checks
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
from setuptools.sandbox import run_setup
from distutils import log
from distutils.debug import DEBUG
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
('auto_use', bool), ('path', str), ('download_if_needed', bool),
('index_url', str), ('use_git', bool), ('offline', bool),
('auto_upgrade', bool)
]
class _Bootstrapper(object):
"""
Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
documentation.
"""
    def __init__(self, path=None, index_url=None, use_git=None, offline=None,
                 download_if_needed=None, auto_upgrade=None):
        # See use_astropy_helpers for the meaning of each option; None means
        # "fall back to the module-level default".
        if path is None:
            path = PACKAGE_NAME
        if not (isinstance(path, _str_types) or path is False):
            raise TypeError('path must be a string or False')
        if PY3 and not isinstance(path, _text_type):
            fs_encoding = sys.getfilesystemencoding()
            path = path.decode(fs_encoding)  # path to unicode
        self.path = path
        # Set other option attributes, using defaults where necessary
        self.index_url = index_url if index_url is not None else INDEX_URL
        self.offline = offline if offline is not None else OFFLINE
        # If offline=True, override download and auto-upgrade
        if self.offline:
            download_if_needed = False
            auto_upgrade = False
        self.download = (download_if_needed
                         if download_if_needed is not None
                         else DOWNLOAD_IF_NEEDED)
        self.auto_upgrade = (auto_upgrade
                             if auto_upgrade is not None else AUTO_UPGRADE)
        # If this is a release then the .git directory will not exist so we
        # should not use git.
        git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
        if use_git is None and not git_dir_exists:
            use_git = False
        self.use_git = use_git if use_git is not None else USE_GIT
        # Declared as False by default--later we check if astropy-helpers can be
        # upgraded from PyPI, but only if not using a source distribution (as in
        # the case of import from a git submodule)
        self.is_submodule = False
    @classmethod
    def main(cls, argv=None):
        # Entry point: build a bootstrapper from setup.cfg plus command-line
        # overrides and, if [ah_bootstrap]auto_use is set, run it immediately.
        if argv is None:
            argv = sys.argv
        config = cls.parse_config()
        config.update(cls.parse_command_line(argv))
        auto_use = config.pop('auto_use', False)
        bootstrapper = cls(**config)
        if auto_use:
            # Run the bootstrapper, otherwise the setup.py is using the old
            # use_astropy_helpers() interface, in which case it will run the
            # bootstrapper manually after reconfiguring it.
            bootstrapper.run()
        return bootstrapper
    @classmethod
    def parse_config(cls):
        """Read bootstrap options from the [ah_bootstrap] section of
        setup.cfg, returning them as a dict (empty on any problem)."""
        if not os.path.exists('setup.cfg'):
            return {}
        cfg = ConfigParser()
        try:
            cfg.read('setup.cfg')
        except Exception as e:
            if DEBUG:
                raise
            log.error(
                "Error reading setup.cfg: {0!r}\n{1} will not be "
                "automatically bootstrapped and package installation may fail."
                "\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
            return {}
        if not cfg.has_section('ah_bootstrap'):
            return {}
        config = {}
        # Coerce each recognized option to its declared type; options not
        # listed in CFG_OPTIONS are silently ignored.
        for option, type_ in CFG_OPTIONS:
            if not cfg.has_option('ah_bootstrap', option):
                continue
            if type_ is bool:
                value = cfg.getboolean('ah_bootstrap', option)
            else:
                value = cfg.get('ah_bootstrap', option)
            config[option] = value
        return config
@classmethod
def parse_command_line(cls, argv=None):
if argv is None:
argv = sys.argv
config = {}
# For now we just pop recognized ah_bootstrap options out of the
# arg list. This is imperfect; in the unlikely case that a setup.py
# custom command or even custom Distribution class defines an argument
# of the same name then we will break that. However there's a catch22
# here that we can't just do full argument parsing right here, because
# we don't yet know *how* to parse all possible command-line arguments.
if '--no-git' in argv:
config['use_git'] = False
argv.remove('--no-git')
if '--offline' in argv:
config['offline'] = True
argv.remove('--offline')
return config
    def run(self):
        """Locate an astropy_helpers distribution (local directory, local
        archive, or package index -- in that order) and activate it on
        sys.path, raising _AHBootstrapSystemExit if none can be found."""
        strategies = ['local_directory', 'local_file', 'index']
        dist = None
        # First, remove any previously imported versions of astropy_helpers;
        # this is necessary for nested installs where one package's installer
        # is installing another package via setuptools.sandbox.run_setup, as in
        # the case of setup_requires
        for key in list(sys.modules):
            try:
                if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
                    del sys.modules[key]
            except AttributeError:
                # Sometimes mysterious non-string things can turn up in
                # sys.modules
                continue
        # Check to see if the path is a submodule
        self.is_submodule = self._check_submodule()
        # Each strategy maps to a get_<strategy>_dist method; the first one
        # returning a non-None distribution wins.
        for strategy in strategies:
            method = getattr(self, 'get_{0}_dist'.format(strategy))
            dist = method()
            if dist is not None:
                break
        else:
            raise _AHBootstrapSystemExit(
                "No source found for the {0!r} package; {0} must be "
                "available and importable as a prerequisite to building "
                "or installing this package.".format(PACKAGE_NAME))
        # This is a bit hacky, but if astropy_helpers was loaded from a
        # directory/submodule its Distribution object gets a "precedence" of
        # "DEVELOP_DIST". However, in other cases it gets a precedence of
        # "EGG_DIST". However, when activing the distribution it will only be
        # placed early on sys.path if it is treated as an EGG_DIST, so always
        # do that
        dist = dist.clone(precedence=pkg_resources.EGG_DIST)
        # Otherwise we found a version of astropy-helpers, so we're done
        # Just active the found distribution on sys.path--if we did a
        # download this usually happens automatically but it doesn't hurt to
        # do it again
        # Note: Adding the dist to the global working set also activates it
        # (makes it importable on sys.path) by default.
        try:
            pkg_resources.working_set.add(dist, replace=True)
        except TypeError:
            # Some (much) older versions of setuptools do not have the
            # replace=True option here. These versions are old enough that all
            # bets may be off anyways, but it's easy enough to work around just
            # in case...
            if dist.key in pkg_resources.working_set.by_key:
                del pkg_resources.working_set.by_key[dist.key]
            pkg_resources.working_set.add(dist)
@property
def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname))
    def get_local_directory_dist(self):
        """
        Handle importing a vendored package from a subdirectory of the source
        distribution.
        Returns the Distribution found there (possibly replaced by a PyPI
        bugfix upgrade), or None if self.path is not a directory.
        """
        if not os.path.isdir(self.path):
            return
        log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
            'submodule' if self.is_submodule else 'directory',
            self.path))
        dist = self._directory_import()
        if dist is None:
            log.warn(
                'The requested path {0!r} for importing {1} does not '
                'exist, or does not contain a copy of the {1} '
                'package.'.format(self.path, PACKAGE_NAME))
        elif self.auto_upgrade and not self.is_submodule:
            # A version of astropy-helpers was found on the available path, but
            # check to see if a bugfix release is available on PyPI
            upgrade = self._do_upgrade(dist)
            if upgrade is not None:
                dist = upgrade
        return dist
    def get_local_file_dist(self):
        """
        Handle importing from a source archive; this also uses setup_requires
        but points easy_install directly to the source archive.
        Returns the Distribution, or None if self.path is not a file or the
        unpack/import fails.
        """
        if not os.path.isfile(self.path):
            return
        log.info('Attempting to unpack and import astropy_helpers from '
                 '{0!r}'.format(self.path))
        try:
            dist = self._do_download(find_links=[self.path])
        except Exception as e:
            if DEBUG:
                raise
            log.warn(
                'Failed to import {0} from the specified archive {1!r}: '
                '{2}'.format(PACKAGE_NAME, self.path, str(e)))
            dist = None
        if dist is not None and self.auto_upgrade:
            # A version of astropy-helpers was found on the available path, but
            # check to see if a bugfix release is available on PyPI
            upgrade = self._do_upgrade(dist)
            if upgrade is not None:
                dist = upgrade
        return dist
def get_index_dist(self):
if not self.download:
log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
return False
log.warn(
"Downloading {0!r}; run setup.py with the --offline option to "
"force offline installation.".format(DIST_NAME))
try:
dist = self._do_download()
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to download and/or install {0!r} from {1!r}:\n'
'{2}'.format(DIST_NAME, self.index_url, str(e)))
dist = None
# No need to run auto-upgrade here since we've already presumably
# gotten the most up-to-date version from the package index
return dist
    def _directory_import(self):
        """
        Import astropy_helpers from the given path, which will be added to
        sys.path.
        Returns the pkg_resources Distribution found at the path (generating
        its egg_info via setup.py first if necessary), or None if no
        distribution could be found there.
        """
        path = os.path.abspath(self.path)
        # Use an empty WorkingSet rather than the main
        # pkg_resources.working_set, since on older versions of setuptools this
        # will invoke a VersionConflict when trying to install an upgrade
        ws = pkg_resources.WorkingSet([])
        ws.add_entry(path)
        dist = ws.by_key.get(DIST_NAME)
        if dist is None:
            # We didn't find an egg-info/dist-info in the given path, but if a
            # setup.py exists we can generate it
            setup_py = os.path.join(path, 'setup.py')
            if os.path.isfile(setup_py):
                with _silence():
                    run_setup(os.path.join(path, 'setup.py'),
                              ['egg_info'])
                for dist in pkg_resources.find_distributions(path, True):
                    # There should be only one...
                    return dist
        return dist
    def _do_download(self, version='', find_links=None):
        """
        Download astropy-helpers via setuptools' ``setup_requires`` machinery,
        either from ``find_links`` (a local archive) or from the package
        index; returns the resulting Distribution, or raises Exception with a
        summary of the failure.
        """
        if find_links:
            allow_hosts = ''
            index_url = None
        else:
            allow_hosts = None
            index_url = self.index_url
        # Annoyingly, setuptools will not handle other arguments to
        # Distribution (such as options) before handling setup_requires, so it
        # is not straightforward to programmatically augment the arguments which
        # are passed to easy_install
        class _Distribution(Distribution):
            def get_option_dict(self, command_name):
                opts = Distribution.get_option_dict(self, command_name)
                if command_name == 'easy_install':
                    if find_links is not None:
                        opts['find_links'] = ('setup script', find_links)
                    if index_url is not None:
                        opts['index_url'] = ('setup script', index_url)
                    if allow_hosts is not None:
                        opts['allow_hosts'] = ('setup script', allow_hosts)
                return opts
        if version:
            req = '{0}=={1}'.format(DIST_NAME, version)
        else:
            req = DIST_NAME
        attrs = {'setup_requires': [req]}
        try:
            # Instantiating the Distribution triggers the setup_requires
            # download; silence its output unless debugging.
            if DEBUG:
                _Distribution(attrs=attrs)
            else:
                with _silence():
                    _Distribution(attrs=attrs)
            # If the setup_requires succeeded it will have added the new dist to
            # the main working_set
            return pkg_resources.working_set.by_key.get(DIST_NAME)
        except Exception as e:
            if DEBUG:
                raise
            msg = 'Error retrieving {0} from {1}:\n{2}'
            if find_links:
                source = find_links[0]
            elif index_url != INDEX_URL:
                source = index_url
            else:
                source = 'PyPI'
            raise Exception(msg.format(DIST_NAME, source, repr(e)))
    def _do_upgrade(self, dist):
        """
        Check the index for a newer bugfix release of ``dist`` within the
        same minor version; download and return it if found, else None.
        """
        # Build up a requirement for a higher bugfix release but a lower minor
        # release (so API compatibility is guaranteed)
        next_version = _next_version(dist.parsed_version)
        req = pkg_resources.Requirement.parse(
            '{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
        package_index = PackageIndex(index_url=self.index_url)
        upgrade = package_index.obtain(req)
        if upgrade is not None:
            return self._do_download(version=upgrade.version)
def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git()
def _check_submodule_using_git(self):
"""
Check if the given path is a git submodule. If so, attempt to initialize
and/or update the submodule if needed.
This function makes calls to the ``git`` command in subprocesses. The
``_check_submodule_no_git`` option uses pure Python to check if the given
path looks like a git submodule, but it cannot perform updates.
"""
cmd = ['git', 'submodule', 'status', '--', self.path]
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except _CommandNotFound:
# The git command simply wasn't found; this is most likely the
# case on user systems that don't have git and are simply
# trying to install the package from PyPI or a source
# distribution. Silently ignore this case and simply don't try
# to use submodules
return False
stderr = stderr.strip()
if returncode != 0 and stderr:
# Unfortunately the return code alone cannot be relied on, as
# earlier versions of git returned 0 even if the requested submodule
# does not exist
# This is a warning that occurs in perl (from running git submodule)
# which only occurs with a malformatted locale setting which can
# happen sometimes on OSX. See again
# https://github.com/astropy/astropy/issues/2749
perl_warning = ('perl: warning: Falling back to the standard locale '
'("C").')
if not stderr.strip().endswith(perl_warning):
# Some other unknown error condition occurred
log.warn('git submodule command failed '
'unexpectedly:\n{0}'.format(stderr))
return False
# Output of `git submodule status` is as follows:
#
# 1: Status indicator: '-' for submodule is uninitialized, '+' if
# submodule is initialized but is not at the commit currently indicated
# in .gitmodules (and thus needs to be updated), or 'U' if the
# submodule is in an unstable state (i.e. has merge conflicts)
#
# 2. SHA-1 hash of the current commit of the submodule (we don't really
# need this information but it's useful for checking that the output is
# correct)
#
# 3. The output of `git describe` for the submodule's current commit
# hash (this includes for example what branches the commit is on) but
# only if the submodule is initialized. We ignore this information for
# now
_git_submodule_status_re = re.compile(
'^(?P<status>[+-U ])(?P<commit>[0-9a-f]{40}) '
'(?P<submodule>\S+)( .*)?$')
# The stdout should only contain one line--the status of the
# requested submodule
m = _git_submodule_status_re.match(stdout)
if m:
# Yes, the path *is* a git submodule
self._update_submodule(m.group('submodule'), m.group('status'))
return True
else:
log.warn(
'Unexpected output from `git submodule status`:\n{0}\n'
'Will attempt import from {1!r} regardless.'.format(
stdout, self.path))
return False
def _check_submodule_no_git(self):
"""
Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
to determine if the supplied path is a git submodule, and does not exec any
subprocesses.
This can only determine if a path is a submodule--it does not perform
updates, etc. This function may need to be updated if the format of the
.gitmodules file is changed between git versions.
"""
gitmodules_path = os.path.abspath('.gitmodules')
if not os.path.isfile(gitmodules_path):
return False
# This is a minimal reader for gitconfig-style files. It handles a few of
# the quirks that make gitconfig files incompatible with ConfigParser-style
# files, but does not support the full gitconfig syntax (just enough
# needed to read a .gitmodules file).
gitmodules_fileobj = io.StringIO()
# Must use io.open for cross-Python-compatible behavior wrt unicode
with io.open(gitmodules_path) as f:
for line in f:
# gitconfig files are more flexible with leading whitespace; just
# go ahead and remove it
line = line.lstrip()
# comments can start with either # or ;
if line and line[0] in (':', ';'):
continue
gitmodules_fileobj.write(line)
gitmodules_fileobj.seek(0)
cfg = RawConfigParser()
try:
cfg.readfp(gitmodules_fileobj)
except Exception as exc:
log.warn('Malformatted .gitmodules file: {0}\n'
'{1} cannot be assumed to be a git submodule.'.format(
exc, self.path))
return False
for section in cfg.sections():
if not cfg.has_option(section, 'path'):
continue
submodule_path = cfg.get(section, 'path').rstrip(os.sep)
if submodule_path == self.path.rstrip(os.sep):
return True
return False
def _update_submodule(self, submodule, status):
if status == ' ':
# The submodule is up to date; no action necessary
return
elif status == '-':
if self.offline:
raise _AHBootstrapSystemExit(
"Cannot initialize the {0} submodule in --offline mode; "
"this requires being able to clone the submodule from an "
"online repository.".format(submodule))
cmd = ['update', '--init']
action = 'Initializing'
elif status == '+':
cmd = ['update']
action = 'Updating'
if self.offline:
cmd.append('--no-fetch')
elif status == 'U':
raise _AHBoostrapSystemExit(
'Error: Submodule {0} contains unresolved merge conflicts. '
'Please complete or abandon any changes in the submodule so that '
'it is in a usable state, then try again.'.format(submodule))
else:
log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
'attempt to use the submodule as-is, but try to ensure '
'that the submodule is in a clean state and contains no '
'conflicts or errors.\n{2}'.format(status, submodule,
_err_help_msg))
return
err_msg = None
cmd = ['git', 'submodule'] + cmd + ['--', submodule]
log.warn('{0} {1} submodule with: `{2}`'.format(
action, submodule, ' '.join(cmd)))
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except OSError as e:
err_msg = str(e)
else:
if returncode != 0:
err_msg = stderr
if err_msg is not None:
log.warn('An unexpected error occurred updating the git submodule '
'{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
_err_help_msg))
class _CommandNotFound(OSError):
"""
An exception raised when a command run with run_cmd is not found on the
system.
"""
def run_cmd(cmd):
    """
    Run a command in a subprocess, given as a list of command-line
    arguments.
    Returns a ``(returncode, stdout, stderr)`` tuple, with stdout and stderr
    decoded to text using the default locale encoding (falling back to
    latin1).
    Raises _CommandNotFound if the executable does not exist, or
    _AHBootstrapSystemExit for any other launch failure.
    """
    try:
        p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
        # XXX: May block if either stdout or stderr fill their buffers;
        # however for the commands this is currently used for that is
        # unlikely (they should have very brief output)
        stdout, stderr = p.communicate()
    except OSError as e:
        if DEBUG:
            raise
        if e.errno == errno.ENOENT:
            msg = 'Command not found: `{0}`'.format(' '.join(cmd))
            raise _CommandNotFound(msg, cmd)
        else:
            # BUGFIX: this previously raised the misspelled name
            # `_AHBoostrapSystemExit`, producing a NameError instead of the
            # intended error.
            raise _AHBootstrapSystemExit(
                'An unexpected error occurred when running the '
                '`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
    # Can fail if the default locale is not configured properly.  See
    # https://github.com/astropy/astropy/issues/2749.  For the purposes under
    # consideration 'latin1' is an acceptable fallback.
    try:
        stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
    except ValueError:
        # Due to an OSX oddity locale.getdefaultlocale() can also crash
        # depending on the user's locale/language settings.  See:
        # http://bugs.python.org/issue18378
        stdio_encoding = 'latin1'
    # Popen pipes hand back bytes; decode them to text.  (Equivalent to the
    # old `not isinstance(..., _text_type)` check on both Python 2 and 3,
    # but without depending on the module-level _text_type alias.)
    if isinstance(stdout, bytes):
        stdout = stdout.decode(stdio_encoding, 'replace')
    if isinstance(stderr, bytes):
        stderr = stderr.decode(stdio_encoding, 'replace')
    return (p.returncode, stdout, stderr)
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
Examples
========
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _silence():
    """A context manager that silences sys.stdout and sys.stderr."""
    saved_streams = (sys.stdout, sys.stderr)
    sys.stdout = _DummyFile()
    sys.stderr = _DummyFile()
    try:
        yield
    finally:
        # Restore the real streams whether or not the body raised, so
        # exception handling upstream can print normally.
        sys.stdout, sys.stderr = saved_streams
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
class _AHBootstrapSystemExit(SystemExit):
    """SystemExit that appends the generic bootstrap help message."""
    def __init__(self, *args):
        if args:
            msg = args[0]
        else:
            msg = 'An unknown problem occurred bootstrapping astropy_helpers.'
        msg = msg + '\n' + _err_help_msg
        super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
if sys.version_info[:2] < (2, 7):
    # In Python 2.6 the distutils log does not log warnings, errors, etc. to
    # stderr so we have to wrap it to ensure consistency at least in this
    # module
    import distutils
    class log(object):
        """Proxy for distutils.log that mirrors WARN+ messages to stderr."""
        def __getattr__(self, attr):
            # Fall through to the real distutils.log for anything not
            # overridden below.
            return getattr(distutils.log, attr)
        def warn(self, msg, *args):
            self._log_to_stderr(distutils.log.WARN, msg, *args)
        # BUGFIX: error() and fatal() previously omitted *args from their
        # signatures while still passing *args along, which raised a
        # NameError whenever they were called.
        def error(self, msg, *args):
            self._log_to_stderr(distutils.log.ERROR, msg, *args)
        def fatal(self, msg, *args):
            self._log_to_stderr(distutils.log.FATAL, msg, *args)
        def log(self, level, msg, *args):
            if level in (distutils.log.WARN, distutils.log.ERROR,
                         distutils.log.FATAL):
                self._log_to_stderr(level, msg, *args)
            else:
                distutils.log.log(level, msg, *args)
        def _log_to_stderr(self, level, msg, *args):
            # This is the only truly 'public' way to get the current threshold
            # of the log
            current_threshold = distutils.log.set_threshold(distutils.log.WARN)
            distutils.log.set_threshold(current_threshold)
            if level >= current_threshold:
                if args:
                    msg = msg % args
                sys.stderr.write('%s\n' % msg)
                sys.stderr.flush()
    log = log()
# Module-level bootstrapper built from command-line/setup configuration;
# ``use_astropy_helpers`` below replaces it with a reconfigured instance.
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
    """
    Ensure that the `astropy_helpers` module is available and is importable.
    This supports automatic submodule initialization if astropy_helpers is
    included in a project as a git submodule, or will download it from PyPI if
    necessary.

    Parameters
    ----------
    path : str or None, optional
        A filesystem path relative to the root of the project's source code
        that should be added to `sys.path` so that `astropy_helpers` can be
        imported from that path.

        If the path is a git submodule it will automatically be initialized
        and/or updated.

        The path may also be to a ``.tar.gz`` archive of the astropy_helpers
        source distribution.  In this case the archive is automatically
        unpacked and made temporarily available on `sys.path` as a ``.egg``
        archive.

        If `None` skip straight to downloading.

    download_if_needed : bool, optional
        If the provided filesystem path is not found an attempt will be made to
        download astropy_helpers from PyPI.  It will then be made temporarily
        available on `sys.path` as a ``.egg`` archive (using the
        ``setup_requires`` feature of setuptools.  If the ``--offline`` option
        is given at the command line the value of this argument is overridden
        to `False`.

    index_url : str, optional
        If provided, use a different URL for the Python package index than the
        main PyPI server.

    use_git : bool, optional
        If `False` no git commands will be used--this effectively disables
        support for git submodules. If the ``--no-git`` option is given at the
        command line the value of this argument is overridden to `False`.

    auto_upgrade : bool, optional
        By default, when installing a package from a non-development source
        distribution ah_boostrap will try to automatically check for patch
        releases to astropy-helpers on PyPI and use the patched version over
        any bundled versions.  Setting this to `False` will disable that
        functionality. If the ``--offline`` option is given at the command line
        the value of this argument is overridden to `False`.

    offline : bool, optional
        If `False` disable all actions that require an internet connection,
        including downloading packages from the package index and fetching
        updates to any git submodule.  Defaults to `True`.
    """
    global BOOTSTRAPPER

    # Overlay the caller's keyword overrides onto the current bootstrapper's
    # configuration, then rebuild the bootstrapper from the merged settings
    # and run it.
    merged = dict(BOOTSTRAPPER.config, **kwargs)
    BOOTSTRAPPER = _Bootstrapper(**merged)
    BOOTSTRAPPER.run()
| mit |
thorwhalen/ut | ml/prep/relabeling.py | 1 | 3943 |
from sklearn.cluster import SpectralClustering, AgglomerativeClustering, KMeans
from sklearn.neighbors import KNeighborsClassifier
from sklearn.base import ClusterMixin
from numpy import max, array
from scipy.spatial.distance import cdist
import itertools
class DataBasedLabeling(ClusterMixin):
    """Marker base class for relabelers that derive new labels from both the
    data (X) and the existing labels (y) via a clustering backend."""
    pass
class KnnClusterDataBasedLabeling(DataBasedLabeling):
    """Relabel points by clustering their KNN class-probability profiles.

    A KNN classifier is fit on (X, y); each point's vector of predicted class
    probabilities is (optionally transformed and) fed to a clusterer, whose
    cluster assignments become the new labels.
    """

    def __init__(self,
                 knn_classifier=KNeighborsClassifier(n_neighbors=10),
                 label_proba_matrix_exponent=1,
                 clusterer=SpectralClustering(n_clusters=7)):
        # NOTE(review): the default estimator instances are created once at
        # definition time, so all instances constructed with defaults share
        # (and mutate via fit) the same objects — confirm this is intended.
        self.knn_classifier = knn_classifier
        self.label_proba_matrix_exponent = label_proba_matrix_exponent
        if isinstance(clusterer, int):
            # Convenience: a bare int is taken as the desired cluster count.
            n_clusters = clusterer
            clusterer = SpectralClustering(n_clusters=n_clusters)
        self.clusterer = clusterer

    def fit(self, X, y):
        """Fit the KNN model on (X, y), then cluster the probability matrix."""
        self.y_ = y
        self.knn_classifier.fit(X, self.y_)
        self.clusterer.fit(self.label_weights_matrix(X))
        return self

    def label_weights_matrix(self, X):
        """Return the per-point class-weight matrix handed to the clusterer."""
        label_weights_matrix = self.knn_classifier.predict_proba(X)
        if self.label_proba_matrix_exponent == 0:
            # Exponent 0: binarize at the uniform-probability threshold.
            label_weights_matrix = (label_weights_matrix > 1 / len(self.knn_classifier.classes_)).astype(float)
        else:
            # Otherwise sharpen (>1) or soften (<1) probabilities by the exponent.
            label_weights_matrix = label_weights_matrix ** self.label_proba_matrix_exponent
        return label_weights_matrix

    def fit_predict(self, X, y):
        """Fit on (X, y) and return the new labels (cluster assignments)."""
        self.fit(X, y)
        return self.clusterer.labels_
class BiDistanceDataBasedLabeling(DataBasedLabeling):
    """Relabel points by clustering a blend of feature and label distances.

    A pairwise distance matrix is formed as a convex combination of a
    feature-space distance and a label-disagreement distance, then fed to
    AgglomerativeClustering with a precomputed affinity; the resulting
    cluster ids are the new labels.
    """

    def __init__(self,
                 n_labels=7,
                 label_distance_weight=0.5,
                 label_distance='equal',
                 feature_distance='euclidean',
                 agglomerative_clustering_kwargs=None,
                 save_merged_distance_mat=False):
        self.n_labels = n_labels
        self.label_distance_weight = label_distance_weight
        self.label_distance = label_distance
        self.feature_distance = feature_distance
        # BUG FIX: the original default was ``{}.copy()``, which is evaluated
        # once at function-definition time, so every instance built with the
        # default shared the very same (mutable) dict.  Use the standard
        # None-sentinel idiom to give each instance a fresh dict.
        if agglomerative_clustering_kwargs is None:
            agglomerative_clustering_kwargs = {}
        self.agglomerative_clustering_kwargs = agglomerative_clustering_kwargs
        self.save_merged_distance_mat = save_merged_distance_mat

    def fit(self, X, y):
        """Build the blended distance matrix from (X, y) and cluster it."""
        if self.label_distance_weight > 1:  # then normalize considering that feature weight is 1
            self.label_distance_weight = self.label_distance_weight / (self.label_distance_weight + 1)
        if isinstance(self.label_distance, str):
            if self.label_distance == 'equal':
                # 0 if the two labels agree, 1 otherwise.
                label_distance = lambda two_labels_tuple: float(two_labels_tuple[0] != two_labels_tuple[1])
            else:
                raise ValueError("Unknown label_distance: {}".format(self.label_distance))
        if isinstance(self.feature_distance, str):
            feature_distance = lambda pt_mat_1, pt_mat_2: cdist(pt_mat_1, pt_mat_2, metric=self.feature_distance)
        # Normalize both matrices to [0, 1] so the convex blend is meaningful.
        feature_dist_mat = feature_distance(X, X)
        feature_dist_mat /= max(feature_dist_mat)
        label_distance_mat = array(list(map(label_distance, itertools.product(y, y)))) \
            .reshape((len(y), len(y)))
        label_distance_mat /= max(label_distance_mat)
        merged_distance_mat = \
            self.label_distance_weight * label_distance_mat \
            + (1 - self.label_distance_weight) * feature_dist_mat
        # NOTE(review): newer scikit-learn renamed ``affinity`` to ``metric``
        # on AgglomerativeClustering — confirm against the pinned version.
        self.clusterer_ = AgglomerativeClustering(n_clusters=self.n_labels,
                                                  affinity='precomputed',
                                                  linkage='complete',
                                                  **self.agglomerative_clustering_kwargs)
        self.clusterer_.fit(merged_distance_mat)
        if self.save_merged_distance_mat:
            self.merged_distance_mat_ = merged_distance_mat
        return self

    def fit_predict(self, X, y):
        """Fit on (X, y) and return the new labels (cluster assignments)."""
        self.fit(X, y)
        return self.clusterer_.labels_
| mit |
tombstone/models | official/vision/detection/utils/object_detection/visualization_utils.py | 1 | 29252 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set of functions that are used for visualization.
These functions often receive an image, perform some visualization on the image.
The functions do not return a value, instead they modify the image itself.
"""
import collections
import functools
from absl import logging
# Set headless-friendly backend.
import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import six
import tensorflow as tf
from official.vision.detection.utils import box_utils
from official.vision.detection.utils.object_detection import shape_utils
# Pixel margins used when laying out figure titles.
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
# PIL color names used to assign each class id a stable, distinct box color
# (indexed modulo the list length in visualize_boxes_and_labels_on_image_array).
STANDARD_COLORS = [
    'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
    'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
    'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
    'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
    'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
    'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
    'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
    'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
    'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
    'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
    'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
    'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
    'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
    'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
    'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
    'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
    'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
    'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
    'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
    'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
    'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
    'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
    'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def save_image_array_as_png(image, output_path):
    """Saves an image (represented as a numpy array) to PNG.

    Args:
      image: a numpy array with shape [height, width, 3].
      output_path: path to which image should be written.
    """
    pil_image = Image.fromarray(np.uint8(image)).convert('RGB')
    with tf.io.gfile.GFile(output_path, 'w') as fid:
        pil_image.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
    """Encodes a numpy array into a PNG string.

    Args:
      image: a numpy array with shape [height, width, 3].

    Returns:
      PNG encoded image string.
    """
    buffer = six.BytesIO()
    Image.fromarray(np.uint8(image)).save(buffer, format='PNG')
    encoded = buffer.getvalue()
    buffer.close()
    return encoded
def visualize_images_with_bounding_boxes(images, box_outputs, step,
                                         summary_writer):
    """Records subset of evaluation images with bounding boxes."""
    # Best-effort visualization: a non-list input is skipped with a warning
    # instead of raising.
    if not isinstance(images, list):
        logging.warning('visualize_images_with_bounding_boxes expects list of '
                        'images but received type: %s and value: %s',
                        type(images), images)
        return

    image_shape = tf.shape(images[0])
    image_height = tf.cast(image_shape[0], tf.float32)
    image_width = tf.cast(image_shape[1], tf.float32)
    # tf.image.draw_bounding_boxes expects normalized [0, 1] coordinates, so
    # the (pixel-space) box outputs are normalized by the first image's size.
    normalized_boxes = box_utils.normalize_boxes(box_outputs,
                                                 [image_height, image_width])

    # A single RGBA color (yellow) shared by all drawn boxes.
    bounding_box_color = tf.constant([[1.0, 1.0, 0.0, 1.0]])
    image_summary = tf.image.draw_bounding_boxes(
        tf.cast(images, tf.float32), normalized_boxes, bounding_box_color)
    with summary_writer.as_default():
        tf.summary.image('bounding_box_summary', image_summary, step=step)
        summary_writer.flush()
def draw_bounding_box_on_image_array(image,
                                     ymin,
                                     xmin,
                                     ymax,
                                     xmax,
                                     color='red',
                                     thickness=4,
                                     display_str_list=(),
                                     use_normalized_coordinates=True):
    """Adds a bounding box to an image (numpy array).

    Bounding box coordinates can be specified in either absolute (pixel) or
    normalized coordinates by setting the use_normalized_coordinates argument.

    Args:
      image: a numpy array with shape [height, width, 3].
      ymin: ymin of bounding box.
      xmin: xmin of bounding box.
      ymax: ymax of bounding box.
      xmax: xmax of bounding box.
      color: color to draw bounding box. Default is red.
      thickness: line thickness. Default value is 4.
      display_str_list: list of strings to display in box
        (each to be shown on its own line).
      use_normalized_coordinates: If True (default), treat coordinates
        ymin, xmin, ymax, xmax as relative to the image.  Otherwise treat
        coordinates as absolute.
    """
    # Round-trip through PIL: draw on a PIL copy, then write back in place.
    pil_image = Image.fromarray(np.uint8(image)).convert('RGB')
    draw_bounding_box_on_image(pil_image, ymin, xmin, ymax, xmax, color,
                               thickness, display_str_list,
                               use_normalized_coordinates)
    np.copyto(image, np.array(pil_image))
def draw_bounding_box_on_image(image,
                               ymin,
                               xmin,
                               ymax,
                               xmax,
                               color='red',
                               thickness=4,
                               display_str_list=(),
                               use_normalized_coordinates=True):
    """Adds a bounding box to an image.

    Bounding box coordinates can be specified in either absolute (pixel) or
    normalized coordinates by setting the use_normalized_coordinates argument.

    Each string in display_str_list is displayed on a separate line above the
    bounding box in black text on a rectangle filled with the input 'color'.
    If the top of the bounding box extends to the edge of the image, the strings
    are displayed below the bounding box.

    Args:
      image: a PIL.Image object.
      ymin: ymin of bounding box.
      xmin: xmin of bounding box.
      ymax: ymax of bounding box.
      xmax: xmax of bounding box.
      color: color to draw bounding box. Default is red.
      thickness: line thickness. Default value is 4.
      display_str_list: list of strings to display in box
        (each to be shown on its own line).
      use_normalized_coordinates: If True (default), treat coordinates
        ymin, xmin, ymax, xmax as relative to the image.  Otherwise treat
        coordinates as absolute.
    """
    draw = ImageDraw.Draw(image)
    im_width, im_height = image.size
    if use_normalized_coordinates:
        (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
                                      ymin * im_height, ymax * im_height)
    else:
        (left, right, top, bottom) = (xmin, xmax, ymin, ymax)
    # Trace the four sides of the box as one closed polyline.
    draw.line([(left, top), (left, bottom), (right, bottom),
               (right, top), (left, top)], width=thickness, fill=color)
    try:
        font = ImageFont.truetype('arial.ttf', 24)
    except IOError:
        # Arial is not installed; fall back to PIL's built-in bitmap font.
        font = ImageFont.load_default()
    # If the total height of the display strings added to the top of the bounding
    # box exceeds the top of the image, stack the strings below the bounding box
    # instead of above.
    display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
    # Each display_str has a top and bottom margin of 0.05x.
    total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
    if top > total_display_str_height:
        text_bottom = top
    else:
        text_bottom = bottom + total_display_str_height
    # Reverse list and print from bottom to top.
    for display_str in display_str_list[::-1]:
        text_width, text_height = font.getsize(display_str)
        margin = np.ceil(0.05 * text_height)
        # Filled background rectangle behind the label text.
        draw.rectangle(
            [(left, text_bottom - text_height - 2 * margin), (left + text_width,
                                                              text_bottom)],
            fill=color)
        draw.text(
            (left + margin, text_bottom - text_height - margin),
            display_str,
            fill='black',
            font=font)
        # Move the text anchor up for the next (higher) label line.
        text_bottom -= text_height - 2 * margin
def draw_bounding_boxes_on_image_array(image,
                                       boxes,
                                       color='red',
                                       thickness=4,
                                       display_str_list_list=()):
    """Draws bounding boxes on image (numpy array).

    Args:
      image: a numpy array object.
      boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
        The coordinates are in normalized format between [0, 1].
      color: color to draw bounding box. Default is red.
      thickness: line thickness. Default value is 4.
      display_str_list_list: list of list of strings.
        a list of strings for each bounding box.

    Raises:
      ValueError: if boxes is not a [N, 4] array
    """
    # Draw on a PIL copy, then copy the pixels back into the input array.
    pil_image = Image.fromarray(image)
    draw_bounding_boxes_on_image(pil_image, boxes, color, thickness,
                                 display_str_list_list)
    np.copyto(image, np.array(pil_image))
def draw_bounding_boxes_on_image(image,
                                 boxes,
                                 color='red',
                                 thickness=4,
                                 display_str_list_list=()):
    """Draws bounding boxes on image.

    Args:
      image: a PIL.Image object.
      boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
        The coordinates are in normalized format between [0, 1].
      color: color to draw bounding box. Default is red.
      thickness: line thickness. Default value is 4.
      display_str_list_list: list of list of strings.
        a list of strings for each bounding box.

    Raises:
      ValueError: if boxes is not a [N, 4] array
    """
    shape = boxes.shape
    # A 0-d array has an empty shape tuple: nothing to draw.
    if not shape:
        return
    if len(shape) != 2 or shape[1] != 4:
        raise ValueError('Input must be of size [N, 4]')
    for idx in range(shape[0]):
        strs = display_str_list_list[idx] if display_str_list_list else ()
        ymin, xmin, ymax, xmax = boxes[idx]
        draw_bounding_box_on_image(image, ymin, xmin, ymax, xmax, color,
                                   thickness, strs)
def _visualize_boxes(image, boxes, classes, scores, category_index, **kwargs):
    """Wrapper used with functools.partial when neither masks nor keypoints
    are supplied."""
    kwargs = dict(kwargs, category_index=category_index)
    return visualize_boxes_and_labels_on_image_array(
        image, boxes, classes, scores, **kwargs)
def _visualize_boxes_and_masks(image, boxes, classes, scores, masks,
                               category_index, **kwargs):
    """Wrapper that additionally overlays instance masks."""
    kwargs = dict(kwargs, category_index=category_index, instance_masks=masks)
    return visualize_boxes_and_labels_on_image_array(
        image, boxes, classes, scores, **kwargs)
def _visualize_boxes_and_keypoints(image, boxes, classes, scores, keypoints,
                                   category_index, **kwargs):
    """Wrapper that additionally draws keypoints."""
    kwargs = dict(kwargs, category_index=category_index, keypoints=keypoints)
    return visualize_boxes_and_labels_on_image_array(
        image, boxes, classes, scores, **kwargs)
def _visualize_boxes_and_masks_and_keypoints(
        image, boxes, classes, scores, masks, keypoints, category_index, **kwargs):
    """Wrapper that overlays both instance masks and keypoints."""
    kwargs = dict(kwargs,
                  category_index=category_index,
                  instance_masks=masks,
                  keypoints=keypoints)
    return visualize_boxes_and_labels_on_image_array(
        image, boxes, classes, scores, **kwargs)
def _resize_original_image(image, image_shape):
    """Nearest-neighbor resize of a single image to image_shape, as uint8."""
    batched = tf.expand_dims(image, 0)
    resized = tf.image.resize(
        batched, image_shape, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    return tf.cast(tf.squeeze(resized, 0), tf.uint8)
def draw_bounding_boxes_on_image_tensors(images,
                                         boxes,
                                         classes,
                                         scores,
                                         category_index,
                                         original_image_spatial_shape=None,
                                         true_image_shape=None,
                                         instance_masks=None,
                                         keypoints=None,
                                         max_boxes_to_draw=20,
                                         min_score_thresh=0.2,
                                         use_normalized_coordinates=True):
    """Draws bounding boxes, masks, and keypoints on batch of image tensors.

    Args:
      images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
        channels will be ignored. If C = 1, then we convert the images to RGB
        images.
      boxes: [N, max_detections, 4] float32 tensor of detection boxes.
      classes: [N, max_detections] int tensor of detection classes. Note that
        classes are 1-indexed.
      scores: [N, max_detections] float32 tensor of detection scores.
      category_index: a dict that maps integer ids to category dicts. e.g.
        {1: {1: 'dog'}, 2: {2: 'cat'}, ...}
      original_image_spatial_shape: [N, 2] tensor containing the spatial size of
        the original image.
      true_image_shape: [N, 3] tensor containing the spatial size of unpadded
        original_image.
      instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
        instance masks.
      keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
        with keypoints.
      max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
      min_score_thresh: Minimum score threshold for visualization. Default 0.2.
      use_normalized_coordinates: Whether to assume boxes and kepoints are in
        normalized coordinates (as opposed to absolute coordiantes).
        Default is True.

    Returns:
      4D image tensor of type uint8, with boxes drawn on top.
    """
    # Additional channels are being ignored.
    if images.shape[3] > 3:
        images = images[:, :, :, 0:3]
    elif images.shape[3] == 1:
        images = tf.image.grayscale_to_rgb(images)
    # Options forwarded unchanged to every per-image visualization call.
    visualization_keyword_args = {
        'use_normalized_coordinates': use_normalized_coordinates,
        'max_boxes_to_draw': max_boxes_to_draw,
        'min_score_thresh': min_score_thresh,
        'agnostic_mode': False,
        'line_thickness': 4
    }
    # Missing shape tensors are replaced by -1 sentinels so that draw_boxes
    # (below) can still index elems positionally.
    if true_image_shape is None:
        true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3])
    else:
        true_shapes = true_image_shape
    if original_image_spatial_shape is None:
        original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2])
    else:
        original_shapes = original_image_spatial_shape
    # Pick the visualization variant matching which optional tensors are
    # present, and assemble the matching per-image elems list for tf.map_fn.
    if instance_masks is not None and keypoints is None:
        visualize_boxes_fn = functools.partial(
            _visualize_boxes_and_masks,
            category_index=category_index,
            **visualization_keyword_args)
        elems = [
            true_shapes, original_shapes, images, boxes, classes, scores,
            instance_masks
        ]
    elif instance_masks is None and keypoints is not None:
        visualize_boxes_fn = functools.partial(
            _visualize_boxes_and_keypoints,
            category_index=category_index,
            **visualization_keyword_args)
        elems = [
            true_shapes, original_shapes, images, boxes, classes, scores, keypoints
        ]
    elif instance_masks is not None and keypoints is not None:
        visualize_boxes_fn = functools.partial(
            _visualize_boxes_and_masks_and_keypoints,
            category_index=category_index,
            **visualization_keyword_args)
        elems = [
            true_shapes, original_shapes, images, boxes, classes, scores,
            instance_masks, keypoints
        ]
    else:
        visualize_boxes_fn = functools.partial(
            _visualize_boxes,
            category_index=category_index,
            **visualization_keyword_args)
        elems = [
            true_shapes, original_shapes, images, boxes, classes, scores
        ]

    def draw_boxes(image_and_detections):
        """Draws boxes on image."""
        true_shape = image_and_detections[0]
        original_shape = image_and_detections[1]
        if true_image_shape is not None:
            # Clip away padding so only the valid image region is drawn on.
            image = shape_utils.pad_or_clip_nd(
                image_and_detections[2], [true_shape[0], true_shape[1], 3])
        if original_image_spatial_shape is not None:
            # NOTE(review): `image` is only bound when true_image_shape is not
            # None; passing original_image_spatial_shape without
            # true_image_shape would raise UnboundLocalError here — confirm
            # callers always supply both or neither.
            image_and_detections[2] = _resize_original_image(image, original_shape)
        # The drawing itself runs as a numpy py_func outside the TF graph.
        image_with_boxes = tf.compat.v1.py_func(visualize_boxes_fn,
                                                image_and_detections[2:], tf.uint8)
        return image_with_boxes

    images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)
    return images
def draw_keypoints_on_image_array(image,
                                  keypoints,
                                  color='red',
                                  radius=2,
                                  use_normalized_coordinates=True):
    """Draws keypoints on an image (numpy array).

    Args:
      image: a numpy array with shape [height, width, 3].
      keypoints: a numpy array with shape [num_keypoints, 2].
      color: color to draw the keypoints with. Default is red.
      radius: keypoint radius. Default value is 2.
      use_normalized_coordinates: if True (default), treat keypoint values as
        relative to the image.  Otherwise treat them as absolute.
    """
    # Draw on a PIL copy, then copy the pixels back into the input array.
    pil_image = Image.fromarray(np.uint8(image)).convert('RGB')
    draw_keypoints_on_image(pil_image, keypoints, color, radius,
                            use_normalized_coordinates)
    np.copyto(image, np.array(pil_image))
def draw_keypoints_on_image(image,
                            keypoints,
                            color='red',
                            radius=2,
                            use_normalized_coordinates=True):
    """Draws keypoints on an image.

    Args:
      image: a PIL.Image object.
      keypoints: a numpy array with shape [num_keypoints, 2].
      color: color to draw the keypoints with. Default is red.
      radius: keypoint radius. Default value is 2.
      use_normalized_coordinates: if True (default), treat keypoint values as
        relative to the image.  Otherwise treat them as absolute.
    """
    draw = ImageDraw.Draw(image)
    im_width, im_height = image.size
    # Keypoints are stored (y, x).
    ys = [k[0] for k in keypoints]
    xs = [k[1] for k in keypoints]
    if use_normalized_coordinates:
        xs = tuple(im_width * x for x in xs)
        ys = tuple(im_height * y for y in ys)
    for x, y in zip(xs, ys):
        # Each keypoint is a filled circle of the given radius.
        draw.ellipse([(x - radius, y - radius), (x + radius, y + radius)],
                     outline=color, fill=color)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
    """Draws mask on an image.

    Args:
      image: uint8 numpy array with shape (img_height, img_height, 3)
      mask: a uint8 numpy array of shape (img_height, img_height) with
        values between either 0 or 1.
      color: color to draw the keypoints with. Default is red.
      alpha: transparency value between 0 and 1. (default: 0.4)

    Raises:
      ValueError: On incorrect data type for image or masks.
    """
    # Validate dtypes, mask values and shape compatibility up front.
    if image.dtype != np.uint8:
        raise ValueError('`image` not of type np.uint8')
    if mask.dtype != np.uint8:
        raise ValueError('`mask` not of type np.uint8')
    if np.any(np.logical_and(mask != 1, mask != 0)):
        raise ValueError('`mask` elements should be in [0, 1]')
    if image.shape[:2] != mask.shape:
        raise ValueError('The image has spatial dimensions %s but the mask has '
                         'dimensions %s' % (image.shape[:2], mask.shape))
    rgb = ImageColor.getrgb(color)
    base = Image.fromarray(image)
    # A solid plane of the overlay color, shaped like the mask.
    color_plane = np.reshape(list(rgb), [1, 1, 3]) * np.expand_dims(
        np.ones_like(mask), axis=2)
    overlay = Image.fromarray(np.uint8(color_plane)).convert('RGBA')
    # Alpha channel: fully transparent off-mask, `alpha` on-mask.
    alpha_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')
    composited = Image.composite(overlay, base, alpha_mask)
    np.copyto(image, np.array(composited.convert('RGB')))
def visualize_boxes_and_labels_on_image_array(
        image,
        boxes,
        classes,
        scores,
        category_index,
        instance_masks=None,
        instance_boundaries=None,
        keypoints=None,
        use_normalized_coordinates=False,
        max_boxes_to_draw=20,
        min_score_thresh=.5,
        agnostic_mode=False,
        line_thickness=4,
        groundtruth_box_visualization_color='black',
        skip_scores=False,
        skip_labels=False):
    """Overlay labeled boxes on an image with formatted scores and label names.

    This function groups boxes that correspond to the same location
    and creates a display string for each detection and overlays these
    on the image. Note that this function modifies the image in place, and returns
    that same image.

    Args:
      image: uint8 numpy array with shape (img_height, img_width, 3)
      boxes: a numpy array of shape [N, 4]
      classes: a numpy array of shape [N]. Note that class indices are 1-based,
        and match the keys in the label map.
      scores: a numpy array of shape [N] or None.  If scores=None, then
        this function assumes that the boxes to be plotted are groundtruth
        boxes and plot all boxes as black with no classes or scores.
      category_index: a dict containing category dictionaries (each holding
        category index `id` and category name `name`) keyed by category indices.
      instance_masks: a numpy array of shape [N, image_height, image_width] with
        values ranging between 0 and 1, can be None.
      instance_boundaries: a numpy array of shape [N, image_height, image_width]
        with values ranging between 0 and 1, can be None.
      keypoints: a numpy array of shape [N, num_keypoints, 2], can
        be None
      use_normalized_coordinates: whether boxes is to be interpreted as
        normalized coordinates or not.
      max_boxes_to_draw: maximum number of boxes to visualize.  If None, draw
        all boxes.
      min_score_thresh: minimum score threshold for a box to be visualized
      agnostic_mode: boolean (default: False) controlling whether to evaluate in
        class-agnostic mode or not.  This mode will display scores but ignore
        classes.
      line_thickness: integer (default: 4) controlling line width of the boxes.
      groundtruth_box_visualization_color: box color for visualizing groundtruth
        boxes
      skip_scores: whether to skip score when drawing a single detection
      skip_labels: whether to skip label when drawing a single detection

    Returns:
      uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
    """
    # Create a display string (and color) for every box location, group any boxes
    # that correspond to the same location.
    box_to_display_str_map = collections.defaultdict(list)
    box_to_color_map = collections.defaultdict(str)
    box_to_instance_masks_map = {}
    box_to_instance_boundaries_map = {}
    box_to_keypoints_map = collections.defaultdict(list)
    if not max_boxes_to_draw:
        # None/0 means "no limit": consider every box.
        max_boxes_to_draw = boxes.shape[0]
    for i in range(min(max_boxes_to_draw, boxes.shape[0])):
        if scores is None or scores[i] > min_score_thresh:
            # The coordinate tuple doubles as the grouping key for all
            # per-box auxiliary data.
            box = tuple(boxes[i].tolist())
            if instance_masks is not None:
                box_to_instance_masks_map[box] = instance_masks[i]
            if instance_boundaries is not None:
                box_to_instance_boundaries_map[box] = instance_boundaries[i]
            if keypoints is not None:
                box_to_keypoints_map[box].extend(keypoints[i])
            if scores is None:
                # No scores: treat as groundtruth; plain boxes, no labels.
                box_to_color_map[box] = groundtruth_box_visualization_color
            else:
                display_str = ''
                if not skip_labels:
                    if not agnostic_mode:
                        if classes[i] in category_index.keys():
                            class_name = category_index[classes[i]]['name']
                        else:
                            class_name = 'N/A'
                        display_str = str(class_name)
                if not skip_scores:
                    if not display_str:
                        display_str = '{}%'.format(int(100*scores[i]))
                    else:
                        display_str = '{}: {}%'.format(display_str, int(100*scores[i]))
                box_to_display_str_map[box].append(display_str)
                if agnostic_mode:
                    box_to_color_map[box] = 'DarkOrange'
                else:
                    # Stable per-class color, cycling through STANDARD_COLORS.
                    box_to_color_map[box] = STANDARD_COLORS[
                        classes[i] % len(STANDARD_COLORS)]
    # Draw all boxes onto image.
    for box, color in box_to_color_map.items():
        ymin, xmin, ymax, xmax = box
        if instance_masks is not None:
            draw_mask_on_image_array(
                image,
                box_to_instance_masks_map[box],
                color=color
            )
        if instance_boundaries is not None:
            # Boundaries are drawn fully opaque, always in red, over any mask.
            draw_mask_on_image_array(
                image,
                box_to_instance_boundaries_map[box],
                color='red',
                alpha=1.0
            )
        draw_bounding_box_on_image_array(
            image,
            ymin,
            xmin,
            ymax,
            xmax,
            color=color,
            thickness=line_thickness,
            display_str_list=box_to_display_str_map[box],
            use_normalized_coordinates=use_normalized_coordinates)
        if keypoints is not None:
            draw_keypoints_on_image_array(
                image,
                box_to_keypoints_map[box],
                color=color,
                radius=line_thickness / 2,
                use_normalized_coordinates=use_normalized_coordinates)
    return image
def add_cdf_image_summary(values, name):
    """Adds a tf.summary.image for a CDF plot of the values.

    Normalizes `values` such that they sum to 1, plots the cumulative distribution
    function and creates a tf image summary.

    Args:
      values: a 1-D float32 tensor containing the values.
      name: name for the image summary.
    """

    def cdf_plot(values):
        """Numpy function to plot CDF."""
        normalized_values = values / np.sum(values)
        sorted_values = np.sort(normalized_values)
        cumulative_values = np.cumsum(sorted_values)
        fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32)
                                / cumulative_values.size)
        fig = plt.figure(frameon=False)
        # Integer subplot spec: the string form ('111') is deprecated in
        # matplotlib 3.x.
        ax = fig.add_subplot(111)
        ax.plot(fraction_of_examples, cumulative_values)
        ax.set_ylabel('cumulative normalized values')
        ax.set_xlabel('fraction of examples')
        fig.canvas.draw()
        width, height = fig.get_size_inches() * fig.get_dpi()
        # np.frombuffer replaces np.fromstring, which is deprecated and was
        # removed in NumPy 2.0; the read-only view is fine since the array is
        # only reshaped and returned.
        image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8').reshape(
            1, int(height), int(width), 3)
        # Close the figure so repeated summary calls don't leak figures.
        plt.close(fig)
        return image

    cdf_plot = tf.compat.v1.py_func(cdf_plot, [values], tf.uint8)
    tf.compat.v1.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
    """Adds a tf.summary.image for a histogram plot of the values.

    Plots the histogram of values and creates a tf image summary.

    Args:
      values: a 1-D float32 tensor containing the values.
      bins: bin edges which will be directly passed to np.histogram.
      name: name for the image summary.
    """

    def hist_plot(values, bins):
        """Numpy function to plot hist."""
        fig = plt.figure(frameon=False)
        # Integer subplot spec: the string form ('111') is deprecated in
        # matplotlib 3.x.
        ax = fig.add_subplot(111)
        y, x = np.histogram(values, bins=bins)
        ax.plot(x[:-1], y)
        ax.set_ylabel('count')
        ax.set_xlabel('value')
        fig.canvas.draw()
        width, height = fig.get_size_inches() * fig.get_dpi()
        # np.frombuffer replaces np.fromstring, which is deprecated and was
        # removed in NumPy 2.0.
        image = np.frombuffer(
            fig.canvas.tostring_rgb(), dtype='uint8').reshape(
                1, int(height), int(width), 3)
        # Close the figure so repeated summary calls don't leak figures.
        plt.close(fig)
        return image

    hist_plot = tf.compat.v1.py_func(hist_plot, [values, bins], tf.uint8)
    tf.compat.v1.summary.image(name, hist_plot)
| apache-2.0 |
mmottahedi/neuralnilm_prototype | scripts/e551.py | 2 | 7824 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation, MultiSource)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter, RectangularOutputPlotter, StartEndMeanPlotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from neuralnilm.rectangulariser import rectangularise
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity, softmax
from lasagne.objectives import squared_error, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer,
DimshuffleLayer, DropoutLayer, ConcatLayer, PadLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
# PATH = "/home/jack/experiments/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 25000
UKDALE_FILENAME = '/data/dk3810/ukdale.h5'
MAX_TARGET_POWER = 2400
TARGET_APPLIANCE = ['washer dryer', 'washing machine']
SEQ_LENGTH = 2048
N_SEQ_PER_BATCH = 64
TRAIN_BUILDINGS = [1, 2, 3, 4]
VALIDATION_BUILDINGS = [5]
SKIP_PROBABILITY_FOR_TARGET = 0.5
INDEPENDENTLY_CENTER_INPUTS = True
WINDOW_PER_BUILDING = {
1: ("2013-03-17", "2014-12-01"),
2: ("2013-05-22", "2013-10-01"),
3: ("2013-02-27", "2013-04-01"),
4: ("2013-03-09", "2013-09-20"),
5: ("2014-06-29", "2014-08-27")
}
INPUT_STATS = {
'mean': np.array([297.87216187], dtype=np.float32),
'std': np.array([374.43884277], dtype=np.float32)
}
real_appliance_source1 = RealApplianceSource(
filename=UKDALE_FILENAME,
appliances=[
TARGET_APPLIANCE,
['fridge freezer', 'fridge', 'freezer'],
'dish washer',
'kettle',
'microwave'
],
max_appliance_powers=[MAX_TARGET_POWER, 300, 2500, 2600, 1500],
on_power_thresholds=[5] * 5,
min_on_durations=[1800, 60, 1800, 12, 12],
min_off_durations=[600, 12, 1800, 12, 12],
divide_input_by_max_input_power=False,
window_per_building=WINDOW_PER_BUILDING,
seq_length=SEQ_LENGTH,
output_one_appliance=True,
train_buildings=TRAIN_BUILDINGS,
validation_buildings=VALIDATION_BUILDINGS,
n_seq_per_batch=N_SEQ_PER_BATCH,
skip_probability=0.75,
skip_probability_for_first_appliance=SKIP_PROBABILITY_FOR_TARGET,
target_is_start_and_end_and_mean=True,
standardise_input=True,
input_stats=INPUT_STATS,
independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS
)
same_location_source1 = SameLocation(
filename=UKDALE_FILENAME,
target_appliance=TARGET_APPLIANCE,
window_per_building=WINDOW_PER_BUILDING,
seq_length=SEQ_LENGTH,
train_buildings=TRAIN_BUILDINGS,
validation_buildings=VALIDATION_BUILDINGS,
n_seq_per_batch=N_SEQ_PER_BATCH,
skip_probability=SKIP_PROBABILITY_FOR_TARGET,
target_is_start_and_end_and_mean=True,
standardise_input=True,
offset_probability=1,
divide_target_by=MAX_TARGET_POWER,
input_stats=INPUT_STATS,
independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS
)
multi_source = MultiSource(
sources=[
{
'source': real_appliance_source1,
'train_probability': 0.5,
'validation_probability': 0
},
{
'source': same_location_source1,
'train_probability': 0.5,
'validation_probability': 1
}
],
standardisation_source=same_location_source1
)
def only_train_on_real_data(net, iteration):
net.logger.info(
"Iteration {}: Now only training on real data.".format(iteration))
net.source.sources[0]['train_probability'] = 0.0
net.source.sources[1]['train_probability'] = 1.0
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=lambda x, t: squared_error(x, t).mean(),
updates_func=nesterov_momentum,
learning_rate=1e-5,
learning_rate_changes_by_iteration={
400000: 1e-6,
500000: 1e-7
},
epoch_callbacks={
# 350000: only_train_on_real_data
},
do_save_activations=True,
auto_reshape=False,
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': PadLayer,
'width': 4
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1), # back to (batch, time, features)
'label': 'dimshuffle3'
},
{
'type': DenseLayer,
'num_units': 512 * 8,
'nonlinearity': rectify,
'label': 'dense0'
},
{
'type': DenseLayer,
'num_units': 512 * 6,
'nonlinearity': rectify,
'label': 'dense1'
},
{
'type': DenseLayer,
'num_units': 512 * 4,
'nonlinearity': rectify,
'label': 'dense2'
},
{
'type': DenseLayer,
'num_units': 512,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': 3,
'nonlinearity': None
}
]
)
def exp_a(name):
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=multi_source,
plotter=StartEndMeanPlotter(
n_seq_to_plot=32, max_target_power=MAX_TARGET_POWER)
))
net = Net(**net_dict_copy)
return net
def main():
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e551.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| mit |
agentfog/qiime | scripts/print_qiime_config.py | 15 | 35150 | #!/usr/bin/env python
from __future__ import division
__author__ = "Jens Reeder"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Jens Reeder", "Dan Knights", "Antonio Gonzalez Pena",
"Justin Kuczynski", "Jai Ram Rideout", "Greg Caporaso",
"Emily TerAvest"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
import re
from os import access, X_OK, R_OK, W_OK, getenv, environ, remove, devnull
from os.path import isdir, exists, split, join
from sys import platform, version as python_version, executable, stdout
from unittest import TestLoader, TextTestRunner, TestCase
from shutil import rmtree
from subprocess import Popen, PIPE, STDOUT
from optparse import SUPPRESS_HELP
core_dependency_missing_msg = "See the QIIME Installation Guide: http://qiime.org/install/install.html"
try:
from numpy import __version__ as numpy_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from scipy import __version__ as scipy_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from tempfile import mkdtemp
from skbio.util import remove_files
from burrito.util import ApplicationNotFoundError, ApplicationError
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from qiime.parse import parse_qiime_config_file
from qiime.util import (load_qiime_config,
get_qiime_project_dir,
get_qiime_library_version,
get_qiime_scripts_dir,
get_rdp_jarpath,
get_java_version,
get_pynast_version,
parse_command_line_parameters,
make_option,
qiime_system_call,
get_qiime_temp_dir)
from qiime.denoiser.utils import check_flowgram_ali_exe
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from biom import __version__ as biom_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from qcli import __version__ as qcli_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from pyqi import __version__ as pyqi_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from qiime_default_reference import __version__ as qdr_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from skbio import __version__ as skbio_lib_version
from burrito.util import which
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from pandas import __version__ as pandas_lib_version
except ImportError:
pandas_lib_version = "Not installed."
try:
from matplotlib import __version__ as matplotlib_lib_version
except ImportError:
matplotlib_lib_version = "Not installed."
try:
from emperor import __version__ as emperor_lib_version
except ImportError:
emperor_lib_version = "Not installed."
try:
from burrito import __version__ as burrito_lib_version
except ImportError:
burrito_lib_version = "Not installed."
# current release of bfillings doesn't have __version__. if it gets added in
# future releases, display that info, otherwise just indicate whether it's
# installed or not
try:
import bfillings
bfillings_lib_version = bfillings.__version__
except ImportError:
bfillings_lib_version = "Not installed."
except AttributeError:
bfillings_lib_version = "Installed."
# gdata doesn't have __version__ and adding that is outside of our control.
# just indicate whether it's installed or not
try:
import gdata
except ImportError:
gdata_installed = "Not installed."
else:
gdata_installed = "Installed."
try:
import h5py
h5py_lib_version = (
h5py.__version__ + ' (HDF5 version: %s)' % h5py.version.hdf5_version)
except ImportError:
h5py_lib_version = "Not installed."
pynast_lib_version = get_pynast_version()
if pynast_lib_version is None:
pynast_lib_version = "Not installed."
if which('sortmerna') is None:
sortmerna_lib_version = "Not installed."
else:
_, serr, _ = qiime_system_call("sortmerna --version")
sortmerna_lib_version = serr.strip()
if which('sumaclust') is None:
sumaclust_lib_version = "Not installed."
else:
sout, _, _ = qiime_system_call("sumaclust --help")
sout_lines = sout.split('\n')
sumaclust_lib_version = "Installed, but can't identify version."
for e in sout_lines:
e = e.strip()
if e.startswith('SUMACLUST Version'):
sumaclust_lib_version = e
break
if which('swarm') is None:
swarm_lib_version = "Not installed."
else:
_, serr, return_value = qiime_system_call("swarm --version")
serr = serr.strip()
if serr:
swarm_lib_version = serr.split('\n')[0]
else:
swarm_lib_version = "Installed, but can't identify version."
script_info = {}
script_info['brief_description'] = ("Print and optionally test QIIME "
"configuration details")
script_info['script_description'] = ("Print QIIME configuration details and "
"optionally perform tests of the QIIME "
"base or full install.")
script_info['script_usage'] = []
script_info['script_usage'].append(
("Example 1",
"Print basic QIIME configuration details:", """%prog"""))
script_info['script_usage'].append(
("Example 2",
"Print basic QIIME configuration details and test the base QIIME installation:",
"%prog -t"))
script_info['script_usage'].append(
("Example 3",
"Print basic QIIME configuration details and test the full QIIME installation:",
"%prog -tf"))
script_info['output_description'] = ("Prints QIIME configuration details to "
"standard output.")
script_info['version'] = __version__
script_info['help_on_no_arguments'] = False
script_info['required_options'] = []
script_info['optional_options'] = [
make_option('-t', '--test', action='store_true', default=False,
help='Test the QIIME install and configuration '
'[default: %default]'),
make_option('-b', '--qiime_base_install', action='store_true',
default=True, help=SUPPRESS_HELP),
make_option('-f', '--qiime_full_install', action='store_true',
default=False, help='If passed, report on dependencies required for the '
'QIIME full install. To perform tests of the QIIME '
'full install, you must also pass -t. '
'[default: %default]'),
make_option('--haiku',
action='store_true',
default=False,
help=SUPPRESS_HELP)
]
class QIIMEConfig(TestCase):
def setUp(self):
self.config = load_qiime_config()
def test_cluster_jobs_fp(self):
"""cluster_jobs_fp is set to a valid path and is executable"""
fp = self.config["cluster_jobs_fp"]
if fp:
full_path = which(fp)
if full_path:
fp = full_path
# test if file exists or is in $PATH
self.assertTrue(exists(fp),
"cluster_jobs_fp set to an invalid file path or is not in $PATH: %s" % fp)
modes = {R_OK: "readable",
W_OK: "writable",
X_OK: "executable"}
# test if file readable
self.assertTrue(access(fp, X_OK),
"cluster_jobs_fp is not %s: %s" % (modes[X_OK], fp))
def test_blastmat_dir(self):
"""blastmat_dir is set to a valid path."""
test_qiime_config_variable("blastmat_dir", self.config, self)
def test_pynast_template_alignment_fp(self):
"""pynast_template_alignment, if set, is set to a valid path"""
test_qiime_config_variable("pynast_template_alignment_fp",
self.config, self)
def test_pynast_template_alignment_blastdb_fp(self):
"""pynast_template_alignment_blastdb, if set, is set to a valid path"""
test_qiime_config_variable("pynast_template_alignment_blastdb_fp",
self.config, self)
def test_pynast_template_alignment_blastdb_fp(self):
"""pynast_template_alignment_blastdb, if set, is set to a valid path"""
test_qiime_config_variable("pynast_template_alignment_blastdb_fp",
self.config, self)
def test_get_qiime_scripts_dir(self):
"""Test that we can find the directory containing QIIME scripts."""
# get_qiime_scripts_dir will raise an error if it can't find a scripts
# directory.
scripts_dir = get_qiime_scripts_dir()
self.assertTrue(isdir(scripts_dir), "The QIIME scripts directory does "
"not exist: %s" % scripts_dir)
def test_temp_dir(self):
"""temp_dir is set to a valid path"""
temp_dir = get_qiime_temp_dir()
self.assertTrue(exists(temp_dir),
"temp_dir does not exist: %s" % temp_dir)
self.assertTrue(isdir(temp_dir),
"temp_dir is not a directory: %s" % temp_dir)
self.assertTrue(access(temp_dir, W_OK),
"temp_dir is not writable: %s" % temp_dir)
# we are not testing these values from the qiime_config:
# jobs_to_start 1
# seconds_to_sleep 60
def test_for_unrecognized_values(self):
"""qiime_config has no extra values"""
error_msg_fragment = (" contains unrecognized values:\n%s\nYou can "
"safely remove these values from your QIIME "
"config file as they will be ignored by QIIME.")
qiime_project_dir = get_qiime_project_dir()
orig_config = parse_qiime_config_file(open(qiime_project_dir +
'/qiime/support_files/qiime_config'))
# check the env qiime_config
qiime_config_env_filepath = getenv('QIIME_CONFIG_FP')
if qiime_config_env_filepath:
qiime_config_via_env = parse_qiime_config_file(
open(qiime_config_env_filepath))
extra_vals = []
for key in qiime_config_via_env:
if key not in orig_config:
extra_vals.append(key)
if extra_vals:
self.fail("The QIIME config file set via the QIIME_CONFIG_FP "
"environment variable" +
error_msg_fragment % ", ".join(extra_vals))
# check the qiime_config in $HOME/.qiime_config
home_dir = getenv('HOME')
if (exists(home_dir + "/.qiime_config")):
qiime_config_home = parse_qiime_config_file(
open(home_dir + "/.qiime_config"))
extra_vals = []
for key in qiime_config_home:
if key not in orig_config:
extra_vals.append(key)
if extra_vals:
self.fail("The .qiime_config in your HOME" +
error_msg_fragment % ", ".join(extra_vals))
class QIIMEDependencyBase(QIIMEConfig):
def test_uclust_supported_version(self):
"""uclust is in path and version is supported """
acceptable_version = (1, 2, 22)
self.assertTrue(which('uclust'),
"uclust not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = 'uclust --version'
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip().split('v')[-1].strip('q')
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version == acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported uclust version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_FastTree_supported_version(self):
"""FastTree is in path and version is supported """
acceptable_version = (2, 1, 3)
self.assertTrue(which('FastTree'),
"FastTree not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
# If FastTree is run interactively, it outputs the following line:
# Usage for FastTree version 2.1.3 SSE3:
#
# If run non-interactively:
# FastTree Version 2.1.3 SSE3
command = "FastTree 2>&1 > %s | grep -i version" % devnull
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read().strip()
version_str_matches = re.findall('ersion\s+(\S+)\s+', stdout)
self.assertEqual(len(version_str_matches), 1,
"Could not find FastTree version info in usage text "
"'%s'." % stdout)
version_str = version_str_matches[0]
try:
version = tuple(map(int, version_str.split('.')))
pass_test = version == acceptable_version
except ValueError:
pass_test = False
acceptable_version_str = '.'.join(map(str, acceptable_version))
self.assertTrue(pass_test,
"Unsupported FastTree version. %s is required, but "
"running %s." % (acceptable_version_str, version_str))
class QIIMEDependencyFull(QIIMEDependencyBase):
def test_ampliconnoise_install(self):
""" AmpliconNoise install looks sane."""
url = "http://qiime.org/install/install.html#ampliconnoise-install-notes"
pyro_lookup_file = getenv('PYRO_LOOKUP_FILE')
self.assertTrue(pyro_lookup_file is not None,
"$PYRO_LOOKUP_FILE variable is not set. See %s for help." % url)
self.assertTrue(exists(pyro_lookup_file),
"$PYRO_LOOKUP_FILE variable is not set to an existing filepath.")
seq_lookup_file = getenv('SEQ_LOOKUP_FILE')
self.assertTrue(seq_lookup_file is not None,
"$SEQ_LOOKUP_FILE variable is not set. See %s for help." % url)
self.assertTrue(exists(seq_lookup_file),
"$SEQ_LOOKUP_FILE variable is not set to an existing filepath.")
self.assertTrue(which("SplitKeys.pl"),
"Couldn't find SplitKeys.pl. " +
"Perhaps AmpliconNoise Scripts directory isn't in $PATH?" +
" See %s for help." % url)
self.assertTrue(which("FCluster"),
"Couldn't find FCluster. " +
"Perhaps the AmpliconNoise bin directory isn't in $PATH?" +
" See %s for help." % url)
self.assertTrue(which("Perseus"),
"Couldn't find Perseus. " +
"Perhaps the AmpliconNoise bin directory isn't in $PATH?" +
" See %s for help." % url)
def test_sourcetracker_installed(self):
"""sourcetracker is installed"""
sourcetracker_path = getenv('SOURCETRACKER_PATH')
self.assertNotEqual(sourcetracker_path, None,
("SOURCETRACKER_PATH is not set. This is "
"only important if you plan to use SourceTracker."))
self.assertTrue(exists(sourcetracker_path),
"SOURCETRACKER_PATH is not set to a valid path: %s" %
sourcetracker_path)
def test_chimeraSlayer_install(self):
"""no obvious problems with ChimeraSlayer install """
# The ChimerSalyer app requires that all its components are installed
# relative to the main program ChimeraSlayer.pl.
# We therefore check that at least one the files is there.
# However, if the directory structure of ChimeraSlayer changes, this test will most
# likely fail as well and need to be updated.
# Tested with the version of microbiomeutil_2010-04-29
chim_slay = which("ChimeraSlayer.pl")
self.assertTrue(chim_slay, "ChimeraSlayer was not found in your $PATH")
dir, app_name = split(chim_slay)
self.assertTrue(
exists(dir + "/ChimeraParentSelector/chimeraParentSelector.pl"),
"ChimeraSlayer depends on external files in directoryies relative to its "
"install directory. These do not appear to be present.")
def test_blastall_fp(self):
"""blastall_fp is set to a valid path"""
blastall = self.config["blastall_fp"]
if not self.config["blastall_fp"].startswith("/"):
# path is relative, figure out absolute path
blast_all = which(blastall)
if not blast_all:
raise ApplicationNotFoundError(
"blastall_fp set to %s, but is not in your PATH. Either use an absolute path to or put it in your PATH." %
blastall)
self.config["blastall_fp"] = blast_all
test_qiime_config_variable("blastall_fp", self.config, self, X_OK)
def test_blast_supported_version(self):
"""blast is in path and version is supported """
acceptable_version = (2, 2, 22)
self.assertTrue(which('blastall'),
"blast not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = 'blastall | grep blastall'
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip().split(' ')[1].strip()
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version == acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported blast version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_cdbtools_supported_version(self):
"""cdbtools is in path and version is supported """
acceptable_version = (0, 99)
self.assertTrue(which('cdbfasta'),
"cdbtools not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = "cdbfasta -v"
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip().split(' ')[2].strip()
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version == acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported cdbtools version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_INFERNAL_supported_version(self):
"""INFERNAL is in path and version is supported """
acceptable_version = (1, 0, 2)
self.assertTrue(which('cmbuild'),
"Infernal not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = "cmbuild -h | grep INF"
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip().split(' ')[2].strip()
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version == acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported INFERNAL version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_muscle_supported_version(self):
"""muscle is in path and version is supported """
acceptable_version = (3, 8, 31)
self.assertTrue(which('muscle'),
"muscle not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = "muscle -version"
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip().split(' ')[1].strip('v')
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version == acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported muscle version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_mothur_supported_version(self):
"""mothur is in path and version is supported """
acceptable_version = (1, 25, 0)
self.assertTrue(which('mothur'),
"mothur not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
# mothur creates a log file in cwd, so create a tmp and cd there first
log_file = join(get_qiime_temp_dir(), 'mothur.log')
command = "mothur \"#set.logfile(name=%s)\" | grep '^mothur v'" % log_file
stdout, stderr, exit_Status = qiime_system_call(command)
# remove log file
remove_files([log_file], error_on_missing=False)
version_string = stdout.strip().split(' ')[1].strip('v.')
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version == acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported mothur version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_denoiser_supported_version(self):
"""denoiser aligner is ready to use """
pass_test = True
try:
check_flowgram_ali_exe()
except (ApplicationNotFoundError, ApplicationError):
pass_test = False
self.assertTrue(pass_test,
"Denoiser flowgram aligner not found or not "
"executable. This may or may not be a problem "
"depending on which components of QIIME you plan to "
"use.")
def test_raxmlHPC_supported_version(self):
"""raxmlHPC is in path and version is supported """
acceptable_version = [(7, 3, 0), (7, 3, 0)]
self.assertTrue(which('raxmlHPC'),
"raxmlHPC not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = "raxmlHPC -v | grep version"
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip().split(' ')[4].strip()
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version in acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported raxmlHPC version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_clearcut_supported_version(self):
"""clearcut is in path and version is supported """
acceptable_version = (1, 0, 9)
self.assertTrue(which('clearcut'),
"clearcut not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = "clearcut -V"
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip().split(' ')[2].strip()
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version == acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported clearcut version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_cdhit_supported_version(self):
"""cd-hit is in path and version is supported """
self.assertTrue(which('cd-hit'),
"cd-hit not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
# cd-hit does not have a version print in their program
def test_rtax_supported_version(self):
"""rtax is in path and version is supported """
acceptable_version = [(0, 984)]
self.assertTrue(which('rtax'),
"rtax not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = "rtax 2>&1 > %s | grep Version | awk '{print $2}'" % devnull
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip()
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version in acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported rtax version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_usearch_supported_version(self):
"""usearch is in path and version is supported """
acceptable_version = [(5, 2, 236), (5, 2, 236)]
self.assertTrue(which('usearch'),
"usearch not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = "usearch --version"
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.split('v')[1]
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version in acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported usearch version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_R_supported_version(self):
"""R is in path and version is supported """
minimum_version = (2, 12, 0)
self.assertTrue(which('R'),
"R not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = "R --version | grep 'R version' | awk '{print $3}'"
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip()
try:
version = tuple(map(int, version_string.split('.')))
pass_test = False
if version[0] == minimum_version[0]:
if version[1] == minimum_version[1]:
if version[2] >= minimum_version[2]:
pass_test = True
elif version[1] > minimum_version[1]:
pass_test = True
elif version[0] > minimum_version[0]:
pass_test = True
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported R version. %s or greater is required, but running %s."
% ('.'.join(map(str, minimum_version)), version_string))
def test_gdata_install(self):
"""gdata is installed"""
# We currently can't programmatically find the version of gdata. An
# issue has been created alerting the gdata devs.
pass_test = True
try:
import gdata
except ImportError:
pass_test = False
self.assertTrue(pass_test, "gdata is not installed.")
def test_h5py(self):
"""h5py is installed"""
self.assertTrue(h5py_lib_version != "Not installed.",
"h5py is not installed. You should install this for "
"improved performance with large BIOM files or if "
"working with BIOM format version 2.x files. For "
"more information, see "
"http://qiime.org/documentation/file_formats.html#biom-file-format-versions")
def test_qiime_config_variable(variable, qiime_config, test,
access_var=R_OK, fail_on_missing=False):
"""test if a variable is set and set to a readable path."""
fp = qiime_config[variable]
if not fp:
if fail_on_missing:
test.fail("%s not set." % variable)
else:
# non-essential file, so do not fail
return
# test if file exists
test.assertTrue(exists(fp), "%s set to an invalid file path: %s" %
(variable, fp))
modes = {R_OK: "readable",
W_OK: "writable",
X_OK: "executable"}
# test if file readable
test.assertTrue(access(fp, access_var),
"%s is not %s: %s" % (variable, modes[access_var], fp))
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
if opts.haiku:
print "QIIME provides insight\nmicrobial in nature\nto ecology"
exit(0)
qiime_config = load_qiime_config()
test = opts.test
qiime_full_install = opts.qiime_full_install
rdp_jarpath = get_rdp_jarpath()
if rdp_jarpath is None:
rdp_version = "Not installed."
else:
rdp_version = split(rdp_jarpath)[1]
java_version = get_java_version()
if java_version is None:
java_version = "Not installed."
system_info = [
("Platform", platform),
("Python version", python_version.replace('\n', ' ')),
("Python executable", executable)]
max_len = max([len(e[0]) for e in system_info])
print "\nSystem information"
print "=================="
for v in system_info:
print "%*s:\t%s" % (max_len, v[0], v[1])
print "\nQIIME default reference information"
print "==================================="
print "For details on what files are used as QIIME's default references, see here:"
print " https://github.com/biocore/qiime-default-reference/releases/tag/%s" % qdr_lib_version
version_info = [
("QIIME library version", get_qiime_library_version()),
("QIIME script version", __version__),
("qiime-default-reference version", qdr_lib_version),
("NumPy version", numpy_lib_version),
("SciPy version", scipy_lib_version),
("pandas version", pandas_lib_version),
("matplotlib version", matplotlib_lib_version),
("biom-format version", biom_lib_version),
("h5py version", h5py_lib_version),
("qcli version", qcli_lib_version),
("pyqi version", pyqi_lib_version),
("scikit-bio version", skbio_lib_version),
("PyNAST version", pynast_lib_version),
("Emperor version", emperor_lib_version),
("burrito version", burrito_lib_version),
("burrito-fillings version", bfillings_lib_version),
("sortmerna version", sortmerna_lib_version),
("sumaclust version", sumaclust_lib_version),
("swarm version", swarm_lib_version),
("gdata", gdata_installed)
]
if qiime_full_install:
version_info += [
("RDP Classifier version (if installed)", rdp_version),
("Java version (if installed)", java_version)]
max_len = max([len(e[0]) for e in version_info])
print "\nDependency versions"
print "==================="
for v in version_info:
print "%*s:\t%s" % (max_len, v[0], v[1])
print "\nQIIME config values"
print "==================="
print "For definitions of these settings and to learn how to configure QIIME, see here:"
print " http://qiime.org/install/qiime_config.html"
print " http://qiime.org/tutorials/parallel_qiime.html\n"
max_len = max([len(key) for key in qiime_config])
for key, value in qiime_config.items():
print "%*s:\t%s" % (max_len, key, value)
if test:
if qiime_full_install:
print "\nQIIME full install test results"
print "==============================="
suite = TestLoader().loadTestsFromTestCase(QIIMEDependencyFull)
else:
print "\nQIIME base install test results"
print "==============================="
suite = TestLoader().loadTestsFromTestCase(QIIMEDependencyBase)
if opts.verbose:
verbosity = 2
else:
verbosity = 1
TextTestRunner(stream=stdout, verbosity=verbosity).run(suite)
if __name__ == "__main__":
main()
| gpl-2.0 |
schets/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 221 | 5517 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
    """Check BinomialDeviance against the closed forms given in ESLII."""
    loss = BinomialDeviance(2)

    # A raw prediction of 0 gives the same deviance whether y is 0 or 1.
    assert_equal(loss(np.array([0.0]), np.array([0.0])),
                 loss(np.array([1.0]), np.array([0.0])))

    # Extremely confident, correct predictions drive the deviance to zero.
    assert_almost_equal(loss(np.array([1.0, 1.0, 1.0]),
                             np.array([100.0, 100.0, 100.0])),
                        0.0)
    assert_almost_equal(loss(np.array([1.0, 0.0, 0.0]),
                             np.array([100.0, -100.0, -100.0])), 0)

    def eslii_deviance(y, raw):
        # Alternative deviance definition from ESLII (labels mapped to +/-1).
        return np.mean(np.logaddexp(0.0, -2.0 * (2.0 * y - 1) * raw))

    def eslii_negative_gradient(y, raw):
        # Alternative negative-gradient definition from ESLII.
        return (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * raw))

    cases = [
        (np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
        (np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
        (np.array([0.0, 0.0, 0.0]), np.array([-100.0, -100.0, -100.0])),
        (np.array([1.0, 1.0, 1.0]), np.array([-100.0, -100.0, -100.0])),
    ]
    for y, raw in cases:
        assert_almost_equal(loss(y, raw), eslii_deviance(y, raw))
    for y, raw in cases:
        assert_almost_equal(loss.negative_gradient(y, raw),
                            eslii_negative_gradient(y, raw))
def test_log_odds_estimator():
    """Sanity checks for LogOddsEstimator fitting and prediction."""
    estimator = LogOddsEstimator()
    # Fitting a degenerate (single-class) target must raise.
    assert_raises(ValueError, estimator.fit, None, np.array([1]))
    # A balanced binary target gives a prior log-odds of zero ...
    estimator.fit(None, np.array([1.0, 0.0]))
    assert_equal(estimator.prior, 0.0)
    # ... and therefore constant zero predictions.
    predictions = estimator.predict(np.array([[1.0], [1.0]]))
    assert_array_equal(predictions, np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
    """Unit sample weights must not change the least-squares loss."""
    rng = check_random_state(13)
    targets = rng.rand(100)
    raw_predictions = rng.rand(100)

    loss = LeastSquaresError(1)
    unweighted = loss(targets, raw_predictions)
    weighted = loss(targets, raw_predictions,
                    np.ones(raw_predictions.shape[0], dtype=np.float32))
    assert_almost_equal(unweighted, weighted)
def test_sample_weight_init_estimators():
    """Smoke test for init estimators with (unit) sample weights.

    With all weights equal to one, the weighted fit must reproduce the
    unweighted predictions exactly.
    """
    rng = check_random_state(13)
    X = rng.rand(100, 2)
    sample_weight = np.ones(100)
    reg_y = rng.rand(100)
    clf_y = rng.randint(0, 2, size=100)
    for Loss in LOSS_FUNCTIONS.values():
        if Loss is None:
            continue
        if issubclass(Loss, RegressionLossFunction):
            k = 1
            y = reg_y
        else:
            k = 2
            y = clf_y
        if Loss.is_multi_class:
            # skip multiclass
            continue
        loss = Loss(k)
        init_est = loss.init_estimator()
        init_est.fit(X, y)
        out = init_est.predict(X)
        assert_equal(out.shape, (y.shape[0], 1))

        sw_init_est = loss.init_estimator()
        sw_init_est.fit(X, y, sample_weight=sample_weight)
        # BUG FIX: predict with the *weighted* estimator. The original
        # called init_est.predict(X) again, so the comparison below
        # compared the unweighted predictions with themselves.
        sw_out = sw_init_est.predict(X)
        assert_equal(sw_out.shape, (y.shape[0], 1))

        # check if predictions match (unit weights are a no-op)
        assert_array_equal(out, sw_out)
def test_weighted_percentile():
    """The weighted median must ignore the zero-weight outlier."""
    # np.float was removed in NumPy 1.24; builtin float is the same dtype.
    y = np.empty(102, dtype=float)
    y[:50] = 0
    y[-51:] = 2
    y[-1] = 100000  # huge outlier ...
    y[50] = 1
    sw = np.ones(102, dtype=float)
    sw[-1] = 0.0    # ... but it carries zero weight
    score = _weighted_percentile(y, sw, 50)
    assert score == 1
def test_weighted_percentile_equal():
    """Weighted median of an all-zero array is zero."""
    # np.float was removed in NumPy 1.24; builtin float is the same dtype.
    y = np.empty(102, dtype=float)
    y.fill(0.0)
    sw = np.ones(102, dtype=float)
    sw[-1] = 0.0
    score = _weighted_percentile(y, sw, 50)
    assert score == 0
def test_weighted_percentile_zero_weight():
    """All-zero weights: the percentile of a constant array is that constant."""
    # np.float was removed in NumPy 1.24; builtin float is the same dtype.
    y = np.empty(102, dtype=float)
    y.fill(1.0)
    sw = np.ones(102, dtype=float)
    sw.fill(0.0)
    score = _weighted_percentile(y, sw, 50)
    assert score == 1.0
def test_sample_weight_deviance():
    """Deviance with unit sample weights equals the unweighted deviance."""
    rng = check_random_state(13)
    # NOTE: the draws below must stay in this order so the generated data
    # matches the original test exactly (X itself is unused).
    X = rng.rand(100, 2)
    sample_weight = np.ones(100)
    reg_y = rng.rand(100)
    clf_y = rng.randint(0, 2, size=100)
    mclf_y = rng.randint(0, 3, size=100)

    for Loss in LOSS_FUNCTIONS.values():
        if Loss is None:
            continue
        if issubclass(Loss, RegressionLossFunction):
            k, y, p = 1, reg_y, reg_y
        else:
            k, y, p = 2, clf_y, clf_y
        if Loss.is_multi_class:
            k, y = 3, mclf_y
            # one-hot encode the labels as "predictions"
            p = np.zeros((y.shape[0], k), dtype=np.float64)
            for label in range(k):
                p[:, label] = y == label
        loss = Loss(k)
        weighted = loss(y, p, sample_weight)
        unweighted = loss(y, p)
        assert unweighted == weighted
| bsd-3-clause |
vivekmishra1991/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>,
# Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
marcocaccin/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
    """Time exact SciPy SVD against scikit-learn's randomized SVD.

    For every (n_samples, n_features) pair a low-rank matrix is generated
    and decomposed three ways: exact `scipy.linalg.svd`, randomized SVD
    with no power iterations, and randomized SVD with `n_iter` iterations.

    Parameters
    ----------
    samples_range, features_range : iterables of int
        Grid of matrix shapes to benchmark.
    n_iter : int
        Number of power iterations for the second randomized run.
    rank : int
        Effective rank of the generated matrices and of the truncated SVD.

    Returns
    -------
    dict mapping a method label to the list of elapsed times (seconds),
    one entry per benchmarked shape.
    """
    it = 0
    # Idiomatic: pass the `list` constructor directly instead of `lambda: []`.
    results = defaultdict(list)
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            X = make_low_rank_matrix(n_samples, n_features,
                                     effective_rank=rank,
                                     tail_strength=0.2)

            # Collect garbage before each timing so earlier allocations
            # do not pollute the measured interval.
            gc.collect()
            print("benchmarking scipy svd: ")
            tstart = time()
            svd(X, full_matrices=False)
            results['scipy svd'].append(time() - tstart)

            gc.collect()
            print("benchmarking scikit-learn randomized_svd: n_iter=0")
            tstart = time()
            randomized_svd(X, rank, n_iter=0)
            results['scikit-learn randomized_svd (n_iter=0)'].append(
                time() - tstart)

            gc.collect()
            print("benchmarking scikit-learn randomized_svd: n_iter=%d "
                  % n_iter)
            tstart = time()
            randomized_svd(X, rank, n_iter=n_iter)
            results['scikit-learn randomized_svd (n_iter=%d)'
                    % n_iter].append(time() - tstart)

    return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
liuwenf/moose | modules/porous_flow/doc/tests/mandel.py | 6 | 8844 | #!/usr/bin/env python
# Solution to Mandel's problem as presented in
# AHD Cheng and E Detournay "A direct boundary element method for plane strain poroelasticity" International Journal of Numerical and Analytical Methods in Geomechanics 12 (1988) 551-572
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected(x, t):
    """Analytic solution of Mandel's problem (Cheng & Detournay 1988).

    Returns the tuple (vert_disp, hor_disp, porepressure) at horizontal
    position x and time t for the fixed material parameters below.
    """
    # geometry and material parameters of the verification problem
    soil_width = 1.0
    soil_height = 0.1
    soil_lame_lambda = 0.5
    soil_lame_mu = 0.75
    fluid_bulk_modulus = 8.0
    initial_porosity = 0.1
    biot_coeff = 0.6
    fluid_mobility = 1.5
    normal_stress = 1.0

    # derived poroelastic constants
    shear = soil_lame_mu
    drained_bulk = soil_lame_lambda + 2.0 * soil_lame_mu / 3.0
    biot_modulus = 1.0 / (initial_porosity / fluid_bulk_modulus + (biot_coeff - initial_porosity) * (1.0 - biot_coeff) / drained_bulk)
    undrained_bulk = drained_bulk + biot_coeff**2 * biot_modulus
    skempton = biot_coeff * biot_modulus / undrained_bulk
    nu = (3.0 * drained_bulk - 2.0 * shear) / (6.0 * drained_bulk + 2.0 * shear)
    nu_u = (3.0 * undrained_bulk - 2.0 * shear) / (6.0 * undrained_bulk + 2.0 * shear)
    consolidation_coeff = 2.0 * fluid_mobility * skempton**2 * shear * (1.0 - nu) * (1 + nu_u)**2 / 9.0 / (1.0 - nu_u) / (nu_u - nu)

    # tabulated roots of the transcendental equation of the series solution
    roots = [1.419988120304100E+00, 4.666177581823210E+00, 7.826417353528760E+00, 1.097591703059930E+01, 1.412188800507350E+01, 1.726626279765500E+01, 2.040978005325610E+01, 2.355278342938330E+01, 2.669545454962390E+01, 2.983789845132980E+01, 3.298018011077390E+01, 3.612234188229790E+01]

    # common sub-expressions of the series terms
    sin_frac = [np.sin(w) / (w - np.sin(w) * np.cos(w)) for w in roots]
    sincos_frac = [np.sin(w) * np.cos(w) / (w - np.sin(w) * np.cos(w)) for w in roots]
    decay = [np.exp(- w**2 * consolidation_coeff * t / soil_width**2) for w in roots]

    # displacement series (valid for t > 0; t = 0 needs the undrained form,
    # kept here commented out in the original)
    series_d = sum(sincos_frac[i] * decay[i] for i in range(len(roots)))
    vert_disp = - normal_stress * (1.0 - nu) * soil_height / 2.0 / shear / soil_width + normal_stress * (1.0 - nu_u) * soil_height / shear / soil_width * series_d
    hor_disp = normal_stress * nu / 2.0 / shear + normal_stress * (1.0 - nu_u) / shear * series_d

    # porepressure series
    series_p = sum((sin_frac[i] * np.cos(roots[i] * x / soil_width) - sincos_frac[i]) * decay[i] for i in range(len(roots)))
    porepressure = 2.0 * normal_stress * skempton * (1.0 + nu_u) / 3.0 / soil_width * series_p

    return (vert_disp, hor_disp, porepressure)
def get_moose_results(fi):
    """Parse a MOOSE CSV output file.

    The header row is skipped and blank lines are ignored.  Returns the
    tuple (t, p0, p3, p8, force, xdisp, ydisp) taken from fixed columns
    0, 1, 4, 9, 12, 13 and 14 of each data row.
    """
    # BUG FIX: under Python 3 `map` returns a lazy iterator, which is not
    # subscriptable, so the original `d[0]` lookups raised TypeError.
    # Materialise each row as a list; `with` also guarantees the file closes.
    with open(fi) as f:
        rows = [list(map(float, line.strip().split(",")))
                for line in f.readlines()[1:] if line.strip()]
    t = [r[0] for r in rows]
    p0 = [r[1] for r in rows]
    p3 = [r[4] for r in rows]
    p8 = [r[9] for r in rows]
    force = [r[12] for r in rows]
    xdisp = [r[13] for r in rows]
    ydisp = [r[14] for r in rows]
    return (t, p0, p3, p8, force, xdisp, ydisp)
tpoints = np.arange(1E-5, 0.71, 1E-3)
moose_hm = get_moose_results("../../tests/poro_elasticity/gold/mandel.csv")
moose_constM = get_moose_results("../../tests/poro_elasticity/gold/mandel_constM.csv")
moose_fs = get_moose_results("../../tests/poro_elasticity/gold/mandel_fully_saturated.csv")
moose_fsv = get_moose_results("../../tests/poro_elasticity/gold/mandel_fully_saturated_volume.csv")
plt.figure()
plt.plot(tpoints, expected(0.0, tpoints)[2], 'k-', linewidth = 2.0, label = 'expected, x = 0')
plt.plot(tpoints, expected(0.3, tpoints)[2], 'r-', linewidth = 2.0, label = 'expected, x = 0.3')
plt.plot(tpoints, expected(0.8, tpoints)[2], 'b-', linewidth = 2.0, label = 'expected, x = 0.8')
plt.plot(moose_hm[0], moose_hm[1], 'ks', markersize = 6.0, label = 'MOOSE HM, x = 0')
plt.plot(moose_hm[0], moose_hm[2], 'rs', markersize = 6.0, label = 'MOOSE HM, x = 0.3')
plt.plot(moose_hm[0], moose_hm[3], 'bs', markersize = 6.0, label = 'MOOSE HM, x = 0.8')
plt.legend(loc = 'upper right')
plt.xlabel("time")
plt.ylabel("Porepressure")
plt.title("Mandel's problem: Porepressure at points in the sample")
#plt.axis([0, 100, 199, 301])
plt.savefig("mandel_HM.pdf")
plt.figure()
plt.plot(tpoints, expected(0.0, tpoints)[2], 'k-', linewidth = 2.0, label = 'expected, x = 0')
plt.plot(tpoints, expected(0.3, tpoints)[2], 'r-', linewidth = 2.0, label = 'expected, x = 0.3')
plt.plot(tpoints, expected(0.8, tpoints)[2], 'b-', linewidth = 2.0, label = 'expected, x = 0.8')
plt.plot(moose_constM[0], moose_constM[1], 'ks', markersize = 6.0, label = 'MOOSE ConstM, x = 0')
plt.plot(moose_constM[0], moose_constM[2], 'rs', markersize = 6.0, label = 'MOOSE ConstM, x = 0.3')
plt.plot(moose_constM[0], moose_constM[3], 'bs', markersize = 6.0, label = 'MOOSE ConstM, x = 0.8')
plt.legend(loc = 'upper right')
plt.xlabel("time")
plt.ylabel("Porepressure")
plt.title("Mandel's problem: Porepressure at points in the sample")
#plt.axis([0, 100, 199, 301])
plt.savefig("mandel_ConstM.pdf")
plt.figure()
plt.plot(tpoints, expected(0.0, tpoints)[2], 'k-', linewidth = 2.0, label = 'expected, x = 0')
plt.plot(tpoints, expected(0.3, tpoints)[2], 'r-', linewidth = 2.0, label = 'expected, x = 0.3')
plt.plot(tpoints, expected(0.8, tpoints)[2], 'b-', linewidth = 2.0, label = 'expected, x = 0.8')
plt.plot(moose_fs[0], moose_fs[1], 'ks', markersize = 6.0, label = 'MOOSE FullySat, x = 0')
plt.plot(moose_fs[0], moose_fs[2], 'rs', markersize = 6.0, label = 'MOOSE FullySat, x = 0.3')
plt.plot(moose_fs[0], moose_fs[3], 'bs', markersize = 6.0, label = 'MOOSE FullySat, x = 0.8')
plt.legend(loc = 'upper right')
plt.xlabel("time")
plt.ylabel("Porepressure")
plt.title("Mandel's problem: Porepressure at points in the sample")
#plt.axis([0, 100, 199, 301])
plt.savefig("mandel_FS.pdf")
plt.figure()
plt.plot(tpoints, expected(0.0, tpoints)[2], 'k-', linewidth = 2.0, label = 'expected, x = 0')
plt.plot(tpoints, expected(0.3, tpoints)[2], 'r-', linewidth = 2.0, label = 'expected, x = 0.3')
plt.plot(tpoints, expected(0.8, tpoints)[2], 'b-', linewidth = 2.0, label = 'expected, x = 0.8')
plt.plot(moose_fsv[0], moose_fsv[1], 'ks', markersize = 6.0, label = 'MOOSE FullySatVol, x = 0')
plt.plot(moose_fsv[0], moose_fsv[2], 'rs', markersize = 6.0, label = 'MOOSE FullySatVol, x = 0.3')
plt.plot(moose_fsv[0], moose_fsv[3], 'bs', markersize = 6.0, label = 'MOOSE FullySatVol, x = 0.8')
plt.legend(loc = 'upper right')
plt.xlabel("time")
plt.ylabel("Porepressure")
plt.title("Mandel's problem: Porepressure at points in the sample")
#plt.axis([0, 100, 199, 301])
plt.savefig("mandel_FSV.pdf")
plt.figure()
plt.plot(tpoints, expected(0.0, tpoints)[1], 'k-', linewidth = 2.0, label = 'expected')
plt.plot(moose_hm[0], moose_hm[5], 'ks', markersize = 6.0, label = 'MOOSE HM')
plt.plot(moose_constM[0], moose_constM[5], 'gx', markersize = 9.0, label = 'MOOSE ConstM')
plt.plot(moose_fs[0], moose_fs[5], 'b^', markersize = 6.0, label = 'MOOSE FullySat')
plt.plot(moose_fsv[0], moose_fsv[5], 'r*', markersize = 4.0, label = 'MOOSE FullySatVol')
plt.legend(loc = 'upper right')
plt.xlabel("time")
plt.ylabel("Displacement")
plt.title("Mandel's problem: Platten horizontal displacement")
plt.savefig("mandel_hor_disp.pdf")
plt.figure()
plt.plot(tpoints, expected(0.0, tpoints)[0], 'k-', linewidth = 2.0, label = 'expected')
plt.plot(moose_hm[0], moose_hm[6], 'ks', markersize = 6.0, label = 'MOOSE HM')
plt.plot(moose_constM[0], moose_constM[6], 'gx', markersize = 9.0, label = 'MOOSE ConstM')
plt.plot(moose_fs[0], moose_fs[6], 'b^', markersize = 6.0, label = 'MOOSE FullySat')
plt.plot(moose_fsv[0], moose_fsv[6], 'r*', markersize = 4.0, label = 'MOOSE FullySatVol')
plt.legend(loc = 'upper right')
plt.xlabel("time")
plt.ylabel("Displacement")
plt.title("Mandel's problem: Platten vertical displacement")
plt.savefig("mandel_ver_disp.pdf")
plt.figure()
plt.plot(moose_hm[0], moose_hm[4], 'k-', markersize = 6.0, label = 'MOOSE HM')
plt.plot(moose_constM[0], moose_constM[4], 'g-', markersize = 9.0, label = 'MOOSE ConstM')
plt.plot(moose_fs[0], moose_fs[4], 'b-', markersize = 6.0, label = 'MOOSE FullySat')
plt.plot(moose_fsv[0], moose_fsv[4], 'r-', markersize = 4.0, label = 'MOOSE FullySatVol')
plt.legend(loc = 'upper right')
plt.xlabel("time")
plt.ylabel("Force")
plt.title("Mandel's problem: Total downwards force")
plt.savefig("mandel_force.pdf")
sys.exit(0)
| lgpl-2.1 |
NeuroDataDesign/seelviz | Tony/clviz_web_tony_edits/densitygraph.py | 1 | 12529 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from __future__ import print_function
__author__ = 'seelviz'
# import matplotlib as mpl
# mpl.use('Agg')
from skimage import data, img_as_float
from skimage import exposure
import plotly
from plotly.graph_objs import *
import cv2
import collections as col
import math, os, gc
import numpy as np
import nibabel as nib
# Tony's get_brain_figure stuff
from plotly.offline import download_plotlyjs#, init_notebook_mode, iplot
from plotly import tools
#plotly.offline.init_notebook_mode()
import networkx as nx
import pandas as pd
import re
class densitygraph(object):
"""This class includes all the calculations nad operations necessary to go from a graphml of the brain to a graph that includes edges and is colored according to density of nodes."""
#def generate_density_graph(self):
#def get_brain_figure(self, g, plot_title=''):
#def generate_heat_map(self):
def __init__(self, token, graph_path=None):
    """Load the brain graph for *token*.

    Args:
        token: dataset token, used to locate "<token>/<token>.graphml".
        graph_path: optional pre-loaded graph.  When given it is stored
            as-is instead of reading the default graphml file.
            NOTE(review): despite the name, the value is stored directly
            and never read from disk -- confirm callers pass a graph object.
    """
    self._token = token
    if graph_path is None:  # identity test, not `== None`
        self._graph = nx.read_graphml(token + '/' + token + '.graphml')
    else:
        self._graph = graph_path
    # Populated by generate_density_graph().
    self._sortedList = None
    self._maxEdges = 0
    self._scaledEdges = 0
    # CASING FIX: the plotting methods read self._heatMapBrain; the original
    # initialised a never-used lowercase `_heatmapbrain` attribute instead.
    self._heatMapBrain = None
def generate_density_graph(self):
    """Compute per-node edge counts and the red density colour scale.

    Populates self._sortedList, self._maxEdges, self._scaledEdges and
    self._heatMapBrain from self._graph.  Prints summary statistics of
    the edge-count distribution as a side effect.
    """
    G = self._graph

    # Maximum number of incident edges over nodes named s0..sN.
    # (NOTE(review): the loop bound is the number of *edges*, as in the
    # original -- presumably len(edges) >= len(nodes) for these graphs.)
    maxEdges = 0
    for i in range(len(G.edges())):
        if len(G.edges('s' + str(i))) > maxEdges:
            maxEdges = len(G.edges('s' + str(i)))

    # Edge count for each node, keyed by node name ('s1' -> count).
    # `G.nodes()` works on both networkx 1.x and 2.x (nodes_iter was removed).
    storageDict = {}
    for n in G.nodes():
        storageDict[n] = len(G.edges(n))

    # BUG FIX: the original sort key used a Python-2-only tuple-unpacking
    # lambda (`lambda (key, value): ...`), a SyntaxError under Python 3.
    orderedNodesEdgeCounts = col.OrderedDict(
        sorted(storageDict.items(), key=lambda kv: int(kv[0].split('s')[1])))

    # Edge counts in ascending order, for statistics and the heat-map legend.
    sortedList = sorted(storageDict.values())

    statisticsArray = np.array(sortedList)
    averageNumberEdges = np.mean(statisticsArray)
    stdNumberEdges = np.std(statisticsArray)
    print("average edge count:")
    print(averageNumberEdges)
    print("standard deviation edge count: ")
    print(stdNumberEdges)

    # 95th percentile (z = 1.96); printed only -- scaling below uses maxEdges.
    upperLimit = averageNumberEdges + 1.96 * stdNumberEdges
    print("95th percentile: ")
    print(upperLimit)

    # numberEdges[i] is the edge count of node 's(i+1)' (plot ordering).
    numberEdges = []
    for i in range(1, len(G.nodes()) + 1):
        numberEdges.append(orderedNodesEdgeCounts['s' + str(i)])

    # Scale each node's edge count into [0, 1] by the maximum.
    self._scaledEdges = [float(numberEdges[i]) / float(maxEdges)
                         for i in range(len(numberEdges))]

    # Red colour ramp (zero-density nodes are black).  A rainbow
    # false-colour map used previously was removed as hard to interpret.
    self._heatMapBrain = [
        [0, 'rgb(0, 0, 0)'],  # black
        [0.1, '#7f0000'],
        [0.2, '#7f0000'],
        [0.2, '#b30000'],
        [0.3, '#b30000'],
        [0.3, '#d7301f'],
        [0.4, '#d7301f'],
        [0.4, '#ef6548'],
        [0.5, '#ef6548'],
        [0.5, '#fc8d59'],
        [0.6, '#fc8d59'],
        [0.6, '#fdbb84'],
        [0.7, '#fdbb84'],
        [0.7, '#fdd49e'],
        [0.8, '#fdd49e'],
        [0.8, '#fee8c8'],
        [0.9, '#fee8c8'],
        [0.9, '#fff7ec'],
        [1.0, '#fff7ec']
    ]
    self._sortedList = sortedList
    self._maxEdges = maxEdges
def get_brain_figure(self, g, plot_title=''):
    """
    Returns the plotly figure object for vizualizing a 3d brain network.

    g: networkX object of brain; each node must carry an 'attr' attribute
       containing three integers (its x, y, z coordinates) embedded in a
       string.
    plot_title: title string for the plotly layout.
    """
    # grab the node positions from the graphML file
    V = nx.number_of_nodes(g)
    attributes = nx.get_node_attributes(g,'attr')
    node_positions_3d = pd.DataFrame(columns=['x', 'y', 'z'], index=range(V))
    for n in g.nodes_iter():
        # The three integers embedded in the 'attr' string are x, y, z.
        node_positions_3d.loc[n] = [int((re.findall('\d+', str(attributes[n])))[0]), int((re.findall('\d+', str(attributes[n])))[1]), int((re.findall('\d+', str(attributes[n])))[2])]

    # grab edge endpoints (computed but unused: the edge trace below is
    # commented out)
    edge_x = []
    edge_y = []
    edge_z = []
    for e in g.edges_iter():
        source_pos = node_positions_3d.loc[e[0]]
        target_pos = node_positions_3d.loc[e[1]]
        edge_x += [source_pos['x'], target_pos['x'], None]
        edge_y += [source_pos['y'], target_pos['y'], None]
        edge_z += [source_pos['z'], target_pos['z'], None]

    # Node coordinates in node-name order s1..sN (matches _scaledEdges).
    Xlist = []
    for i in range(1, len(g.nodes()) + 1):
        Xlist.append(int((re.findall('\d+', str(attributes['s' + str(i)])))[0]))
    Ylist = []
    for i in range(1, len(g.nodes()) + 1):
        Ylist.append(int((re.findall('\d+', str(attributes['s' + str(i)])))[1]))
    Zlist = []
    for i in range(1, len(g.nodes()) + 1):
        Zlist.append(int((re.findall('\d+', str(attributes['s' + str(i)])))[2]))

    # node style: colour each node by its scaled edge count using the
    # heat map computed in generate_density_graph().
    # NOTE(review): opacity=0 makes the markers fully transparent -- confirm
    # this is intended.
    node_trace = Scatter3d(x=Xlist,
                           y=Ylist,
                           z=Zlist,
                           mode='markers',
                           # name='regions',
                           marker=Marker(symbol='dot',
                                         size=6,
                                         opacity=0,
                                         color=self._scaledEdges,
                                         colorscale=self._heatMapBrain),
                           # text=[str(r) for r in range(V)],
                           # text=atlas_data['nodes'],
                           hoverinfo='text')
    # edge style
    '''edge_trace = Scatter3d(x=edge_x,
                           y=edge_y,
                           z=edge_z,
                           mode='lines',
                           line=Line(color='cyan', width=1),
                           hoverinfo='none')'''
    # axis style: hide all axis decorations for a clean 3d view
    axis = dict(showbackground=False,
                showline=False,
                zeroline=False,
                showgrid=False,
                showticklabels=False)
    # overall layout (black background)
    layout = Layout(title=plot_title,
                    width=800,
                    height=900,
                    showlegend=False,
                    scene=Scene(xaxis=XAxis(axis),
                                yaxis=YAxis(axis),
                                zaxis=ZAxis(axis)),
                    margin=Margin(t=50),
                    hovermode='closest',
                    paper_bgcolor='rgba(1,1,1,1)',
                    plot_bgcolor='rgb(1,1,1)')
    data = Data([node_trace])
    fig = Figure(data=data, layout=layout)
    return fig
def generate_heat_map(self):
    """Build a plotly figure visualising the colour scale itself: one
    marker per distinct edge count, coloured with self._heatMapBrain.

    Must be called after generate_density_graph() (reads _sortedList,
    _maxEdges and _heatMapBrain).  Returns the plotly Figure.
    """
    # Get list of all possible number of edges, in order
    # (NOTE(review): `list(set(...))` does not guarantee ascending order.)
    setOfAllPossibleNumEdges = set(self._sortedList)
    listOfAllPossibleNumEdges = list(setOfAllPossibleNumEdges)
    #listOfAllScaledEdgeValues = [listOfAllPossibleNumEdges[i]/upperLimit for i in range(len(listOfAllPossibleNumEdges))]
    # Edge counts scaled by the maximum; currently unused below.
    listOfAllScaledEdgeValues = [listOfAllPossibleNumEdges[i]/float(self._maxEdges) for i in range(len(listOfAllPossibleNumEdges))]
    #heatMapBrain
    data = Data([
        Scatter(
            y=listOfAllPossibleNumEdges,
            marker=Marker(
                size=16,
                color=listOfAllPossibleNumEdges,
                colorbar=ColorBar(
                    title='Colorbar'
                ),
                colorscale=self._heatMapBrain,
            ),
            mode='markers')
    ])
    layout = Layout(title=self._token + ' false coloration scheme',
                    width=800,
                    height=900,
                    showlegend=False,
                    margin=Margin(t=50),
                    hovermode='closest',
                    xaxis=dict(
                        title='Number of Unique Colors',
                        titlefont=dict(
                            family='Courier New, monospace',
                            size=18,
                            color='#000000')
                    ),
                    yaxis=dict(
                        title='Number of Edges',
                        titlefont=dict(
                            family='Courier New, monospace',
                            size=18,
                            color='#000000')
                    ),
                    paper_bgcolor='rgba(255,255,255,255)',
                    plot_bgcolor='rgb(255,255,255)')
    mapping = Figure(data=data, layout=layout)
    #iplot(mapping, validate=False)
    #plotly.offline.plot(mapping, filename = self._token + '/' + self._token + 'heatmap' + '.html')
    return mapping
| apache-2.0 |
bsipocz/scikit-image | doc/examples/plot_ssim.py | 15 | 2238 | """
===========================
Structural similarity index
===========================
When comparing images, the mean squared error (MSE)--while simple to
implement--is not highly indicative of perceived similarity. Structural
similarity aims to address this shortcoming by taking texture into account
[1]_, [2]_.
The example shows two modifications of the input image, each with the same MSE,
but with very different mean structural similarity indices.
.. [1] Zhou Wang; Bovik, A.C.; ,"Mean squared error: Love it or leave it? A new
look at Signal Fidelity Measures," Signal Processing Magazine, IEEE,
vol. 26, no. 1, pp. 98-117, Jan. 2009.
.. [2] Z. Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli, "Image quality
assessment: From error visibility to structural similarity," IEEE
Transactions on Image Processing, vol. 13, no. 4, pp. 600-612,
Apr. 2004.
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from skimage import data, img_as_float
from skimage.measure import structural_similarity as ssim
matplotlib.rcParams['font.size'] = 9
img = img_as_float(data.camera())
rows, cols = img.shape
noise = np.ones_like(img) * 0.2 * (img.max() - img.min())
noise[np.random.random(size=noise.shape) > 0.5] *= -1
def mse(x, y):
    """Return the mean squared error between images *x* and *y*.

    BUG FIX: the original returned ``np.linalg.norm(x - y)`` -- the square
    root of the *sum* of squared differences -- which is not the MSE the
    plot labels claim.  The mean of squared differences is the MSE.
    """
    return np.mean((x - y) ** 2)
img_noise = img + noise
img_const = img + abs(noise)
fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(8, 4))
mse_none = mse(img, img)
ssim_none = ssim(img, img, dynamic_range=img.max() - img.min())
mse_noise = mse(img, img_noise)
ssim_noise = ssim(img, img_noise, dynamic_range=img_const.max() - img_const.min())
mse_const = mse(img, img_const)
ssim_const = ssim(img, img_const, dynamic_range=img_noise.max() - img_noise.min())
label = 'MSE: %2.f, SSIM: %.2f'
ax0.imshow(img, cmap=plt.cm.gray, vmin=0, vmax=1)
ax0.set_xlabel(label % (mse_none, ssim_none))
ax0.set_title('Original image')
ax1.imshow(img_noise, cmap=plt.cm.gray, vmin=0, vmax=1)
ax1.set_xlabel(label % (mse_noise, ssim_noise))
ax1.set_title('Image with noise')
ax2.imshow(img_const, cmap=plt.cm.gray, vmin=0, vmax=1)
ax2.set_xlabel(label % (mse_const, ssim_const))
ax2.set_title('Image plus constant')
plt.show()
| bsd-3-clause |
xhqu1981/pymatgen | pymatgen/analysis/pourbaix/plotter.py | 1 | 34818 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import platform
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import FormatStrFormatter
"""
This module provides classes for plotting Pourbaix objects.
"""
import six
from six.moves import map
from six.moves import zip
__author__ = "Sai Jayaraman"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Sai Jayaraman"
__email__ = "sjayaram@mit.edu"
__status__ = "Production"
__date__ = "Jan 26, 2012"
import numpy as np
import re
import collections
from pymatgen.analysis.pourbaix.analyzer import PourbaixAnalyzer
from pymatgen.analysis.pourbaix.maker import PREFAC
from pymatgen.analysis.pourbaix.entry import MultiEntry
from pymatgen.phasediagram.plotter import uniquelines
from pymatgen.util.string import latexify
from pymatgen.util.plotting import pretty_plot
from pymatgen.util.coord_utils import in_coord_list
class PourbaixPlotter(object):
"""
A plotter class for phase diagrams.
Args:
phasediagram: A PhaseDiagram object.
show_unstable: Whether unstable phases will be plotted as well as
red crosses. Defaults to False.
"""
def __init__(self, pourbaixdiagram, show_unstable=False):
    """
    Args:
        pourbaixdiagram: the PourbaixDiagram object to plot.
        show_unstable: whether unstable phases are plotted as well as
            red crosses. Defaults to False.
    """
    self._pd = pourbaixdiagram
    # Unique convex-hull facet edges, as pairs of entry indices.
    self.lines = uniquelines(self._pd.facets)
    self.show_unstable = show_unstable
@property
def pourbaix_hull_plot_data(self):
    """
    Pourbaix diagram convex hull data.

    Returns:
        (lines, stable_entries, unstable_entries)
        - lines: list of [x, y, z] coordinate-pair lists, one per hull edge.
        - stable_entries: {endpoint coordinate: entry} for each stable node
          (each coordinate can only have one stable phase).
        - unstable_entries: {entry: coordinate} for all unstable nodes.
    """
    diagram = self._pd
    hull_entries = diagram.qhull_entries
    hull_data = np.array(diagram.qhull_data)

    lines = []
    stable_entries = {}
    for i, j in self.lines:
        # One segment per facet edge, as parallel x/y/z coordinate pairs.
        segment = [[hull_data[i][axis], hull_data[j][axis]]
                   for axis in range(3)]
        lines.append(segment)
        endpoints = list(zip(*segment))
        stable_entries[endpoints[0]] = hull_entries[i]
        stable_entries[endpoints[1]] = hull_entries[j]

    # Unstable entries are recorded as single (degenerate) points.
    all_data = np.array(diagram.qhull_data)
    stable = diagram.stable_entries
    unstable_entries = {}
    for idx, entry in enumerate(diagram.all_entries):
        if entry not in stable:
            point = [[all_data[idx][axis]] * 2 for axis in range(3)]
            unstable_entries[entry] = list(zip(*point))[0]

    return lines, stable_entries, unstable_entries
def show(self, label_stable=True, label_unstable=False, filename=""):
    """
    Draw the convex hull diagram with matplotlib.

    Shows the figure interactively when *filename* is empty; otherwise
    writes it to that file.
    """
    hull_plot = self._get_plot(label_stable=label_stable,
                               label_unstable=label_unstable)
    if filename == "":
        hull_plot.show()
    else:
        hull_plot.savefig(filename, bbox_inches=0)
def _get_plot(self, label_stable=True, label_unstable=False):
    """
    Plot convex hull of Pourbaix Diagram entries.

    Stable entries are drawn as a 3d wireframe; each labelled vertex gets
    a running index whose index -> formula mapping is written into the
    figure margin via figtext.
    """
    import matplotlib.pyplot as plt
    import mpl_toolkits.mplot3d.axes3d as p3
    from matplotlib.font_manager import FontProperties
    fig = plt.figure()
    ax = p3.Axes3D(fig)
    font = FontProperties()
    font.set_weight("bold")
    font.set_size(14)
    (lines, labels, unstable) = self.pourbaix_hull_plot_data
    count = 1
    newlabels = list()
    # hull edges: blue lines with red vertex markers
    for x, y, z in lines:
        ax.plot(x, y, z, "bo-", linewidth=3, markeredgecolor="b",
                markerfacecolor="r", markersize=10)
    for coords in sorted(labels.keys()):
        entry = labels[coords]
        label = self.print_name(entry)
        if label_stable:
            # index at the vertex; full formula goes into the margin text
            ax.text(coords[0], coords[1], coords[2], str(count))
            newlabels.append("{} : {}".format(
                count, latexify_ion(latexify(label))))
            count += 1
    if label_unstable:
        # unstable entries: single green markers at their coordinates
        for entry in unstable.keys():
            label = self.print_name(entry)
            coords = unstable[entry]
            ax.plot([coords[0], coords[0]], [coords[1], coords[1]],
                    [coords[2], coords[2]], "bo", markerfacecolor="g",
                    markersize=10)
            ax.text(coords[0], coords[1], coords[2], str(count))
            newlabels.append("{} : {}".format(
                count, latexify_ion(latexify(label))))
            count += 1
    plt.figtext(0.01, 0.01, "\n".join(newlabels))
    plt.xlabel("pH")
    plt.ylabel("V")
    return plt
def plot_planes(self):
    """
    Plot the free energy facets (one plane per entry) as a function of
    pH and V and show the figure.
    """
    if self.show_unstable:
        entries = self._pd._all_entries
    else:
        entries = self._pd.stable_entries
    num_plots = len(entries)
    import matplotlib.pyplot as plt
    colormap = plt.cm.gist_ncar
    fig = plt.figure().gca(projection='3d')
    # one distinct color per entry, sampled from the colormap
    color_array = [colormap(i) for i in np.linspace(0, 0.9, num_plots)]
    labels = []
    color_index = -1
    for entry in entries:
        # The plane g(pH, V) = -PREFAC*npH*pH - nPhi*V + g0, expressed via
        # its normal vector so it can be evaluated over the mesh below.
        normal = np.array([-PREFAC * entry.npH, -entry.nPhi, +1])
        d = entry.g0
        color_index += 1
        pH, V = np.meshgrid(np.linspace(-10, 28, 100),
                            np.linspace(-3, 3, 100))
        g = (-normal[0] * pH - normal[1] * V + d) / normal[2]
        lbl = latexify_ion(
            latexify(entry._entry.composition.reduced_formula))
        labels.append(lbl)
        fig.plot_surface(pH, V, g, color=color_array[color_index],
                         label=lbl)
    plt.legend(labels)
    plt.xlabel("pH")
    plt.ylabel("E (V)")
    plt.show()
def plot_chempot_range_map(self, limits=None, title="", filename=""):
    """Backwards-compatible alias: delegates to :meth:`plot_pourbaix`."""
    self.plot_pourbaix(limits, title, filename)
def plot_pourbaix(self, limits=None, title="", filename="", label_domains=True):
    """
    Plot the Pourbaix diagram.

    Args:
        limits: 2D list [[xlo, xhi], [ylo, yhi]] of diagram limits
        title: plot title
        filename: if empty, show interactively; otherwise save the figure
            to this path
        label_domains: whether to annotate each domain with its name
    """
    plt = self.get_pourbaix_plot(limits=limits, title=title,
                                 label_domains=label_domains)
    if filename == "":
        plt.show()
    else:
        f = plt.gcf()
        f.set_size_inches((11.5, 9))
        plt.tight_layout(pad=1.09)
        # Bug fix: the figure was resized and laid out but never written;
        # save it like show()/write_image() do.
        plt.savefig(filename)
def pourbaix_plot_data(self, limits=None):
    """
    Get data required to plot Pourbaix diagram.

    Args:
        limits: 2D list containing limits of the Pourbaix diagram
            of the form [[xlo, xhi], [ylo, yhi]]

    Returns:
        stable_entries, unstable_entries
        stable_entries: dict of lines. The keys are Pourbaix Entries, and
        lines are in the form of a list
        unstable_entries: list of unstable entries
    """
    analyzer = PourbaixAnalyzer(self._pd)
    # keep the analyzer around: other methods (get_pourbaix_plot, ...)
    # read self._analyzer.chempot_limits afterwards
    self._analyzer = analyzer
    if limits:
        analyzer.chempot_limits = limits
    chempot_ranges = analyzer.get_chempot_range_map(limits)
    self.chempot_ranges = chempot_ranges
    stable_entries_list = collections.defaultdict(list)
    for entry in chempot_ranges:
        for line in chempot_ranges[entry]:
            # store each boundary segment as [[x1, x2], [y1, y2]]
            x = [line.coords[0][0], line.coords[1][0]]
            y = [line.coords[0][1], line.coords[1][1]]
            coords = [x, y]
            stable_entries_list[entry].append(coords)
    unstable_entries_list = [entry for entry in self._pd.all_entries
                             if entry not in self._pd.stable_entries]
    return stable_entries_list, unstable_entries_list
def get_center(self, lines):
    """
    Return the centroid of the distinct vertices of a domain. Useful
    for labeling a Pourbaix plot.

    Args:
        lines: list of boundary segments ([[x1, x2], [y1, y2]]) for the
            domain.

    Returns:
        (center_x, center_y): centroid of the unique vertices, or
        (0.0, 0.0) when the domain has no vertices.
    """
    seen = []
    xs = []
    ys = []
    for segment in lines:
        for vertex in np.array(segment).T:
            # skip duplicated endpoints shared by adjacent segments
            if in_coord_list(seen, vertex):
                continue
            seen.append(vertex.tolist())
            xs.append(vertex[0])
            ys.append(vertex[1])
    npts = float(len(xs))
    if npts == 0.0:
        return 0.0, 0.0
    return sum(xs) / npts, sum(ys) / npts
def get_distribution_corrected_center(self, lines, h2o_h_line=None, h2o_o_line=None, radius=None):
    """
    Returns coordinates of distribution corrected center of a domain. Similar to get_center(), but
    considers the distance to the surrounding lines that mostly affects the feeling of "center".
    This function will also try to avoid overlapping the text label with the H2O stability line
    if the H2O stability line is provided. Useful for labeling a Pourbaix plot.

    Args:
        lines: Lines corresponding to a domain
        h2o_h_line: Hydrogen line of H2O stability
        h2o_o_line: Oxygen line of H2O stability
        radius: Half height of the text label.

    Returns:
        center_x, center_y:
            x,y coordinate of center of domain. If domain lies
            outside limits, center will lie on the boundary.
    """
    # collect the unique vertices of the domain
    coords = []
    pts_x = []
    pts_y = []
    for line in lines:
        for coord in np.array(line).T:
            if not in_coord_list(coords, coord):
                coords.append(coord.tolist())
                cx = coord[0]
                cy = coord[1]
                pts_x.append(cx)
                pts_y.append(cy)
    if len(pts_x) < 1:
        return 0.0, 0.0
    # first guess: center of the bounding box of the vertices
    cx_1 = (max(pts_x) + min(pts_x)) / 2.0
    cy_1 = (max(pts_y) + min(pts_y)) / 2.0
    mid_x_list = []
    mid_y_list = []
    # move the center to the center of surrounding lines
    for line in lines:
        (x1, y1), (x2, y2) = np.array(line).T
        if (x1 - cx_1) * (x2 - cx_1) <= 0.0:
            # segment spans the vertical through cx_1 ("horizontal line"):
            # record where it crosses that vertical
            mid_y = ((y2 - y1) / (x2 - x1)) * (cx_1 - x1) + y1
            assert (y2 - mid_y) * (y1 - mid_y) <= 0.0
            mid_y_list.append(mid_y)
        if (y1 - cy_1) * (y2 - cy_1) <= 0.0:
            # segment spans the horizontal through cy_1 ("vertical line")
            mid_x = ((x2 - x1) / (y2 - y1)) * (cy_1 - y1) + x1
            assert (x2 - mid_x) * (x1 - mid_x) <= 0.0
            mid_x_list.append(mid_x)
    # NOTE(review): since cy_1 is itself appended to each candidate list,
    # sorted(...)[0] / sorted(...)[-1] always select cy_1/cx_1 back, so the
    # "correction" appears to collapse to the bounding-box center --
    # confirm whether indices [0]/[-1] are inverted here.
    upper_y = sorted([y for y in mid_y_list if y >= cy_1] + [cy_1])[0]
    lower_y = sorted([y for y in mid_y_list if y < cy_1] + [cy_1])[-1]
    left_x = sorted([x for x in mid_x_list if x <= cx_1] + [cx_1])[-1]
    right_x = sorted([x for x in mid_x_list if x > cx_1] + [cx_1])[0]
    center_x = (left_x + right_x) / 2.0
    center_y = (upper_y + lower_y) / 2.0
    if h2o_h_line is not None:
        # nudge the label away from the H2O stability lines if it would
        # fall within *radius* of either of them
        (h2o_h_x1, h2o_h_y1), (h2o_h_x2, h2o_h_y2) = h2o_h_line.T
        h_slope = (h2o_h_y2 - h2o_h_y1) / (h2o_h_x2 - h2o_h_x1)
        (h2o_o_x1, h2o_o_y1), (h2o_o_x2, h2o_o_y2) = h2o_o_line.T
        o_slope = (h2o_o_y2 - h2o_o_y1) / (h2o_o_x2 - h2o_o_x1)
        h_y = h_slope * (cx_1 - h2o_h_x1) + h2o_h_y1
        o_y = o_slope * (cx_1 - h2o_o_x1) + h2o_o_y1
        h2o_y = None
        if abs(center_y - h_y) < radius:
            h2o_y = h_y
        elif abs(center_y - o_y) < radius:
            h2o_y = o_y
        if h2o_y is not None:
            if (upper_y - lower_y) / 2.0 > radius * 2.0:
                # The space can hold the whole text (radius * 2.0)
                if h2o_y > center_y:
                    center_y = h2o_y - radius
                else:
                    center_y = h2o_y + radius
    return center_x, center_y
def get_pourbaix_plot(self, limits=None, title="", label_domains=True):
    """
    Plot Pourbaix diagram.

    Args:
        limits: 2D list containing limits of the Pourbaix diagram
            of the form [[xlo, xhi], [ylo, yhi]]
        title: plot title
        label_domains: whether to annotate each domain with its name

    Returns:
        plt:
            matplotlib plot object
    """
    # plt = pretty_plot(24, 14.4)
    plt = pretty_plot(16)
    (stable, unstable) = self.pourbaix_plot_data(limits)
    if limits:
        xlim = limits[0]
        ylim = limits[1]
    else:
        xlim = self._analyzer.chempot_limits[0]
        ylim = self._analyzer.chempot_limits[1]
    # H2O stability lines (E = -PREFAC*pH and E = 1.23 - PREFAC*pH), plus
    # the neutral-pH and zero-potential reference lines
    h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
                           [xlim[1], -xlim[1] * PREFAC]])
    o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
                           [xlim[1], -xlim[1] * PREFAC + 1.23]])
    neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
    V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
    ax = plt.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    lw = 3
    plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
    plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
    plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
    plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
    for entry, lines in stable.items():
        # accumulate the centroid of the domain's unique vertices while
        # drawing its boundary (same approach as get_center())
        center_x = 0.0
        center_y = 0.0
        coords = []
        count_center = 0.0
        for line in lines:
            (x, y) = line
            plt.plot(x, y, "k-", linewidth=lw)
            for coord in np.array(line).T:
                if not in_coord_list(coords, coord):
                    coords.append(coord.tolist())
                    cx = coord[0]
                    cy = coord[1]
                    center_x += cx
                    center_y += cy
                    count_center += 1.0
        if count_center == 0.0:
            count_center = 1.0
        center_x /= count_center
        center_y /= count_center
        if ((center_x <= xlim[0]) | (center_x >= xlim[1]) |
                (center_y <= ylim[0]) | (center_y >= ylim[1])):
            # label would fall outside the plot window: skip it
            continue
        xy = (center_x, center_y)
        if label_domains:
            plt.annotate(self.print_name(entry), xy, fontsize=20, color="b")
    plt.xlabel("pH")
    plt.ylabel("E (V)")
    plt.title(title, fontsize=20, fontweight='bold')
    return plt
def print_name(self, entry):
    """
    Return a LaTeX-ified display name for *entry*.

    MultiEntry names are joined with " + "; entries combining more than
    two phases fall back to their hull index.
    """
    if not isinstance(entry, MultiEntry):
        return latexify_ion(latexify(entry.name))
    if len(entry.entrylist) > 2:
        # too many phases to spell out -- use the hull index instead
        return str(self._pd.qhull_entries.index(entry))
    return " + ".join(latexify_ion(latexify(e.name))
                      for e in entry.entrylist)
def legend(self, label_unstable=False, legend_file=""):
    """
    Build an "index : species" legend string for a multielement Pourbaix
    diagram and optionally write it to a file.

    Args:
        label_unstable: also register the constituents of unstable
            entries in the internal bookkeeping set.
        legend_file: if non-empty, path the legend text is written to.

    Returns:
        The legend string, or None for single-element diagrams.
    """
    if not self._pd._multielement:
        return None
    unprocessed_entries = self._pd.unprocessed_entries
    set_of_entries = set()
    list_of_entries = {}
    for entry in self._pd.stable_entries:
        index_ent = self._pd.qhull_entries.index(entry)
        str_ename = ""
        for e in entry.entrylist:
            str_ename += e.name + " + "
            for ent in unprocessed_entries:
                if ent.name == e.name:
                    set_of_entries.add(unprocessed_entries.index(ent))
        # drop the trailing " + "
        str_ename = str_ename[:-3]
        list_of_entries[index_ent] = str_ename
    if label_unstable:
        for entry in [entry for entry in self._pd.all_entries
                      if entry not in self._pd.stable_entries]:
            for e in entry.entrylist:
                set_of_entries.add(unprocessed_entries.index(e))
    str_labels = " Species: \n"
    for i in list_of_entries.keys():
        str_labels += str(i) + " : " + list_of_entries[i] + "\n"
    # Bug fix: the file handle used to be created only when legend_file
    # was set but written/closed unconditionally, leaving 'f' undefined
    # for the default call (and leaking the handle on error). A context
    # manager handles both cases.
    if legend_file:
        with open(legend_file, 'w') as f:
            f.write(str_labels)
    return str_labels
def write_image(self, plt, stream, image_format="svg"):
    """
    Write the phase diagram to an image in a stream.

    Args:
        plt: matplotlib plot object
        stream: stream to write to. Can be a file stream or a StringIO
            stream.
        image_format: format for the image. Can be any matplotlib
            supported format. Defaults to svg for best results for
            vector graphics.
    """
    figure = plt.gcf()
    figure.set_size_inches((12, 10))
    plt.tight_layout(pad=1.09)
    plt.savefig(stream, format=image_format)
def domain_vertices(self, entry):
    """
    Return the vertices of the Pourbaix domain for *entry*.

    Args:
        entry: Entry for which domain vertices are desired

    Returns:
        list of vertices; empty list when the analyzer has no domain
        for this entry.
    """
    return self._analyzer.pourbaix_domain_vertices.get(entry, [])
def get_pourbaix_plot_colorfill_by_element(self, limits=None, title="",
                                           label_domains=True, element=None):
    """
    Color domains by element: domains whose composition has exactly one
    element besides H/O get a solid color fill; domains with several get
    a hatch pattern instead.

    NOTE(review): this relies on Python 2 semantics (dict.keys() returning
    a sortable list); under Python 3 the .sort() call below would fail.
    """
    from matplotlib.patches import Polygon
    entry_dict_of_multientries = collections.defaultdict(list)
    plt = pretty_plot(16)
    optim_colors = ['#0000FF', '#FF0000', '#00FF00', '#FFFF00', '#FF00FF',
                    '#FF8080', '#DCDCDC', '#800000', '#FF8000']
    optim_font_color = ['#FFFFA0', '#00FFFF', '#FF00FF', '#0000FF', '#00FF00',
                        '#007F7F', '#232323', '#7FFFFF', '#007FFF']
    hatch = ['/', '\\', '|', '-', '+', 'o', '*']
    (stable, unstable) = self.pourbaix_plot_data(limits)
    num_of_overlaps = {key: 0 for key in stable.keys()}
    # group the stable (multi-)entries under the name of the contained
    # entry that involves *element*
    for entry in stable:
        if isinstance(entry, MultiEntry):
            for e in entry.entrylist:
                if element in e.composition.elements:
                    entry_dict_of_multientries[e.name].append(entry)
                    num_of_overlaps[entry] += 1
        else:
            entry_dict_of_multientries[entry.name].append(entry)
    if limits:
        xlim = limits[0]
        ylim = limits[1]
    else:
        xlim = self._analyzer.chempot_limits[0]
        ylim = self._analyzer.chempot_limits[1]
    h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
                           [xlim[1], -xlim[1] * PREFAC]])
    o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
                           [xlim[1], -xlim[1] * PREFAC + 1.23]])
    neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
    V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
    ax = plt.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    from pymatgen import Composition, Element
    from pymatgen.core.ion import Ion

    def len_elts(entry):
        # number of elements besides H and O -- used to draw the simple
        # (single-element) domains before mixed ones
        if "(s)" in entry:
            comp = Composition(entry[:-3])
        else:
            comp = Ion.from_formula(entry)
        return len([el for el in comp.elements if el not in
                    [Element("H"), Element("O")]])

    sorted_entry = entry_dict_of_multientries.keys()
    sorted_entry.sort(key=len_elts)
    i = -1
    label_chr = map(chr, list(range(65, 91)))
    for entry in sorted_entry:
        color_indx = 0
        x_coord = 0.0
        y_coord = 0.0
        npts = 0
        i += 1
        for e in entry_dict_of_multientries[entry]:
            hc = 0
            fc = 0
            bc = 0
            xy = self.domain_vertices(e)
            c = self.get_center(stable[e])
            x_coord += c[0]
            y_coord += c[1]
            npts += 1
            color_indx = i
            if "(s)" in entry:
                comp = Composition(entry[:-3])
            else:
                comp = Ion.from_formula(entry)
            if len([el for el in comp.elements if el not in
                    [Element("H"), Element("O")]]) == 1:
                # single extra element: solid fill, colors cycling modulo
                # the palette length
                if color_indx >= len(optim_colors):
                    color_indx = color_indx -\
                        int(color_indx / len(optim_colors)) * len(optim_colors)
                patch = Polygon(xy, facecolor=optim_colors[color_indx],
                                closed=True, lw=3.0, fill=True)
                bc = optim_colors[color_indx]
            else:
                # several extra elements: hatched, unfilled
                if color_indx >= len(hatch):
                    color_indx = color_indx - int(color_indx / len(hatch)) * len(hatch)
                patch = Polygon(xy, hatch=hatch[color_indx], closed=True, lw=3.0, fill=False)
                hc = hatch[color_indx]
            ax.add_patch(patch)
        # label at the mean of the member-domain centers
        xy_center = (x_coord / npts, y_coord / npts)
        if label_domains:
            if color_indx >= len(optim_colors):
                color_indx = color_indx -\
                    int(color_indx / len(optim_colors)) * len(optim_colors)
            fc = optim_font_color[color_indx]
            if bc and not hc:
                bbox = dict(boxstyle="round", fc=fc)
            if hc and not bc:
                bc = 'k'
                fc = 'w'
                bbox = dict(boxstyle="round", hatch=hc, fill=False)
            if bc and hc:
                bbox = dict(boxstyle="round", hatch=hc, fc=fc)
            # bbox.set_path_effects([PathEffects.withSimplePatchShadow()])
            plt.annotate(latexify_ion(latexify(entry)), xy_center,
                         color=bc, fontsize=30, bbox=bbox)
            # plt.annotate(label_chr[i], xy_center,
            #              color=bc, fontsize=30, bbox=bbox)
    lw = 3
    plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
    plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
    plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
    plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
    plt.xlabel("pH")
    plt.ylabel("E (V)")
    plt.title(title, fontsize=20, fontweight='bold')
    return plt
def get_pourbaix_plot_colorfill_by_domain_name(self, limits=None, title="",
        label_domains=True, label_color='k', domain_color=None, domain_fontsize=None,
        domain_edge_lw=0.5, bold_domains=None, cluster_domains=(),
        add_h2o_stablity_line=True, add_center_line=False, h2o_lw=0.5,
        fill_domain=True, width=8, height=None, font_family='Times New Roman'):
    """
    Color domains by the colors specified by the domain_color dict.

    Args:
        limits: 2D list containing limits of the Pourbaix diagram
            of the form [[xlo, xhi], [ylo, yhi]]
        label_domains (Bool): whether to add the text label for domains
        label_color (str): color of domain labels, defaults to black
        domain_color (dict): colors of each domain e.g {"Al(s)": "#FF1100"}. If set
            to None default color set will be used.
        domain_fontsize (int): Font size used in domain text labels.
        domain_edge_lw (int): line width for the boundaries between domains.
        bold_domains (list): List of domain names to use bold text style for domain
            labels. If set to False, no domain will be bold.
        cluster_domains (list): List of domain names in cluster phase
        add_h2o_stablity_line (Bool): whether to plot the H2O stability line
        add_center_line (Bool): whether to plot lines showing the center coordinate
        h2o_lw (int): line width for H2O stability line and center lines
        fill_domain (bool): a version without color will be produced if set
            to False.
        width (float): Width of plot in inches. Defaults to 8in.
        height (float): Height of plot in inches. Defaults to width * golden
            ratio.
        font_family (str): Font family of the labels
    """
    def special_lines(xlim, ylim):
        # H2O stability plus neutral-pH and V=0 reference lines for the
        # current plot window
        h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
                               [xlim[1], -xlim[1] * PREFAC]])
        o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
                               [xlim[1], -xlim[1] * PREFAC + 1.23]])
        neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
        V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
        return h_line, o_line, neutral_line, V0_line

    from matplotlib.patches import Polygon
    import copy
    default_domain_font_size = 12
    default_solid_phase_color = '#b8f9e7'    # this slightly darker than the MP scheme, to
    default_cluster_phase_color = '#d0fbef'  # avoid making the cluster phase too light
    plt = pretty_plot(width=width, height=height, dpi=300)
    (stable, unstable) = self.pourbaix_plot_data(limits)
    xlim, ylim = limits[:2] if limits else self._analyzer.chempot_limits[:2]
    h_line, o_line, neutral_line, V0_line = special_lines(xlim, ylim)
    ax = plt.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
    ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
    ax.tick_params(direction='out')
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    # draw high-energy domains first so later (lower-energy) patches and
    # labels end up on top
    sorted_entry = list(stable.keys())
    sorted_entry.sort(key=lambda en: en.energy, reverse=True)
    if domain_fontsize is None:
        domain_fontsize = {en.name: default_domain_font_size for en in sorted_entry}
    elif not isinstance(domain_fontsize, dict):
        # a single number: apply it to every domain
        domain_fontsize = {en.name: domain_fontsize for en in sorted_entry}
    if domain_color is None:
        domain_color = {en.name: default_solid_phase_color if '(s)' in en.name else
                        (default_cluster_phase_color if en.name in cluster_domains else 'w')
                        for i, en in enumerate(sorted_entry)}
    else:
        domain_color = {en.name: domain_color[en.name] if en.name in domain_color else "w"
                        for i, en in enumerate(sorted_entry)}
    if bold_domains is None:
        # by default bold everything that is not a solid phase
        bold_domains = [en.name for en in sorted_entry if '(s)' not in en.name]
    if bold_domains == False:
        bold_domains = []
    for entry in sorted_entry:
        xy = self.domain_vertices(entry)
        if add_h2o_stablity_line:
            # 0.3 ~ half the label height, used to dodge the H2O lines
            c = self.get_distribution_corrected_center(stable[entry], h_line, o_line, 0.3)
        else:
            c = self.get_distribution_corrected_center(stable[entry])
        patch = Polygon(xy, facecolor=domain_color[entry.name], edgecolor="black",
                        closed=True, lw=domain_edge_lw, fill=fill_domain, antialiased=True)
        ax.add_patch(patch)
        if label_domains:
            if platform.system() == 'Darwin' and font_family == "Times New Roman":
                # Have to hack to the hard coded font path to get current font On Mac OS X
                if entry.name in bold_domains:
                    font = FontProperties(fname='/Library/Fonts/Times New Roman Bold.ttf',
                                          size=domain_fontsize[entry.name])
                else:
                    font = FontProperties(fname='/Library/Fonts/Times New Roman.ttf',
                                          size=domain_fontsize[entry.name])
            else:
                if entry.name in bold_domains:
                    font = FontProperties(family=font_family,
                                          weight='bold',
                                          size=domain_fontsize[entry.name])
                else:
                    font = FontProperties(family=font_family,
                                          weight='regular',
                                          size=domain_fontsize[entry.name])
            plt.text(*c, s=self.print_name(entry),
                     fontproperties=font,
                     horizontalalignment="center", verticalalignment="center",
                     multialignment="center", color=label_color)
    if add_h2o_stablity_line:
        dashes = (3, 1.5)
        line, = plt.plot(h_line[0], h_line[1], "k--", linewidth=h2o_lw, antialiased=True)
        line.set_dashes(dashes)
        line, = plt.plot(o_line[0], o_line[1], "k--", linewidth=h2o_lw, antialiased=True)
        line.set_dashes(dashes)
    if add_center_line:
        plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=h2o_lw, antialiased=False)
        plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=h2o_lw, antialiased=False)
    plt.xlabel("pH", fontname=font_family, fontsize=18)
    plt.ylabel("E (V vs SHE)", fontname=font_family, fontsize=18)
    plt.xticks(fontname=font_family, fontsize=16)
    plt.yticks(fontname=font_family, fontsize=16)
    plt.title(title, fontsize=20, fontweight='bold', fontname=font_family)
    return plt
def get_pourbaix_mark_passive(self, limits=None, title="", label_domains=True, passive_entry=None):
    """
    Color domains by likely electrochemical role: solid domains
    containing O or H are filled blue (candidate passivating films),
    the domain of *passive_entry* itself red; everything else is left
    unfilled.
    """
    from matplotlib.patches import Polygon
    from pymatgen import Element
    from itertools import chain
    import operator
    plt = pretty_plot(16)
    optim_colors = ['#0000FF', '#FF0000', '#00FF00', '#FFFF00', '#FF00FF',
                    '#FF8080', '#DCDCDC', '#800000', '#FF8000']
    optim_font_colors = ['#FFC000', '#00FFFF', '#FF00FF', '#0000FF', '#00FF00',
                         '#007F7F', '#232323', '#7FFFFF', '#007FFF']
    (stable, unstable) = self.pourbaix_plot_data(limits)
    mark_passive = {key: 0 for key in stable.keys()}
    # default passive_entry: the element with the largest composition
    # fraction (overrides any value passed in when _elt_comp is set)
    if self._pd._elt_comp:
        maxval = max(six.iteritems(self._pd._elt_comp), key=operator.itemgetter(1))[1]
        key = [k for k, v in self._pd._elt_comp.items() if v == maxval]
        passive_entry = key[0]

    def list_elts(entry):
        # all elements occurring anywhere in the (multi-)entry
        elts_list = set()
        if isinstance(entry, MultiEntry):
            for el in chain.from_iterable([[el for el in e.composition.elements]
                                           for e in entry.entrylist]):
                elts_list.add(el)
        else:
            elts_list = entry.composition.elements
        return elts_list

    # classify each stable domain: 2 = the passive entry's own solid,
    # 1 = other solid containing O or H, 0 = everything else
    for entry in stable:
        if passive_entry + str("(s)") in entry.name:
            mark_passive[entry] = 2
            continue
        if "(s)" not in entry.name:
            continue
        elif len(set([Element("O"), Element("H")]).intersection(set(list_elts(entry)))) > 0:
            mark_passive[entry] = 1
    if limits:
        xlim = limits[0]
        ylim = limits[1]
    else:
        xlim = self._analyzer.chempot_limits[0]
        ylim = self._analyzer.chempot_limits[1]
    h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
                           [xlim[1], -xlim[1] * PREFAC]])
    o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
                           [xlim[1], -xlim[1] * PREFAC + 1.23]])
    neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
    V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
    ax = plt.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    for e in stable.keys():
        xy = self.domain_vertices(e)
        c = self.get_center(stable[e])
        if mark_passive[e] == 1:
            color = optim_colors[0]
            fontcolor = optim_font_colors[0]
            colorfill = True
        elif mark_passive[e] == 2:
            color = optim_colors[1]
            fontcolor = optim_font_colors[1]
            colorfill = True
        else:
            color = "w"
            colorfill = False
            fontcolor = "k"
        patch = Polygon(xy, facecolor=color, closed=True, lw=3.0, fill=colorfill)
        ax.add_patch(patch)
        if label_domains:
            plt.annotate(self.print_name(e), c, color=fontcolor, fontsize=20)
    lw = 3
    plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
    plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
    plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
    plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
    plt.xlabel("pH")
    plt.ylabel("E (V)")
    plt.title(title, fontsize=20, fontweight='bold')
    return plt
def latexify_ion(formula):
    """Convert bracketed charge notation to a LaTeX superscript.

    E.g. ``"Fe[2+]"`` -> ``"Fe$^{2+}$"``.

    Bug fix: the old pattern ``\\[([^)]*)\\]`` excluded ``)`` instead of
    ``]`` from the bracket content, so with several bracketed ions in one
    string the greedy match spanned from the first ``[`` to the last
    ``]``, mangling everything in between. The pointless empty capture
    group is dropped as well.
    """
    return re.sub(r"\[([^\]]*)\]", r"$^{\1}$", formula)
| mit |
weixsong/algorithm | MLLearn/MLBox/logistic_regression/sms.py | 1 | 2883 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
''' 通过垃圾短信数据训练Logistic回归模型,并进行留存交叉验证
'''
import re
import random
import numpy as np
import matplotlib.pyplot as plt
from logreg_stoch_grad_ascent import LogisticRegressionClassifier
ENCODING = 'ISO-8859-1'
TRAIN_PERCENTAGE = 0.9
def get_doc_vector(words, vocabulary):
    ''' Convert a document's word list into a bag-of-words count vector.

    :param words: tokens of the document
    :type words: list of str
    :param vocabulary: ordered list of all known words
    :type vocabulary: list of str
    :return doc_vect: vector of per-word occurrence counts, aligned with
        vocabulary; words not in the vocabulary are ignored
    :type doc_vect: list of int
    '''
    # Performance fix: vocabulary.index(word) inside the loop made this
    # O(len(words) * len(vocabulary)). Build a word -> first-position map
    # once (setdefault keeps the first occurrence, matching .index()).
    positions = {}
    for i, word in enumerate(vocabulary):
        positions.setdefault(word, i)
    doc_vect = [0] * len(vocabulary)
    for word in words:
        idx = positions.get(word)
        if idx is not None:
            doc_vect[idx] += 1
    return doc_vect
def parse_line(line):
    ''' Split one dataset line into (token list, class label).

    The label is the text after the last comma; everything before it is
    the message body, tokenized on non-word characters and lower-cased.
    '''
    pieces = line.split(',')
    label = pieces[-1].strip()
    body = ','.join(pieces[:-1])
    tokens = []
    for token in re.split(r'\W+', body):
        if token:
            tokens.append(token.lower())
    return tokens, label
def parse_file(filename):
    ''' Parse the whole dataset file.

    Returns (vocabulary, word_vects, classes): the de-duplicated list of
    all words seen, the per-line token lists, and the per-line labels.
    '''
    vocabulary = []
    word_vects = []
    classes = []
    with open(filename, 'r', encoding=ENCODING) as handle:
        for raw_line in handle:
            if not raw_line:
                continue
            words, label = parse_line(raw_line)
            vocabulary.extend(words)
            word_vects.append(words)
            classes.append(label)
    return list(set(vocabulary)), word_vects, classes
if '__main__' == __name__:
    clf = LogisticRegressionClassifier()
    vocabulary, word_vects, classes = parse_file('english_big.txt')

    # Split into training and test data: hold out (1 - TRAIN_PERCENTAGE)
    # of the samples, drawn at random and removed from the training pool.
    ntest = int(len(classes)*(1-TRAIN_PERCENTAGE))
    test_word_vects = []
    test_classes = []
    for i in range(ntest):
        idx = random.randint(0, len(word_vects)-1)
        test_word_vects.append(word_vects.pop(idx))
        test_classes.append(classes.pop(idx))
    train_word_vects = word_vects
    train_classes = classes

    # Map class labels to 0/1 ('spam' -> 1, everything else -> 0).
    f = lambda x: 1 if x == 'spam' else 0
    train_classes = list(map(f, train_classes))
    test_classes = list(map(f, test_classes))
    train_dataset = [get_doc_vector(words, vocabulary) for words in train_word_vects]

    # Train the logistic-regression model via stochastic gradient ascent.
    clf.stoch_gradient_ascent(train_dataset, train_classes)

    # Evaluate on the held-out set and report the error rate.
    error = 0
    for test_word_vect, test_cls in zip(test_word_vects, test_classes):
        test_data = get_doc_vector(test_word_vect, vocabulary)
        pred_cls = clf.classify(test_data)
        if test_cls != pred_cls:
            print('Predict: {} -- Actual: {}'.format(pred_cls, test_cls))
            error += 1
    print('Error Rate: {}'.format(error/len(test_classes)))
| mit |
DebbyChiang/prospecting-reformat | For-LeadFuze/emails.py | 1 | 1868 | #import libraries
import pandas as pd
import numpy as np

# Load the prospect list exported from the CRM.
prospects = pd.DataFrame(pd.read_csv("/Users/open/Desktop/department.csv"))

# Drop rows with no company.
prospects = prospects.dropna(subset=['Company'])

# Copy the unverified ("orange") emails into 'Test Email 1'.
prospects['Test Email 1'] = ""
prospects['Test Email 1'] = prospects['Email']

# Clear the original column; verified ("green") emails are written back later.
prospects['Email'] = ""

# Drop the stray index column from the CSV export and reorder the columns.
prospects = prospects.drop('Unnamed: 0', axis = 1)
cols = ['Company',
        'First Name',
        'Last Name',
        'Title',
        'Department',
        '# of ppl in Target City',
        '# of ppl',
        'Lead Owner',
        'Industry',
        'Email',
        'Test Email 1',
        'Rating',
        'Notes',
        'Address',
        'City',
        'State',
        'Postal Code',
        'Country',
        'Phone',
        'Website',
        'Hook',
        'Email Type']
prospects = prospects[cols]

# Set up the Full Contact person API (api_key redacted as 'xxx').
import requests
import json
api_key = 'xxx'
url = "https://api.fullcontact.com/v2/person.json"

def whois(**kwargs):
    # Query the Full Contact person API with the given query parameters.
    # NOTE(review): the literal 'xxx' is used as the parameter *name*
    # here -- presumably this was meant to be 'apiKey'; confirm before use.
    if 'xxx' not in kwargs:
        kwargs['xxx'] = api_key
    r = requests.get(url, params=kwargs)
    return json.loads(r.text)

# Prepare emails for testing.
orange = list(prospects['Test Email 1'])
green = list(prospects['Email'])

# Test each unverified email against the API; status 200 means the address
# resolved to a person, so promote it to the verified column.
for i in range(0, len(orange)):
    email = orange[i]
    parameters = {
        'email': email,
        'apiKey': 'xxx'
    }
    response = requests.get('https://api.fullcontact.com/v2/person.json', parameters)
    data = json.loads(response.text)
    if data['status'] == 200:
        green[i] = orange[i]
        orange[i] = ""

# Write the verified/unverified emails back and export the result.
prospects['Email'] = green
prospects['Test Email 1'] = orange
prospects.to_csv('/Users/open/Desktop/emails.csv')
| gpl-2.0 |
jakirkham/lazyflow | lazyflow/operators/opDetectMissingData.py | 1 | 40074 | ###############################################################################
# lazyflow: data flow based lazy parallel computation framework
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
import logging
from functools import partial
import cPickle as pickle
import tempfile
from threading import Lock as ThreadLock
import re
from lazyflow.graph import Operator, InputSlot, OutputSlot
from lazyflow.stype import Opaque
from lazyflow.rtype import SubRegion
from lazyflow.request import Request, RequestPool
from lazyflow.roi import roiToSlice
import numpy as np
import vigra
logger = logging.getLogger(__name__)
class VersionError(Exception):
    """Raised when a dependency's version string cannot be parsed."""
def extractVersion(s):
    """Extract the minor version number from a version string.

    The string is expected to contain at least two integers (major and
    minor), e.g. "0.11-ubuntu" -> 11, "haiku_os-sklearn-0.9" -> 9.

    :raises VersionError: if fewer than two integers are found
    """
    # Raw string avoids the invalid escape in "\d+"; re.findall never
    # returns None, so only the length needs checking.
    numbers = re.findall(r"\d+", s)
    if len(numbers) < 2:
        # Bug fix: the old code checked len(m) < 1 but indexed m[1], so a
        # string containing a single number crashed with IndexError
        # instead of raising VersionError.
        raise VersionError("Cannot determine sklearn version")
    return int(numbers[1])
# Optional dependency: scikit-learn provides the SVM used by the 'svm'
# detection method. sklearn < 0.11 still takes the deprecated scale_C
# argument in SVC.
try:
    from sklearn.svm import SVC
    havesklearn = True
    from sklearn import __version__ as sklearnVersion
    svcTakesScaleC = extractVersion(sklearnVersion) < 11
except (ImportError, VersionError):
    # Bug fix: "except ImportError, VersionError:" is the old-style 'as'
    # binding -- it bound the ImportError instance to the name
    # VersionError and did NOT catch VersionError at all, so an
    # unparsable sklearn version string crashed this module's import.
    logger.warning("Could not import dependency 'sklearn' for SVMs")
    havesklearn = False
_defaultBinSize = 30
############################
############################
############################
### ###
### DETECTION OPERATOR ###
### ###
############################
############################
############################
class OpDetectMissing(Operator):
'''
Sub-Operator for detection of missing image content
'''
InputVolume = InputSlot()
PatchSize = InputSlot(value=128)
HaloSize = InputSlot(value=30)
DetectionMethod = InputSlot(value='classic')
NHistogramBins = InputSlot(value=_defaultBinSize)
OverloadDetector = InputSlot(value='')
#histograms: ndarray, shape: nHistograms x (NHistogramBins.value + 1)
# the last column holds the label, i.e. {0: negative, 1: positive}
TrainingHistograms = InputSlot()
Output = OutputSlot()
Detector = OutputSlot(stype=Opaque)
### PRIVATE class attributes ###
_manager = None
### PRIVATE attributes ###
_inputRange = (0, 255)
_needsTraining = True
_felzenOpts = {"firstSamples": 250, "maxRemovePerStep": 0,
"maxAddPerStep": 250, "maxSamples": 1000,
"nTrainingSteps": 4}
def __init__(self, *args, **kwargs):
    """Forward construction to Operator and install the default training set."""
    super(OpDetectMissing, self).__init__(*args, **kwargs)
    self.TrainingHistograms.setValue(_defaultTrainingHistograms())
def propagateDirty(self, slot, subindex, roi):
    """Translate upstream slot changes into Output dirtiness / retraining flags."""
    if slot == self.InputVolume:
        # only the changed region of the input needs recomputation
        self.Output.setDirty(roi)

    if slot == self.TrainingHistograms:
        # new training data invalidates the current classifier
        OpDetectMissing._needsTraining = True

    if slot == self.NHistogramBins:
        # NOTE(review): this sets _needsTraining to True when the manager
        # *has* a detector for this bin count -- that looks inverted
        # (presumably training is needed when it does NOT have one);
        # confirm against the manager implementation.
        OpDetectMissing._needsTraining = \
            OpDetectMissing._manager.has(self.NHistogramBins.value)

    if slot == self.PatchSize or slot == self.HaloSize:
        # patch geometry affects every output pixel
        self.Output.setDirty()

    if slot == self.OverloadDetector:
        # an externally serialized detector was supplied: load it and
        # invalidate all output
        s = self.OverloadDetector.value
        self.loads(s)
        self.Output.setDirty()
def setupOutputs(self):
    """Set up Output/Detector metadata and cache the input's value range.

    The range is used later as the histogram range for patch features.
    """
    self.Output.meta.assignFrom(self.InputVolume.meta)
    self.Output.meta.dtype = np.uint8

    # Determine the range of the input from its dtype. Integer dtypes are
    # handled generically via np.iinfo (this resolves the old FIXME and
    # keeps uint8 -> (0, 255) and uint16 -> (0, 65535) unchanged);
    # non-integer dtypes keep the previous (0, 255) fallback.
    try:
        r = (0, np.iinfo(self.InputVolume.meta.dtype).max)
    except ValueError:
        # np.iinfo raises ValueError for non-integer dtypes
        r = (0, 255)
    self._inputRange = r

    self.Detector.meta.shape = (1,)
def execute(self, slot, subindex, roi, result):
if slot == self.Detector:
result = self.dumps()
return result
# sanity check
assert self.DetectionMethod.value in ['svm', 'classic'], \
"Unknown detection method '{}'".format(self.DetectionMethod.value)
# prefill result
resultZYXCT = vigra.taggedView(
result, self.InputVolume.meta.axistags).withAxes(*'zyxct')
# acquire data
data = self.InputVolume.get(roi).wait()
dataZYXCT = vigra.taggedView(
data, self.InputVolume.meta.axistags).withAxes(*'zyxct')
# walk over time and channel axes
for t in range(dataZYXCT.shape[4]):
for c in range(dataZYXCT.shape[3]):
resultZYXCT[..., c, t] = \
self._detectMissing(dataZYXCT[..., c, t])
return result
def _detectMissing(self, data):
'''
detects missing regions and labels each missing region with 1
:param data: 3d data with axistags 'zyx'
:type data: array-like
'''
assert data.axistags.index('z') == 0 \
and data.axistags.index('y') == 1 \
and data.axistags.index('x') == 2 \
and len(data.shape) == 3, \
"Data must be 3d with axis 'zyx'."
result = np.zeros(data.shape, dtype=np.uint8)
patchSize = self.PatchSize.value
haloSize = self.HaloSize.value
if patchSize is None or not patchSize > 0:
raise ValueError("PatchSize must be a positive integer")
if haloSize is None or haloSize < 0:
raise ValueError("HaloSize must be a non-negative integer")
maxZ = data.shape[0]
# walk over slices
for z in range(maxZ):
patches, slices = _patchify(data[z, :, :], patchSize, haloSize)
hists = []
# walk over patches
for patch in patches:
(hist, _) = np.histogram(
patch, bins=self.NHistogramBins.value,
range=self._inputRange, density=True)
hists.append(hist)
hists = np.vstack(hists)
pred = self.predict(hists, method=self.DetectionMethod.value)
for i, p in enumerate(pred):
if p > 0:
#patch is classified as missing
result[z, slices[i][0], slices[i][1]] |= 1
return result
def train(self, force=False):
'''
trains with samples drawn from slot TrainingHistograms
(retrains only if bin size is currently untrained or force is True)
'''
# return early if unneccessary
if not force and not OpDetectMissing._needsTraining and \
OpDetectMissing._manager.has(self.NHistogramBins.value):
return
#return if we don't have svms
if not havesklearn:
return
logger.debug("Training for {} histogram bins ...".format(
self.NHistogramBins.value))
if self.DetectionMethod.value == 'classic' or not havesklearn:
# no need to train this
return
histograms = self.TrainingHistograms[:].wait()
logger.debug("Finished loading histogram data of shape {}.".format(
histograms.shape))
assert histograms.shape[1] >= self.NHistogramBins.value+1 and \
len(histograms.shape) == 2, \
"Training data has wrong shape (expected: (n,{}), got: {}.".format(
self.NHistogramBins.value+1, histograms.shape)
labels = histograms[:, self.NHistogramBins.value]
histograms = histograms[:, :self.NHistogramBins.value]
neg_inds = np.where(labels == 0)[0]
pos_inds = np.setdiff1d(np.arange(len(labels)), neg_inds)
pos = histograms[pos_inds]
neg = histograms[neg_inds]
npos = len(pos)
nneg = len(neg)
#prepare for 10-fold cross-validation
nfolds = 10
cfp = np.zeros((nfolds,))
cfn = np.zeros((nfolds,))
cprec = np.zeros((nfolds,))
crec = np.zeros((nfolds,))
pos_random = np.random.permutation(len(pos))
neg_random = np.random.permutation(len(neg))
logger.debug(
"Starting training with " +
"{} negative patches and {} positive patches...".format(
len(neg), len(pos)))
self._felzenszwalbTraining(neg, pos)
logger.debug("Finished training.")
OpDetectMissing._needsTraining = False
def _felzenszwalbTraining(self, negative, positive):
'''
we want to train on a 'hard' subset of the training data, see
FELZENSZWALB ET AL.: OBJECT DETECTION WITH DISCRIMINATIVELY TRAINED PART-BASED MODELS (4.4), PAMI 32/9
'''
#TODO sanity checks
n = (self.PatchSize.value + self.HaloSize.value)**2
method = self.DetectionMethod.value
# set options for Felzenszwalb training
firstSamples = self._felzenOpts["firstSamples"]
maxRemovePerStep = self._felzenOpts["maxRemovePerStep"]
maxAddPerStep = self._felzenOpts["maxAddPerStep"]
maxSamples = self._felzenOpts["maxSamples"]
nTrainingSteps = self._felzenOpts["nTrainingSteps"]
# initial choice of training samples
(initNegative, choiceNegative, _, _) = \
_chooseRandomSubset(negative, min(firstSamples, len(negative)))
(initPositive, choicePositive, _, _) = \
_chooseRandomSubset(positive, min(firstSamples, len(positive)))
# setup for parallel training
samples = [negative, positive]
choice = [choiceNegative, choicePositive]
S_t = [initNegative, initPositive]
finished = [False, False]
### BEGIN SUBROUTINE ###
def felzenstep(x, cache, ind):
case = ("positive" if ind > 0 else "negative") + " set"
pred = self.predict(x, method=method)
hard = np.where(pred != ind)[0]
easy = np.setdiff1d(range(len(x)), hard)
logger.debug(" {}: currently {} hard and {} easy samples".format(
case, len(hard), len(easy)))
# shrink the cache
easyInCache = np.intersect1d(easy, cache) if len(easy) > 0 else []
if len(easyInCache) > 0:
(removeFromCache, _, _, _) = _chooseRandomSubset(
easyInCache, min(len(easyInCache), maxRemovePerStep))
cache = np.setdiff1d(cache, removeFromCache)
logger.debug(" {}: shrunk the cache by {} elements".format(
case, len(removeFromCache)))
# grow the cache
temp = len(cache)
addToCache = _chooseRandomSubset(
hard, min(len(hard), maxAddPerStep))[0]
cache = np.union1d(cache, addToCache)
addedHard = len(cache)-temp
logger.debug(" {}: grown the cache by {} elements".format(
case, addedHard))
if len(cache) > maxSamples:
logger.debug(
" {}: Cache to big, removing elements.".format(case))
cache = _chooseRandomSubset(cache, maxSamples)[0]
# apply the cache
C = x[cache]
return (C, cache, addedHard == 0)
### END SUBROUTINE ###
### BEGIN PARALLELIZATION FUNCTION ###
def partFun(i):
(C, newChoice, newFinished) = felzenstep(samples[i], choice[i], i)
S_t[i] = C
choice[i] = newChoice
finished[i] = newFinished
### END PARALLELIZATION FUNCTION ###
for k in range(nTrainingSteps):
logger.debug(
"Felzenszwalb Training " +
"(step {}/{}): {} hard negative samples, {}".format(
k+1, nTrainingSteps, len(S_t[0]), len(S_t[1])) +
"hard positive samples.")
self.fit(S_t[0], S_t[1], method=method)
pool = RequestPool()
for i in range(len(S_t)):
req = Request(partial(partFun, i))
pool.add(req)
pool.wait()
pool.clean()
if np.all(finished):
#already have all hard examples in training set
break
self.fit(S_t[0], S_t[1], method=method)
logger.debug(" Finished Felzenszwalb Training.")
#####################
### CLASS METHODS ###
#####################
@classmethod
def fit(cls, negative, positive, method='classic'):
'''
train the underlying SVM
'''
if cls._manager is None:
cls._manager = SVMManager()
if method == 'classic' or not havesklearn:
return
assert len(negative.shape) == 2, \
"Negative training set must have shape (nSamples, nHistogramBins)."
assert len(positive.shape) == 2, \
"Positive training set must have shape (nSamples, nHistogramBins)."
assert negative.shape[1] == positive.shape[1], \
"Negative and positive histograms must have the same number of bins."
nBins = negative.shape[1]
labels = [0]*len(negative) + [1]*len(positive)
samples = np.vstack((negative, positive))
if svcTakesScaleC:
# old scikit-learn versions take scale_C as a parameter
# new ones don't and default to True
svm = SVC(C=1000, kernel=_histogramIntersectionKernel, scale_C=True)
else:
svm = SVC(C=1000, kernel=_histogramIntersectionKernel)
svm.fit(samples, labels)
cls._manager.add(svm, nBins, overwrite=True)
@classmethod
def predict(cls, X, method='classic'):
'''
predict if the histograms in X correspond to missing regions
do this for subsets of X in parallel
'''
if cls._manager is None:
cls._manager = SVMManager()
assert len(X.shape) == 2, \
"Prediction data must have shape (nSamples, nHistogramBins)."
nBins = X.shape[1]
if method == 'classic' or not havesklearn:
svm = PseudoSVC()
else:
try:
svm = cls._manager.get(nBins)
except SVMManager.NotTrainedError:
# fail gracefully if not trained => responsibility of user!
svm = PseudoSVC()
y = np.zeros((len(X),))*np.nan
pool = RequestPool()
chunkSize = 1000 # FIXME magic number??
nChunks = len(X)/chunkSize + (1 if len(X) % chunkSize > 0 else 0)
s = [slice(k*chunkSize, min((k+1)*chunkSize, len(X)))
for k in range(nChunks)]
def partFun(i):
y[s[i]] = svm.predict(X[s[i]])
for i in range(nChunks):
req = Request(partial(partFun, i))
pool.add(req)
pool.wait()
pool.clean()
# not neccessary
#assert not np.any(np.isnan(y))
return np.asarray(y)
@classmethod
def has(cls, n, method='classic'):
if cls._manager is None:
cls._manager = SVMManager()
logger.debug(str(cls._manager))
if method == 'classic' or not havesklearn:
return True
return cls._manager.has(n)
@classmethod
def reset(cls):
cls._manager = SVMManager()
logger.debug("Reset all detectors.")
@classmethod
def dumps(cls):
if cls._manager is None:
cls._manager = SVMManager()
return pickle.dumps(cls._manager.extract())
@classmethod
def loads(cls, s):
if cls._manager is None:
cls._manager = SVMManager()
if len(s) > 0:
try:
d = pickle.loads(s)
except Exception as err:
logger.error(
"Failed overloading detector due to an error: {}".format(
str(err)))
return
cls._manager.overload(d)
logger.debug("Loaded detector: {}".format(str(cls._manager)))
#############################
#############################
#############################
### ###
### TOOLS ###
### ###
#############################
#############################
#############################
class PseudoSVC(object):
    '''
    Drop-in stand-in for a trained SVM: implements the 'classic' heuristic
    that flags a patch as missing iff its histogram has no mass outside the
    first bin (i.e. the patch is uniformly black).
    '''

    def __init__(self, *args, **kwargs):
        pass

    def fit(self, *args, **kwargs):
        # nothing to train
        pass

    def predict(self, *args, **kwargs):
        # args[0]: 2d array, one histogram per row
        histograms = args[0]
        labels = np.zeros(len(histograms))
        for row, hist in enumerate(histograms):
            # missing (1) iff every bin after the first is empty
            if not np.any(hist[1:]):
                labels[row] = 1
        return labels
class SVMManager(object):
    '''
    manages our SVMs for multiple bin numbers

    Detectors are kept in a plain dict keyed by histogram bin count; the
    reserved string key 'version' tags the serialization format used by
    extract()/overload().
    '''
    _svms = None

    class NotTrainedError(Exception):
        # raised by get() when no SVM exists for the requested bin count
        pass

    def __init__(self):
        self._svms = {'version': 1}

    def get(self, n):
        '''return the SVM trained for n bins, raise NotTrainedError if absent'''
        try:
            return self._svms[n]
        except KeyError:
            raise self.NotTrainedError(
                "Detector for bin size {} not trained.\nHave {}.".format(
                    n, self._svms))

    def add(self, svm, n, overwrite=False):
        '''register svm for n bins; keep an existing entry unless overwrite'''
        # idiomatic membership test ('n not in d') instead of
        # 'not n in d.keys()'
        if n not in self._svms or overwrite:
            self._svms[n] = svm

    def remove(self, n):
        '''drop the detector for n bins, complaining (not failing) if absent'''
        try:
            del self._svms[n]
        except KeyError:
            #don't fail, just complain
            logger.error("Tried removing a detector which is not trained yet.")

    def has(self, n):
        '''True iff a detector for n bins is registered'''
        return n in self._svms

    def extract(self):
        '''return the raw detector dict (suitable for pickling)'''
        return self._svms

    def overload(self, obj):
        '''replace (same version) or merge (legacy format) detectors from obj'''
        if 'version' in obj and obj['version'] == self._svms['version']:
            self._svms = obj
            return
        else:
            # legacy format: {'svm': {nBins: {key: svm, ...}, ...}}
            try:
                for n in obj['svm'].keys():
                    for svm in obj['svm'][n].values():
                        self.add(svm, n, overwrite=True)
            except KeyError:
                #don't fail, just complain
                logger.error(
                    "Detector overload format not recognized, "
                    "no detector loaded.")

    def __str__(self):
        return str(self._svms)
def _chooseRandomSubset(data, n):
choice = np.random.permutation(len(data))
return (data[choice[:n]], choice[:n], data[choice[n:]], choice[n:])
def _patchify(data, patchSize, haloSize):
'''
data must be 2D y-x
returns (patches, slices)
'''
patches = []
slices = []
nPatchesX = data.shape[1]/patchSize + \
(1 if data.shape[1] % patchSize > 0 else 0)
nPatchesY = data.shape[0]/patchSize + \
(1 if data.shape[0] % patchSize > 0 else 0)
for y in range(nPatchesY):
for x in range(nPatchesX):
right = min((x+1)*patchSize + haloSize, data.shape[1])
bottom = min((y+1)*patchSize + haloSize, data.shape[0])
rightIsIncomplete = (x+1)*patchSize > data.shape[1]
bottomIsIncomplete = (y+1)*patchSize > data.shape[0]
left = max(x*patchSize - haloSize, 0) if not rightIsIncomplete \
else max(0, right-patchSize-haloSize)
top = max(y*patchSize - haloSize, 0) if not bottomIsIncomplete \
else max(0, bottom - patchSize - haloSize)
patches.append(data[top:bottom, left:right])
if rightIsIncomplete:
horzSlice = slice(
max(data.shape[1]-patchSize, 0), data.shape[1])
else:
horzSlice = slice(patchSize*x, patchSize*(x+1))
if bottomIsIncomplete:
vertSlice = slice(
max(data.shape[0]-patchSize, 0), data.shape[0])
else:
vertSlice = slice(patchSize*y, patchSize*(y+1))
slices.append((vertSlice, horzSlice))
return (patches, slices)
def _histogramIntersectionKernel(X, Y):
'''
implements the histogram intersection kernel in a fancy way
(standard: k(x,y) = sum(min(x_i,y_i)) )
'''
A = X.reshape((X.shape[0], 1, X.shape[1]))
B = Y.reshape((1, ) + Y.shape)
return np.sum(np.minimum(A, B), axis=2)
def _defaultTrainingHistograms():
    '''
    produce a standard training set with black regions

    Returns an ndarray of shape (100, _defaultBinSize+1): the first 50 rows
    are histograms of all-black 64x64 patches labeled positive (last column
    set to 1), the remaining 50 are histograms of random mid-gray patches
    whose label column stays 0 (negative).
    '''
    nHists = 100
    n = _defaultBinSize+1
    hists = np.zeros((nHists, n))
    # generate nHists//2 positive sets
    # BUGFIX: '//' keeps the range() argument an int under Python 3
    for i in range(nHists//2):
        (hists[i, :n-1], _) = np.histogram(
            np.zeros((64, 64), dtype=np.uint8), bins=_defaultBinSize,
            range=(0, 255), density=True)
        hists[i, n-1] = 1
    for i in range(nHists//2, nHists):
        # np.random.randint has an *exclusive* upper bound, so 181 matches
        # the removed/deprecated np.random.random_integers(60, 180, ...)
        (hists[i, :n-1], _) = np.histogram(
            np.random.randint(60, 181, (64, 64)), bins=_defaultBinSize,
            range=(0, 255), density=True)
    return hists
#####################################
### HISTOGRAM EXTRACTION FUNCTION ###
#####################################
def extractHistograms(volume, labels, patchSize=64, haloSize=0,
                      nBins=30, intRange=(0, 255), appendPositions=False):
    '''
    extracts histograms from 3d-volume
    - labels are
        0 ignore
        1 positive
        2 negative
    - histogram extraction is attempted to be done in parallel
    - patches that intersect with the volume border are discarded
    - volume and labels must be 3d, and in order 'zyx' (if not VigraArrays)
    - returns: np.ndarray, shape: (nSamples,nBins+1), last column is the label
      (with appendPositions=True: shape (nSamples,nBins+4), the final three
      columns hold the patch-center coordinates z, y, x)
    '''
    # progress reporter class, histogram extraction can take quite a long time
    class ProgressReporter(object):
        # lock serializes status updates and the progress log line
        lock = None
        def __init__(self, nThreads):
            self.lock = ThreadLock()
            self.nThreads = nThreads
            self.status = np.zeros((nThreads,))
        def report(self, index):
            self.lock.acquire()
            self.status[index] = 1
            logger.debug("Finished threads: %d/%d." %
                         (self.status.sum(), len(self.status)))
            self.lock.release()
    # sanity checks
    assert len(volume.shape) == 3, "Volume must be 3d data"
    assert volume.shape == labels.shape,\
        "Volume and labels must have the same shape"
    try:
        volumeZYX = volume.withAxes(*'zyx')
        labelsZYX = labels.withAxes(*'zyx')
    except AttributeError:
        # plain ndarrays carry no axistags -- assume they are already 'zyx'
        # can't blame me
        volumeZYX = volume
        labelsZYX = labels
        pass
    # compute actual patch size (core plus halo on both sides)
    patchSize = patchSize + 2*haloSize
    # fill list of patch centers (VigraArray does not support bitwise_or)
    ind_z, ind_y, ind_x = np.where(
        (labelsZYX == 1).view(np.ndarray) | (labelsZYX == 2).view(np.ndarray))
    index = np.arange(len(ind_z))
    # prepare chunking of histogram centers
    chunkSize = 10000  # FIXME magic number??
    nChunks = len(index)//chunkSize + (1 if len(index) % chunkSize > 0 else 0)
    sliceList = [slice(k*chunkSize, min((k+1)*chunkSize, len(index)))
                 for k in range(nChunks)]
    histoList = [None]*nChunks
    # prepare subroutine for parallel extraction
    reporter = ProgressReporter(nChunks)
    #BEGIN subroutine
    def _extractHistogramsSub(itemList):
        # coordinates of the requested patch centers
        xs = ind_x[itemList]
        ys = ind_y[itemList]
        zs = ind_z[itemList]
        # patch bounding boxes centered on the labeled voxel
        ymin = ys - patchSize//2
        ymax = ymin + patchSize
        xmin = xs - patchSize//2
        xmax = xmin + patchSize
        # discard patches that stick out of the slice
        validPatchIndices = np.where(
            np.all(
                (ymin >= 0,
                 xmin >= 0,
                 xmax <= volumeZYX.shape[2],
                 ymax <= volumeZYX.shape[1]),
                axis=0))[0]
        if appendPositions:
            out = np.zeros((len(validPatchIndices), nBins+4))
        else:
            out = np.zeros((len(validPatchIndices), nBins+1))
        for k, patchInd in enumerate(validPatchIndices):
            x = xs[patchInd]
            y = ys[patchInd]
            z = zs[patchInd]
            vol = volumeZYX[z, ymin[patchInd]:ymax[patchInd],
                            xmin[patchInd]:xmax[patchInd]]
            (out[k, :nBins], _) = np.histogram(
                vol, bins=nBins, range=intRange, density=True)
            # label column: 1 for positive patches, 0 otherwise
            out[k, nBins] = 1 if labelsZYX[z, y, x] == 1 else 0
            if appendPositions:
                out[k, nBins+1:] = [z, y, x]
        return out
    def partFun(i):
        # worker: extract one chunk of histograms and report progress
        itemList = index[sliceList[i]]
        histos = _extractHistogramsSub(itemList)
        histoList[i] = histos
        reporter.report(i)
    #END subroutine
    # pool the extraction requests
    pool = RequestPool()
    for i in range(nChunks):
        req = Request(partial(partFun, i))
        pool.add(req)
    pool.wait()
    pool.clean()
    return np.vstack(histoList)
############################
############################
############################
### ###
### MAIN ###
### ###
############################
############################
############################
def toH5(data, pathOrGroup, pathInFile, compression=None):
    '''
    Write *data* to HDF5 via vigra, forwarding *compression* when the
    installed vigra supports it and silently retrying without it otherwise.
    '''
    writeHDF5 = vigra.impex.writeHDF5
    try:
        return writeHDF5(data, pathOrGroup, pathInFile, compression)
    except TypeError:
        # old vigra does not support compression
        logger.debug("'compression' argument not yet supported by vigra.")
        return writeHDF5(data, pathOrGroup, pathInFile)
if __name__ == "__main__":
    # Command line tool: extract histograms, train the missing-region
    # detector for every (patch, halo, bins) combination and evaluate it.
    import argparse
    import ast
    import os.path
    from sys import exit
    import time
    import csv
    from lazyflow.graph import Graph
    from lazyflow.operators.opDetectMissingData import _histogramIntersectionKernel
    logging.basicConfig()
    logger.setLevel(logging.INFO)
    thisTime = time.strftime("%Y-%m-%d_%H.%M")
    # BEGIN ARGPARSE
    parser = argparse.ArgumentParser(
        description='Train a missing slice detector'+
        """
        Example invocation:
        python2 opDetectMissingData.py block1_test.h5 block1_testLabels.h5 --patch 64 --halo 32 --bins 30 -d ~/testing/2013_08_16 -t 9-12 --opts 200,0,400,1000,2 --shape "(1024,1024,14)"
        """)
    parser.add_argument(
        'file', nargs='*', action='store',
        help="volume and labels (if omitted, the working directory must contain histogram files)")
    parser.add_argument(
        '-d', '--directory', dest='directory', action='store', default="/tmp",
        help='working directory, histograms and detector file will be stored there')
    parser.add_argument(
        '-t', '--testingrange', dest='testingrange', action='store', default=None,
        help='the z range of the labels that are for testing (like "0-3,11,17-19" which would evaluate to [0,1,2,3,11,17,18,19])')
    parser.add_argument(
        '-f', '--force', dest='force', action='store_true', default=False,
        help='force extraction of histograms, even if the directory already contains histograms')
    parser.add_argument(
        '--patch', dest='patchSize', action='store', default='64',
        help='patch size (e.g.: "32,64-128")')
    parser.add_argument(
        '--halo', dest='haloSize', action='store', default='64',
        help='halo size (e.g.: "32,64-128")')
    parser.add_argument(
        '--bins', dest='binSize', action='store', default='30',
        help='number of histogram bins (e.g.: "10-15,20")')
    parser.add_argument(
        '--shape', dest='shape', action='store', default=None,
        help='shape of the volume in tuple notation "(x,y,z)" (only neccessary if loading histograms from file)')
    parser.add_argument(
        '--opts', dest='opts', action='store', default='250,0,250,1000,4',
        help='<initial number of samples>,<maximum number of samples removed per step>,<maximum number of samples added per step>,' +
        '<maximum number of samples>,<number of steps> (e.g. 250,0,250,1000,4)')
    args = parser.parse_args()
    # END ARGPARSE
    # BEGIN FILESYSTEM
    workingdir = args.directory
    assert os.path.isdir(workingdir), \
        "Directory '{}' does not exist.".format(workingdir)
    for f in args.file:
        assert os.path.isfile(f), "'{}' does not exist.".format(f)
    # END FILESYSTEM
    # BEGIN NORMALIZE
    def _expand(rangelist):
        # expand a range spec like "0-3,11" into np.array([0,1,2,3,11])
        if rangelist is not None:
            singleRanges = rangelist.split(',')
            expandedRanges = []
            for r in singleRanges:
                r2 = r.split('-')
                if len(r2) == 1:
                    expandedRanges.append(int(r))
                elif len(r2) == 2:
                    for i in range(int(r2[0]), int(r2[1])+1):
                        expandedRanges.append(i)
                else:
                    logger.error("Syntax Error: '{}'".format(r))
                    exit(33)
            return np.asarray(expandedRanges)
        else:
            return np.zeros((0,))
    testrange = _expand(args.testingrange)
    patchSizes = _expand(args.patchSize)
    haloSizes = _expand(args.haloSize)
    binSizes = _expand(args.binSize)
    try:
        opts = [int(opt) for opt in args.opts.split(",")]
        assert len(opts) == 5
        opts = dict(zip(
            ["firstSamples", "maxRemovePerStep", "maxAddPerStep",
             "maxSamples", "nTrainingSteps"], opts))
    # BUGFIX: narrowed from a bare 'except:' (which also swallowed
    # SystemExit/KeyboardInterrupt) to the exceptions int()/assert raise
    except (ValueError, AssertionError):
        raise ValueError(
            "Cannot parse '--opts' argument '{}'".format(args.opts))
    # END NORMALIZE
    csvfile = open(os.path.join(
        workingdir, "%s_test_results.tsv" % (thisTime,)), 'w')
    csvwriter = csv.DictWriter(
        csvfile, fieldnames=("patch", "halo", "bins", "recall", "precision"),
        delimiter=' ', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    csvwriter.writeheader()
    op = OpDetectMissing(graph=Graph())
    op._felzenOpts = opts
    logger.info("Starting training script ({})".format(
        time.strftime("%Y-%m-%d %H:%M")))
    t_start = time.time()
    # iterate training conditions
    for patchSize in patchSizes:
        for haloSize in haloSizes:
            for binSize in binSizes:
                histfile = os.path.join(
                    workingdir,
                    "histograms_%d_%d_%d.h5" % (patchSize, haloSize, binSize))
                detfile = os.path.join(
                    workingdir,
                    "%s_detector_%d_%d_%d.pkl" % (
                        thisTime, patchSize, haloSize, binSize))
                predfile = os.path.join(
                    workingdir,
                    "%s_prediction_results_%d_%d_%d.h5" % (
                        thisTime, patchSize, haloSize, binSize))
                startFromLabels = args.force or not os.path.exists(histfile)
                # EXTRACT HISTOGRAMS
                if startFromLabels:
                    logger.info("Gathering histograms from {} patches (this could take a while) ...".format(
                        (patchSize, haloSize, binSize)))
                    assert len(args.file) == 2, \
                        "If there are no histograms available, volume and labels must be provided."
                    locs = ['/volume/data', '/cube']
                    volume = None
                    labels = None
                    for l in locs:
                        try:
                            volume = vigra.impex.readHDF5(args.file[0], l).withAxes(*'zyx')
                            break
                        except KeyError:
                            pass
                    if volume is None:
                        logger.error(
                            "Could not find a volume in {} with paths {}".format(
                                args.file[0], locs))
                        csvfile.close()
                        exit(42)
                    for l in locs:
                        try:
                            # BUGFIX: use the loop variable 'l' -- the old
                            # code hardcoded '/volume/data' and never tried
                            # the '/cube' fallback for the label file
                            labels = vigra.impex.readHDF5(
                                args.file[1], l).withAxes(*'zyx')
                            break
                        except KeyError:
                            pass
                    if labels is None:
                        logger.error(
                            "Could not find a volume in {} with paths {}".format(
                                args.file[1], locs))
                        csvfile.close()
                        exit(43)
                    volShape = volume.withAxes(*'xyz').shape
                    # bear with me, complicated axistags stuff is neccessary
                    # for my old vigra to work
                    trainrange = np.setdiff1d(
                        np.arange(volume.shape[0]), testrange)
                    trainData = vigra.taggedView(
                        volume[trainrange, :, :],
                        axistags=vigra.defaultAxistags('zyx'))
                    trainLabels = vigra.taggedView(
                        labels[trainrange, :, :],
                        axistags=vigra.defaultAxistags('zyx'))
                    trainHistograms = extractHistograms(
                        trainData, trainLabels, patchSize=patchSize,
                        haloSize=haloSize, nBins=binSize, intRange=(0, 255),
                        appendPositions=True)
                    if len(testrange) > 0:
                        testData = vigra.taggedView(
                            volume[testrange, :, :],
                            axistags=vigra.defaultAxistags('zyx'))
                        testLabels = vigra.taggedView(
                            labels[testrange, :, :],
                            axistags=vigra.defaultAxistags('zyx'))
                        testHistograms = extractHistograms(
                            testData, testLabels, patchSize=patchSize,
                            haloSize=haloSize, nBins=binSize,
                            intRange=(0, 255), appendPositions=True)
                    else:
                        testHistograms = np.zeros(
                            (0, trainHistograms.shape[1]))
                    vigra.impex.writeHDF5(
                        trainHistograms, histfile, '/volume/train')
                    if len(testHistograms) > 0:
                        vigra.impex.writeHDF5(
                            testHistograms, histfile, '/volume/test')
                    logger.info("Dumped histograms to '{}'.".format(histfile))
                else:
                    logger.info("Gathering histograms from file...")
                    trainHistograms = vigra.impex.readHDF5(
                        histfile, '/volume/train')
                    try:
                        testHistograms = vigra.impex.readHDF5(
                            histfile, '/volume/test')
                    except KeyError:
                        testHistograms = np.zeros(
                            (0, trainHistograms.shape[1]))
                    logger.info("Loaded histograms from '{}'.".format(
                        histfile))
                # nBins histogram columns + label + (z, y, x) position
                assert trainHistograms.shape[1] == binSize+4
                assert testHistograms.shape[1] == binSize+4
                if len(testHistograms) > 0:
                    if args.shape is None:
                        logger.warning(
                            "Guessing the shape of the original data...")
                        volShape = (1024, 1024, 512, )
                    else:
                        # literal_eval instead of eval: parses the tuple
                        # without executing arbitrary expressions
                        volShape = ast.literal_eval(args.shape)
                        assert isinstance(volShape, tuple) \
                            and len(volShape) == 3
                assert not np.any(np.isinf(trainHistograms))
                assert not np.any(np.isnan(trainHistograms))
                assert not np.any(np.isinf(testHistograms))
                assert not np.any(np.isnan(testHistograms))
                # TRAIN
                logger.info("Training...")
                op.PatchSize.setValue(patchSize)
                op.HaloSize.setValue(haloSize)
                op.DetectionMethod.setValue('svm')
                op.NHistogramBins.setValue(binSize)
                # strip the position columns: histograms + label only
                op.TrainingHistograms.setValue(trainHistograms[:, :binSize+1])
                op.train(force=True)
                # save detector
                try:
                    if detfile is None:
                        with tempfile.NamedTemporaryFile(
                                suffix='.pkl', prefix='detector_',
                                delete=False) as f:
                            f.write(op.dumps())
                            logger.info(
                                "Detector written to {}".format(f.name))
                    else:
                        # BUGFIX: binary mode -- op.dumps() returns pickled
                        # bytes, which a text-mode file rejects on Python 3
                        with open(detfile, 'wb') as f:
                            f.write(op.dumps())
                        logger.info(
                            "Detector written to {}".format(f.name))
                except Exception as e:
                    logger.error("==== BEGIN DETECTOR DUMP ====")
                    logger.error(op.dumps())
                    logger.error("==== END DETECTOR DUMP ====")
                    logger.error(str(e))
                if len(testHistograms) == 0:
                    # no testing required
                    continue
                logger.info("Testing...")
                # split into histos, positions and labels
                hists = testHistograms[:, :binSize]
                labels = testHistograms[:, binSize]
                zyxPos = testHistograms[:, binSize+1:]
                pred = op.predict(hists, method='svm')
                predNeg = pred[np.where(labels == 0)[0]]
                predPos = pred[np.where(labels == 1)[0]]
                # false-positive/false-negative rates, precision, recall
                fp = (predNeg.sum())/float(predNeg.size)
                fn = (predPos.size - predPos.sum())/float(predPos.size)
                prec = predPos.sum()/float(predPos.sum()+predNeg.sum())
                recall = 1-fn
                logger.info(
                    " Predicted {} histograms with patchSize={}, haloSize={}, bins={}.".format(
                        len(hists), patchSize, haloSize, binSize))
                logger.info(" FPR=%.5f, FNR=%.5f (recall=%.5f, precision=%.5f)." %\
                    (fp, fn, recall, prec))
                csvwriter.writerow(
                    {'patch': patchSize, 'halo': haloSize,
                     'bins': binSize, 'recall': recall, 'precision': prec})
                logger.info("Writing prediction volume...")
                predVol = vigra.VigraArray(
                    np.zeros(volShape, dtype=np.uint8),
                    axistags=vigra.defaultAxistags('xyz')).withAxes(*'zyx')
                for i, p in enumerate(pred):
                    # BUGFIX: the position columns are floats (they come from
                    # the histogram array); cast to int before indexing
                    z, y, x = (int(v) for v in zyxPos[i])
                    predVol[testrange[z], y, x] = p
                toH5(predVol, predfile, '/volume/data', compression="GZIP")
    logger.info(
        "Finished training script ({})".format(
            time.strftime("%Y-%m-%d %H:%M")))
    t_stop = time.time()
    logger.info("Duration: {}".format(
        time.strftime(
            "%Hh, %Mm, %Ss", time.gmtime((t_stop-t_start) % (24*60*60)))))
    if (t_stop-t_start) >= 24*60*60:
        # BUGFIX: '%' and '//' share precedence and associate left, so the
        # old '"..." % x // y' evaluated (str % x) // y and raised TypeError
        # whenever the run exceeded 24 hours; parenthesize the division
        logger.info(" and %d days!" % (int(t_stop-t_start)//(24*60*60)))
    csvfile.close()
| lgpl-3.0 |
hunering/demo-code | python/books/DLWP/3.5-reuters.py | 1 | 1163 | import numpy as np
from keras.datasets import reuters
from keras import models
from keras import layers
from keras import optimizers
from keras import losses
from keras import metrics
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from utils import init_keras, vectorize_sequences
# Reuters newswire topic classification (Deep Learning with Python, sec. 3.5):
# multi-class (46 topics) classification with a small dense network.
init_keras()
# keep the 10,000 most frequent words; inputs are word-index sequences
(x_train, t_train), (x_test, t_test) = reuters.load_data(num_words=10000)
# multi-hot encode each sequence into a fixed 10,000-dim vector
x_train = vectorize_sequences(x_train)
x_test = vectorize_sequences(x_test)
# one-hot encode the 46 topic labels
t_train = to_categorical(t_train)
t_test = to_categorical(t_test)
# hold out a third of the training data for validation
x_train, x_cv, t_train, t_cv = train_test_split(
    x_train, t_train, test_size=0.33)
network = models.Sequential()
network.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
network.add(layers.Dense(64, activation='relu'))
# softmax over the 46 mutually exclusive topics
network.add(layers.Dense(46, activation='softmax'))
network.compile(optimizer=optimizers.RMSprop(lr=0.001),
                loss=losses.categorical_crossentropy, metrics=[metrics.categorical_accuracy])
network.fit(x=x_train, y=t_train, epochs=15,
            batch_size=512, validation_data=(x_cv, t_cv))
# result: [test loss, test categorical accuracy]
result = network.evaluate(x=x_test, y=t_test)
print(result)
zhengwsh/InplusTrader_Linux | InplusTrader/backtestEngine/model/account/stock_account.py | 1 | 12148 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import pandas as pd
from .base_account import BaseAccount
from ..dividend import Dividend
from ...const import SIDE, ACCOUNT_TYPE
from ...utils.i18n import gettext as _
from ...utils.logger import user_system_log, system_log
from ...execution_context import ExecutionContext
class StockAccount(BaseAccount):
    def __init__(self, env, init_cash, start_date):
        # delegate to BaseAccount, tagging this account as a stock account
        super(StockAccount, self).__init__(env, init_cash, start_date, ACCOUNT_TYPE.STOCK)
    def before_trading(self):
        '''
        Pre-open housekeeping: drop empty positions and apply dividend/split
        events due on this trading day.
        '''
        super(StockAccount, self).before_trading()
        positions = self.portfolio.positions
        # collect first, then remove -- avoids mutating while iterating
        removing_ids = []
        for order_book_id in positions.keys():
            position = positions[order_book_id]
            if position._quantity == 0:
                removing_ids.append(order_book_id)
        for order_book_id in removing_ids:
            positions.pop(order_book_id, None)
        trading_date = ExecutionContext.get_current_trading_dt().date()
        # pay out dividends whose payable date is today (defined elsewhere
        # in this class)
        self._handle_dividend_payable(trading_date)
        if self.config.base.handle_split:
            self._handle_split(trading_date)
    def after_trading(self):
        '''
        Post-close housekeeping: reset T+1 counters and close out positions
        of stocks that were de-listed today.
        '''
        trading_date = ExecutionContext.get_current_trading_dt().date()
        portfolio = self.portfolio
        # de_listed may occur
        portfolio._portfolio_value = None
        positions = portfolio.positions
        de_listed_id_list = []
        # update buy_today_holding_quantity to zero for T+1
        for order_book_id in positions:
            position = positions[order_book_id]
            position._buy_today_holding_quantity = 0
            # check whether the stock is de-listed as of today; if so, close
            # the position at the last close price and warn the user
            if position._de_listed_date is not None and trading_date >= position._de_listed_date.date():
                de_listed_id_list.append(order_book_id)
        for de_listed_id in de_listed_id_list:
            position = positions[de_listed_id]
            if self.config.validator.cash_return_by_stock_delisted:
                # return the position's last market value to the cash balance
                portfolio._cash += position.market_value
            if position._quantity != 0:
                user_system_log.warn(
                    _("{order_book_id} is expired, close all positions by system").format(order_book_id=de_listed_id))
            del positions[de_listed_id]
    def settlement(self):
        '''
        Daily settlement: persist the portfolio, roll today's value into
        yesterday's, and process ex-dividend events.
        '''
        portfolio = self.portfolio
        trading_date = ExecutionContext.get_current_trading_dt().date()
        self.portfolio_persist()
        # snapshot today's value as the reference for tomorrow's P&L
        portfolio._yesterday_portfolio_value = portfolio.portfolio_value
        self._handle_dividend_ex_dividend(trading_date)
    def bar(self, bar_dict):
        '''
        Bar event: mark all positions to the bar's close price.
        '''
        portfolio = self.portfolio
        # invalidate cache
        portfolio._portfolio_value = None
        positions = portfolio.positions
        for order_book_id, position in six.iteritems(positions):
            bar = bar_dict[order_book_id]
            # skip instruments without data in this bar (NaN close)
            if not bar.isnan:
                position._market_value = position._quantity * bar.close
                position._last_price = bar.close
    def tick(self, tick):
        '''
        Tick event: mark the ticked position to the last traded price.
        '''
        portfolio = self.portfolio
        # invalidate cache
        portfolio._portfolio_value = None
        position = portfolio.positions[tick.order_book_id]
        position._market_value = position._quantity * tick.last_price
        position._last_price = tick.last_price
    def order_pending_new(self, account, order):
        '''
        New-order event: register the outstanding quantity/value and freeze
        the corresponding cash. Ignores events addressed to other accounts.
        '''
        if self != account:
            return
        if order._is_final():
            return
        order_book_id = order.order_book_id
        position = self.portfolio.positions[order_book_id]
        position._total_orders += 1
        create_quantity = order.quantity
        # value is booked at the order's frozen price, not the fill price
        create_value = order._frozen_price * create_quantity
        self._update_order_data(order, create_quantity, create_value)
        self._update_frozen_cash(order, create_value)
    def order_creation_pass(self, account, order):
        # nothing to do: bookkeeping already happened in order_pending_new
        pass
    def order_creation_reject(self, account, order):
        '''
        Order rejected at creation: roll back the bookkeeping done in
        order_pending_new (order count, outstanding value, frozen cash).
        '''
        if self != account:
            return
        order_book_id = order.order_book_id
        position = self.portfolio.positions[order_book_id]
        position._total_orders -= 1
        cancel_quantity = order.unfilled_quantity
        cancel_value = order._frozen_price * cancel_quantity
        self._update_order_data(order, cancel_quantity, cancel_value)
        self._update_frozen_cash(order, -cancel_value)
    def order_pending_cancel(self, account, order):
        # nothing to roll back until the cancellation is confirmed
        pass
    def order_cancellation_pass(self, account, order):
        # cancellation confirmed: release the remaining outstanding amount
        self._cancel_order_cal(account, order)
    def order_cancellation_reject(self, account, order):
        # order stays alive, bookkeeping unchanged
        pass
    def order_unsolicited_update(self, account, order):
        # broker-side cancel/expiry: same rollback as a confirmed cancel
        self._cancel_order_cal(account, order)
    def _cancel_order_cal(self, account, order):
        '''
        Shared rollback for cancelled/expired orders: release the unfilled
        quantity's outstanding value and the frozen cash it backed.
        '''
        if self != account:
            return
        rejected_quantity = order.unfilled_quantity
        rejected_value = order._frozen_price * rejected_quantity
        self._update_order_data(order, -rejected_quantity, -rejected_value)
        self._update_frozen_cash(order, -rejected_value)
    def trade(self, account, trade):
        # Book a fill: update average price, open-order/trade totals, frozen
        # cash, market value and cash balance. Statement order matters here
        # (avg price must use the pre-fill quantity), so the flow is kept as-is.
        if self != account:
            return
        portfolio = self.portfolio
        # Invalidate the cached portfolio value; it is recomputed lazily.
        portfolio._portfolio_value = None
        order = trade.order
        bar_dict = ExecutionContext.get_current_bar_dict()
        order_book_id = order.order_book_id
        position = portfolio.positions[order.order_book_id]
        position._is_traded = True
        trade_quantity = trade.last_quantity
        # Cash was frozen at the order's frozen price, but the fill settles at
        # the trade price — unfreeze at the former, pay/receive the latter.
        minus_value_by_trade = order._frozen_price * trade_quantity
        trade_value = trade.last_price * trade_quantity
        if order.side == SIDE.BUY:
            # Weighted-average cost using the quantity held BEFORE this fill.
            position._avg_price = (position._avg_price * position._quantity +
                                   trade_quantity * trade.last_price) / (position._quantity + trade_quantity)
        self._update_order_data(order, -trade_quantity, -minus_value_by_trade)
        self._update_trade_data(order, trade, trade_quantity, trade_value)
        self._update_frozen_cash(order, -minus_value_by_trade)
        price = bar_dict[order_book_id].close
        # NOTE(review): these book IDs look like cross-border ETFs exempt from
        # the T+1 holding restriction — confirm against the exchange rules.
        if order.side == SIDE.BUY and order.order_book_id not in \
                {'510900.XSHG', '513030.XSHG', '513100.XSHG', '513500.XSHG'}:
            position._buy_today_holding_quantity += trade_quantity
        # NOTE(review): market value is derived from net traded quantity only;
        # confirm initial holdings are folded into _buy_trade_quantity upstream.
        position._market_value = (position._buy_trade_quantity - position._sell_trade_quantity) * price
        position._last_price = price
        position._total_trades += 1
        portfolio._total_tax += trade.tax
        portfolio._total_commission += trade.commission
        portfolio._cash = portfolio._cash - trade.tax - trade.commission
        if order.side == SIDE.BUY:
            portfolio._cash -= trade_value
        else:
            portfolio._cash += trade_value
        self._last_trade_id = trade.exec_id
def _update_order_data(self, order, inc_order_quantity, inc_order_value):
position = self.portfolio.positions[order.order_book_id]
if order.side == SIDE.BUY:
position._buy_order_quantity += inc_order_quantity
position._buy_order_value += inc_order_value
else:
position._sell_order_quantity += inc_order_quantity
position._sell_order_value += inc_order_value
def _update_trade_data(self, order, trade, trade_quantity, trade_value):
position = self.portfolio.positions[order.order_book_id]
position._transaction_cost = position._transaction_cost + trade.commission + trade.tax
if order.side == SIDE.BUY:
position._buy_trade_quantity += trade_quantity
position._buy_trade_value += trade_value
else:
position._sell_trade_quantity += trade_quantity
position._sell_trade_value += trade_value
def _update_frozen_cash(self, order, inc_order_value):
portfolio = self.portfolio
if order.side == SIDE.BUY:
portfolio._frozen_cash += inc_order_value
portfolio._cash -= inc_order_value
def _handle_split(self, trading_date):
import rqdatac
for order_book_id, position in six.iteritems(self.portfolio.positions):
split_df = rqdatac.get_split(order_book_id, start_date="2005-01-01", end_date="2099-01-01")
if split_df is None:
system_log.warn(_("no split data {}").foramt(order_book_id))
continue
try:
series = split_df.loc[trading_date]
except KeyError:
continue
# 处理拆股
user_system_log.info(_("split {order_book_id}, {position}").format(
order_book_id=order_book_id,
position=position,
))
ratio = series.split_coefficient_to / series.split_coefficient_from
for key in ["_buy_order_quantity", "_sell_order_quantity", "_buy_trade_quantity", "_sell_trade_quantity"]:
setattr(position, key, getattr(position, key) * ratio)
user_system_log.info(_("split {order_book_id}, {position}").format(
order_book_id=order_book_id,
position=position,
))
user_system_log.info(_("split {order_book_id}, {series}").format(
order_book_id=order_book_id,
series=series,
))
def _handle_dividend_payable(self, trading_date):
"""handle dividend payable before trading
"""
to_delete_dividend = []
for order_book_id, dividend_info in six.iteritems(self.portfolio._dividend_info):
dividend_series_dict = dividend_info.dividend_series_dict
if pd.Timestamp(trading_date) == pd.Timestamp(dividend_series_dict['payable_date']):
dividend_per_share = dividend_series_dict["dividend_cash_before_tax"] / dividend_series_dict["round_lot"]
if dividend_per_share > 0 and dividend_info.quantity > 0:
dividend_cash = dividend_per_share * dividend_info.quantity
self.portfolio._dividend_receivable -= dividend_cash
self.portfolio._cash += dividend_cash
to_delete_dividend.append(order_book_id)
for order_book_id in to_delete_dividend:
self.portfolio._dividend_info.pop(order_book_id, None)
def _handle_dividend_ex_dividend(self, trading_date):
data_proxy = ExecutionContext.get_data_proxy()
for order_book_id, position in six.iteritems(self.portfolio.positions):
dividend_series = data_proxy.get_dividend_by_book_date(order_book_id, trading_date)
if dividend_series is None:
continue
dividend_series_dict = {
'book_closure_date': dividend_series['book_closure_date'].to_pydatetime(),
'ex_dividend_date': dividend_series['ex_dividend_date'].to_pydatetime(),
'payable_date': dividend_series['payable_date'].to_pydatetime(),
'dividend_cash_before_tax': float(dividend_series['dividend_cash_before_tax']),
'round_lot': int(dividend_series['round_lot'])
}
dividend_per_share = dividend_series_dict["dividend_cash_before_tax"] / dividend_series_dict["round_lot"]
self.portfolio._dividend_info[order_book_id] = Dividend(order_book_id, position._quantity, dividend_series_dict)
self.portfolio._dividend_receivable += dividend_per_share * position._quantity
| mit |
huzq/scikit-learn | sklearn/ensemble/_forest.py | 2 | 95540 | """
Forest of trees-based ensemble methods.
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
import numbers
from warnings import catch_warnings, simplefilter, warn
import threading
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from joblib import Parallel, delayed
from ..base import ClassifierMixin, RegressorMixin, MultiOutputMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning
from ._base import BaseEnsemble, _partition_estimators
from ..utils.fixes import _joblib_parallel_args
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..utils.validation import _deprecate_positional_args
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _get_n_samples_bootstrap(n_samples, max_samples):
"""
Get the number of samples in a bootstrap sample.
Parameters
----------
n_samples : int
Number of samples in the dataset.
max_samples : int or float
The maximum number of samples to draw from the total available:
- if float, this indicates a fraction of the total and should be
the interval `(0, 1)`;
- if int, this indicates the exact number of samples;
- if None, this indicates the total number of samples.
Returns
-------
n_samples_bootstrap : int
The total number of samples to draw for the bootstrap sample.
"""
if max_samples is None:
return n_samples
if isinstance(max_samples, numbers.Integral):
if not (1 <= max_samples <= n_samples):
msg = "`max_samples` must be in range 1 to {} but got value {}"
raise ValueError(msg.format(n_samples, max_samples))
return max_samples
if isinstance(max_samples, numbers.Real):
if not (0 < max_samples < 1):
msg = "`max_samples` must be in range (0, 1) but got value {}"
raise ValueError(msg.format(max_samples))
return round(n_samples * max_samples)
msg = "`max_samples` should be int or float, but got type '{}'"
raise TypeError(msg.format(type(max_samples)))
def _generate_sample_indices(random_state, n_samples, n_samples_bootstrap):
    """
    Private function used to _parallel_build_trees function."""
    # Draw indices uniformly with replacement — the bootstrap for one tree.
    rng = check_random_state(random_state)
    return rng.randint(0, n_samples, n_samples_bootstrap)
def _generate_unsampled_indices(random_state, n_samples, n_samples_bootstrap):
    """
    Private function used to forest._set_oob_score function."""
    # Re-derive the tree's bootstrap draw from its seed, then keep every
    # index the draw never touched (the out-of-bag rows).
    drawn = _generate_sample_indices(random_state, n_samples,
                                     n_samples_bootstrap)
    hit_counts = np.bincount(drawn, minlength=n_samples)
    return np.arange(n_samples)[hit_counts == 0]
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None,
n_samples_bootstrap=None):
"""
Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples,
n_samples_bootstrap)
sample_counts = np.bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with catch_warnings():
simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y,
indices=indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y,
indices=indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
class BaseForest(MultiOutputMixin, BaseEnsemble, metaclass=ABCMeta):
    """
    Base class for forests of trees.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=100, *,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=None,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None,
                 max_samples=None):
        super().__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params)
        self.bootstrap = bootstrap
        self.oob_score = oob_score
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose
        self.warm_start = warm_start
        self.class_weight = class_weight
        self.max_samples = max_samples
    def apply(self, X):
        """
        Apply trees in the forest to X, return leaf indices.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        X_leaves : ndarray of shape (n_samples, n_estimators)
            For each datapoint x in X and for each tree in the forest,
            return the index of the leaf x ends up in.
        """
        X = self._validate_X_predict(X)
        # Threads are preferred: the tree prediction code releases the GIL.
        results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                           **_joblib_parallel_args(prefer="threads"))(
            delayed(tree.apply)(X, check_input=False)
            for tree in self.estimators_)
        return np.array(results).T
    def decision_path(self, X):
        """
        Return the decision path in the forest.
        .. versionadded:: 0.18
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        indicator : sparse matrix of shape (n_samples, n_nodes)
            Return a node indicator matrix where non zero elements indicates
            that the samples goes through the nodes. The matrix is of CSR
            format.
        n_nodes_ptr : ndarray of shape (n_estimators + 1,)
            The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
            gives the indicator value for the i-th estimator.
        """
        X = self._validate_X_predict(X)
        indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                              **_joblib_parallel_args(prefer='threads'))(
            delayed(tree.decision_path)(X, check_input=False)
            for tree in self.estimators_)
        # Cumulative node counts delimit each tree's columns in the stacked
        # indicator matrix.
        n_nodes = [0]
        n_nodes.extend([i.shape[1] for i in indicators])
        n_nodes_ptr = np.array(n_nodes).cumsum()
        return sparse_hstack(indicators).tocsr(), n_nodes_ptr
    def fit(self, X, y, sample_weight=None):
        """
        Build a forest of trees from the training set (X, y).
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Internally, its dtype will be converted
            to ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csc_matrix``.
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            The target values (class labels in classification, real numbers in
            regression).
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.
        Returns
        -------
        self : object
        """
        # Validate or convert input data
        if issparse(y):
            raise ValueError(
                "sparse multilabel-indicator for y is not supported."
            )
        X, y = self._validate_data(X, y, multi_output=True,
                                   accept_sparse="csc", dtype=DTYPE)
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X)
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()
        # Remap output
        self.n_features_ = X.shape[1]
        y = np.atleast_1d(y)
        if y.ndim == 2 and y.shape[1] == 1:
            warn("A column-vector y was passed when a 1d array was"
                 " expected. Please change the shape of y to "
                 "(n_samples,), for example using ravel().",
                 DataConversionWarning, stacklevel=2)
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))
        self.n_outputs_ = y.shape[1]
        y, expanded_class_weight = self._validate_y_class_weight(y)
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)
        if expanded_class_weight is not None:
            # Fold class weights into the per-sample weights.
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight
        # Get bootstrap sample size
        n_samples_bootstrap = _get_n_samples_bootstrap(
            n_samples=X.shape[0],
            max_samples=self.max_samples
        )
        # Check parameters
        self._validate_estimator()
        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available"
                             " if bootstrap=True")
        random_state = check_random_state(self.random_state)
        if not self.warm_start or not hasattr(self, "estimators_"):
            # Free allocated memory, if any
            self.estimators_ = []
        n_more_estimators = self.n_estimators - len(self.estimators_)
        if n_more_estimators < 0:
            raise ValueError('n_estimators=%d must be larger or equal to '
                             'len(estimators_)=%d when warm_start==True'
                             % (self.n_estimators, len(self.estimators_)))
        elif n_more_estimators == 0:
            warn("Warm-start fitting without increasing n_estimators does not "
                 "fit new trees.")
        else:
            if self.warm_start and len(self.estimators_) > 0:
                # We draw from the random state to get the random state we
                # would have got if we hadn't used a warm_start.
                random_state.randint(MAX_INT, size=len(self.estimators_))
            trees = [self._make_estimator(append=False,
                                          random_state=random_state)
                     for i in range(n_more_estimators)]
            # Parallel loop: we prefer the threading backend as the Cython code
            # for fitting the trees is internally releasing the Python GIL
            # making threading more efficient than multiprocessing in
            # that case. However, for joblib 0.12+ we respect any
            # parallel_backend contexts set at a higher level,
            # since correctness does not rely on using threads.
            trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             **_joblib_parallel_args(prefer='threads'))(
                delayed(_parallel_build_trees)(
                    t, self, X, y, sample_weight, i, len(trees),
                    verbose=self.verbose, class_weight=self.class_weight,
                    n_samples_bootstrap=n_samples_bootstrap)
                for i, t in enumerate(trees))
            # Collect newly grown trees
            self.estimators_.extend(trees)
        if self.oob_score:
            self._set_oob_score(X, y)
        # Decapsulate classes_ attributes
        if hasattr(self, "classes_") and self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
        return self
    @abstractmethod
    def _set_oob_score(self, X, y):
        """
        Calculate out of bag predictions and score."""
    def _validate_y_class_weight(self, y):
        # Default implementation
        return y, None
    def _validate_X_predict(self, X):
        """
        Validate X whenever one tries to predict, apply, predict_proba."""
        check_is_fitted(self)
        # Delegate validation to the first fitted tree; all trees share the
        # same expected input layout.
        return self.estimators_[0]._validate_X_predict(X, check_input=True)
    @property
    def feature_importances_(self):
        """
        The impurity-based feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature.  It is also
        known as the Gini importance.
        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.
        Returns
        -------
        feature_importances_ : ndarray of shape (n_features,)
            The values of this array sum to 1, unless all trees are single node
            trees consisting of only the root node, in which case it will be an
            array of zeros.
        """
        check_is_fitted(self)
        # Single-node (root-only) trees carry no importance information and
        # are filtered out before averaging.
        all_importances = Parallel(n_jobs=self.n_jobs,
                                   **_joblib_parallel_args(prefer='threads'))(
            delayed(getattr)(tree, 'feature_importances_')
            for tree in self.estimators_ if tree.tree_.node_count > 1)
        if not all_importances:
            return np.zeros(self.n_features_, dtype=np.float64)
        all_importances = np.mean(all_importances,
                                  axis=0, dtype=np.float64)
        return all_importances / np.sum(all_importances)
def _accumulate_prediction(predict, X, out, lock):
"""
This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
"""
prediction = predict(X, check_input=False)
with lock:
if len(out) == 1:
out[0] += prediction
else:
for i in range(len(out)):
out[i] += prediction[i]
class ForestClassifier(ClassifierMixin, BaseForest, metaclass=ABCMeta):
    """
    Base class for forest of trees-based classifiers.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=100, *,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=None,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None,
                 max_samples=None):
        super().__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight,
            max_samples=max_samples)
    def _set_oob_score(self, X, y):
        """
        Compute out-of-bag score."""
        X = check_array(X, dtype=DTYPE, accept_sparse='csr')
        n_classes_ = self.n_classes_
        n_samples = y.shape[0]
        oob_decision_function = []
        oob_score = 0.0
        predictions = [np.zeros((n_samples, n_classes_[k]))
                       for k in range(self.n_outputs_)]
        n_samples_bootstrap = _get_n_samples_bootstrap(
            n_samples, self.max_samples
        )
        for estimator in self.estimators_:
            # Each sample's OOB vote comes only from trees whose bootstrap
            # never drew that sample; the OOB set is re-derived from the
            # tree's stored random_state.
            unsampled_indices = _generate_unsampled_indices(
                estimator.random_state, n_samples, n_samples_bootstrap)
            p_estimator = estimator.predict_proba(X[unsampled_indices, :],
                                                  check_input=False)
            if self.n_outputs_ == 1:
                p_estimator = [p_estimator]
            for k in range(self.n_outputs_):
                predictions[k][unsampled_indices, :] += p_estimator[k]
        for k in range(self.n_outputs_):
            if (predictions[k].sum(axis=1) == 0).any():
                warn("Some inputs do not have OOB scores. "
                     "This probably means too few trees were used "
                     "to compute any reliable oob estimates.")
            # Normalize accumulated probabilities per sample.
            decision = (predictions[k] /
                        predictions[k].sum(axis=1)[:, np.newaxis])
            oob_decision_function.append(decision)
            oob_score += np.mean(y[:, k] ==
                                 np.argmax(predictions[k], axis=1), axis=0)
        if self.n_outputs_ == 1:
            self.oob_decision_function_ = oob_decision_function[0]
        else:
            self.oob_decision_function_ = oob_decision_function
        self.oob_score_ = oob_score / self.n_outputs_
    def _validate_y_class_weight(self, y):
        # Remap class labels to contiguous integer indices per output and,
        # when requested, expand class weights into per-sample weights.
        check_classification_targets(y)
        y = np.copy(y)
        expanded_class_weight = None
        if self.class_weight is not None:
            y_original = np.copy(y)
        self.classes_ = []
        self.n_classes_ = []
        y_store_unique_indices = np.zeros(y.shape, dtype=int)
        for k in range(self.n_outputs_):
            classes_k, y_store_unique_indices[:, k] = \
                np.unique(y[:, k], return_inverse=True)
            self.classes_.append(classes_k)
            self.n_classes_.append(classes_k.shape[0])
        y = y_store_unique_indices
        if self.class_weight is not None:
            valid_presets = ('balanced', 'balanced_subsample')
            if isinstance(self.class_weight, str):
                if self.class_weight not in valid_presets:
                    raise ValueError('Valid presets for class_weight include '
                                     '"balanced" and "balanced_subsample".'
                                     'Given "%s".'
                                     % self.class_weight)
                if self.warm_start:
                    warn('class_weight presets "balanced" or '
                         '"balanced_subsample" are '
                         'not recommended for warm_start if the fitted data '
                         'differs from the full dataset. In order to use '
                         '"balanced" weights, use compute_class_weight '
                         '("balanced", classes, y). In place of y you can use '
                         'a large enough sample of the full training set '
                         'target to properly estimate the class frequency '
                         'distributions. Pass the resulting weights as the '
                         'class_weight parameter.')
            # 'balanced_subsample' with bootstrap is applied per-tree in
            # _parallel_build_trees instead of globally here.
            if (self.class_weight != 'balanced_subsample' or
                    not self.bootstrap):
                if self.class_weight == "balanced_subsample":
                    class_weight = "balanced"
                else:
                    class_weight = self.class_weight
                expanded_class_weight = compute_sample_weight(class_weight,
                                                              y_original)
        return y, expanded_class_weight
    def predict(self, X):
        """
        Predict class for X.
        The predicted class of an input sample is a vote by the trees in
        the forest, weighted by their probability estimates. That is,
        the predicted class is the one with highest mean probability
        estimate across the trees.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
            The predicted classes.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return self.classes_.take(np.argmax(proba, axis=1), axis=0)
        else:
            n_samples = proba[0].shape[0]
            # all dtypes should be the same, so just take the first
            class_type = self.classes_[0].dtype
            predictions = np.empty((n_samples, self.n_outputs_),
                                   dtype=class_type)
            for k in range(self.n_outputs_):
                predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
                                                                    axis=1),
                                                          axis=0)
            return predictions
    def predict_proba(self, X):
        """
        Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
        the mean predicted class probabilities of the trees in the forest.
        The class probability of a single tree is the fraction of samples of
        the same class in a leaf.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        p : ndarray of shape (n_samples, n_classes), or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute :term:`classes_`.
        """
        check_is_fitted(self)
        # Check data
        X = self._validate_X_predict(X)
        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
        # avoid storing the output of every estimator by summing them here
        all_proba = [np.zeros((X.shape[0], j), dtype=np.float64)
                     for j in np.atleast_1d(self.n_classes_)]
        lock = threading.Lock()
        Parallel(n_jobs=n_jobs, verbose=self.verbose,
                 **_joblib_parallel_args(require="sharedmem"))(
            delayed(_accumulate_prediction)(e.predict_proba, X, all_proba,
                                            lock)
            for e in self.estimators_)
        for proba in all_proba:
            proba /= len(self.estimators_)
        if len(all_proba) == 1:
            return all_proba[0]
        else:
            return all_proba
    def predict_log_proba(self, X):
        """
        Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample is computed as
        the log of the mean predicted class probabilities of the trees in the
        forest.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        p : ndarray of shape (n_samples, n_classes), or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute :term:`classes_`.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return np.log(proba)
        else:
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])
            return proba
class ForestRegressor(RegressorMixin, BaseForest, metaclass=ABCMeta):
    """
    Base class for forest of trees-based regressors.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=100, *,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=None,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 max_samples=None):
        super().__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            max_samples=max_samples)
    def predict(self, X):
        """
        Predict regression target for X.
        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the trees in the forest.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
            The predicted values.
        """
        check_is_fitted(self)
        # Check data
        X = self._validate_X_predict(X)
        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
        # avoid storing the output of every estimator by summing them here
        if self.n_outputs_ > 1:
            y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)
        else:
            y_hat = np.zeros((X.shape[0]), dtype=np.float64)
        # Parallel loop
        lock = threading.Lock()
        Parallel(n_jobs=n_jobs, verbose=self.verbose,
                 **_joblib_parallel_args(require="sharedmem"))(
            delayed(_accumulate_prediction)(e.predict, X, [y_hat], lock)
            for e in self.estimators_)
        y_hat /= len(self.estimators_)
        return y_hat
    def _set_oob_score(self, X, y):
        """
        Compute out-of-bag scores."""
        X = check_array(X, dtype=DTYPE, accept_sparse='csr')
        n_samples = y.shape[0]
        predictions = np.zeros((n_samples, self.n_outputs_))
        n_predictions = np.zeros((n_samples, self.n_outputs_))
        n_samples_bootstrap = _get_n_samples_bootstrap(
            n_samples, self.max_samples
        )
        for estimator in self.estimators_:
            # Average each sample's prediction over the trees that did not
            # see it during fitting (its out-of-bag trees).
            unsampled_indices = _generate_unsampled_indices(
                estimator.random_state, n_samples, n_samples_bootstrap)
            p_estimator = estimator.predict(
                X[unsampled_indices, :], check_input=False)
            if self.n_outputs_ == 1:
                p_estimator = p_estimator[:, np.newaxis]
            predictions[unsampled_indices, :] += p_estimator
            n_predictions[unsampled_indices, :] += 1
        if (n_predictions == 0).any():
            warn("Some inputs do not have OOB scores. "
                 "This probably means too few trees were used "
                 "to compute any reliable oob estimates.")
            # Guard against division by zero for samples with no OOB trees.
            n_predictions[n_predictions == 0] = 1
        predictions /= n_predictions
        self.oob_prediction_ = predictions
        if self.n_outputs_ == 1:
            self.oob_prediction_ = \
                self.oob_prediction_.reshape((n_samples, ))
        self.oob_score_ = 0.0
        for k in range(self.n_outputs_):
            self.oob_score_ += r2_score(y[:, k],
                                        predictions[:, k])
        self.oob_score_ /= self.n_outputs_
    def _compute_partial_dependence_recursion(self, grid, target_features):
        """Fast partial dependence computation.
        Parameters
        ----------
        grid : ndarray of shape (n_samples, n_target_features)
            The grid points on which the partial dependence should be
            evaluated.
        target_features : ndarray of shape (n_target_features)
            The set of target features for which the partial dependence
            should be evaluated.
        Returns
        -------
        averaged_predictions : ndarray of shape (n_samples,)
            The value of the partial dependence function on each grid point.
        """
        grid = np.asarray(grid, dtype=DTYPE, order='C')
        averaged_predictions = np.zeros(shape=grid.shape[0],
                                        dtype=np.float64, order='C')
        for tree in self.estimators_:
            # Note: we don't sum in parallel because the GIL isn't released in
            # the fast method.
            tree.tree_.compute_partial_dependence(
                grid, target_features, averaged_predictions)
        # Average over the forest
        averaged_predictions /= len(self.estimators_)
        return averaged_predictions
class RandomForestClassifier(ForestClassifier):
    """
    A random forest classifier.

    A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
    improve the predictive accuracy and control over-fitting.
    The sub-sample size is controlled with the `max_samples` parameter if
    `bootstrap=True` (default), otherwise the whole dataset is used to build
    each tree.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : int, default=100
        The number of trees in the forest.

        .. versionchanged:: 0.22
           The default value of ``n_estimators`` changed from 10 to 100
           in 0.22.

    criterion : {"gini", "entropy"}, default="gini"
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
        Note: this parameter is tree-specific.

    max_depth : int, default=None
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a fraction and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required to be at a leaf node.
        A split point at any depth will only be considered if it leaves at
        least ``min_samples_leaf`` training samples in each of the left and
        right branches. This may have the effect of smoothing the model,
        especially in regression.

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a fraction and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.

    max_features : {"auto", "sqrt", "log2"}, int or float, default="auto"
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a fraction and
          `round(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_leaf_nodes : int, default=None
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    min_impurity_decrease : float, default=0.0
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.

        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.

        .. versionadded:: 0.19

    min_impurity_split : float, default=None
        Threshold for early stopping in tree growth. A node will split
        if its impurity is above the threshold, otherwise it is a leaf.

        .. deprecated:: 0.19
           ``min_impurity_split`` has been deprecated in favor of
           ``min_impurity_decrease`` in 0.19. The default value of
           ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
           will be removed in 0.25. Use ``min_impurity_decrease`` instead.

    bootstrap : bool, default=True
        Whether bootstrap samples are used when building trees. If False, the
        whole dataset is used to build each tree.

    oob_score : bool, default=False
        Whether to use out-of-bag samples to estimate
        the generalization accuracy.

    n_jobs : int, default=None
        The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,
        :meth:`decision_path` and :meth:`apply` are all parallelized over the
        trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
        context. ``-1`` means using all processors. See :term:`Glossary
        <n_jobs>` for more details.

    random_state : int or RandomState, default=None
        Controls both the randomness of the bootstrapping of the samples used
        when building trees (if ``bootstrap=True``) and the sampling of the
        features to consider when looking for the best split at each node
        (if ``max_features < n_features``).
        See :term:`Glossary <random_state>` for details.

    verbose : int, default=0
        Controls the verbosity when fitting and predicting.

    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest. See :term:`the Glossary <warm_start>`.

    class_weight : {"balanced", "balanced_subsample"}, dict or list of dicts, \
            default=None
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        Note that for multioutput (including multilabel) weights should be
        defined for each class of every column in its own dict. For example,
        for four-class multilabel classification weights should be
        [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
        [{1:1}, {2:5}, {3:1}, {4:1}].

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

        The "balanced_subsample" mode is the same as "balanced" except that
        weights are computed based on the bootstrap sample for every tree
        grown.

        For multi-output, the weights of each column of y will be multiplied.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    ccp_alpha : non-negative float, default=0.0
        Complexity parameter used for Minimal Cost-Complexity Pruning. The
        subtree with the largest cost complexity that is smaller than
        ``ccp_alpha`` will be chosen. By default, no pruning is performed. See
        :ref:`minimal_cost_complexity_pruning` for details.

        .. versionadded:: 0.22

    max_samples : int or float, default=None
        If bootstrap is True, the number of samples to draw from X
        to train each base estimator.

        - If None (default), then draw `X.shape[0]` samples.
        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples. Thus,
          `max_samples` should be in the interval `(0, 1)`.

        .. versionadded:: 0.22

    Attributes
    ----------
    base_estimator_ : DecisionTreeClassifier
        The child estimator template used to create the collection of fitted
        sub-estimators.

    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.

    classes_ : ndarray of shape (n_classes,) or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).

    n_classes_ : int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    feature_importances_ : ndarray of shape (n_features,)
        The impurity-based feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance.

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
        This attribute exists only when ``oob_score`` is True.

    oob_decision_function_ : ndarray of shape (n_samples, n_classes)
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN. This attribute exists
        only when ``oob_score`` is True.

    See Also
    --------
    DecisionTreeClassifier, ExtraTreesClassifier

    Notes
    -----
    The default values for the parameters controlling the size of the trees
    (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
    unpruned trees which can potentially be very large on some data sets. To
    reduce memory consumption, the complexity and size of the trees should be
    controlled by setting those parameter values.

    The features are always randomly permuted at each split. Therefore,
    the best found split may vary, even with the same training data,
    ``max_features=n_features`` and ``bootstrap=False``, if the improvement
    of the criterion is identical for several splits enumerated during the
    search of the best split. To obtain a deterministic behaviour during
    fitting, ``random_state`` has to be fixed.

    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.

    Examples
    --------
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_samples=1000, n_features=4,
    ...                            n_informative=2, n_redundant=0,
    ...                            random_state=0, shuffle=False)
    >>> clf = RandomForestClassifier(max_depth=2, random_state=0)
    >>> clf.fit(X, y)
    RandomForestClassifier(...)
    >>> print(clf.predict([[0, 0, 0, 0]]))
    [1]
    """
    @_deprecate_positional_args
    def __init__(self,
                 n_estimators=100, *,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 min_impurity_decrease=0.,
                 min_impurity_split=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=None,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None,
                 ccp_alpha=0.0,
                 max_samples=None):
        # estimator_params names the attributes (set below) that are copied
        # onto every DecisionTreeClassifier when the forest is fitted.
        super().__init__(
            base_estimator=DecisionTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "min_impurity_decrease", "min_impurity_split",
                              "random_state", "ccp_alpha"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight,
            max_samples=max_samples)

        # Tree-specific hyper-parameters are stored verbatim on the forest so
        # that get_params/set_params expose them; they are forwarded to the
        # individual trees via estimator_params above.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.min_impurity_split = min_impurity_split
        self.ccp_alpha = ccp_alpha
class RandomForestRegressor(ForestRegressor):
    """
    A random forest regressor.

    A random forest is a meta estimator that fits a number of classifying
    decision trees on various sub-samples of the dataset and uses averaging
    to improve the predictive accuracy and control over-fitting.
    The sub-sample size is controlled with the `max_samples` parameter if
    `bootstrap=True` (default), otherwise the whole dataset is used to build
    each tree.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : int, default=100
        The number of trees in the forest.

        .. versionchanged:: 0.22
           The default value of ``n_estimators`` changed from 10 to 100
           in 0.22.

    criterion : {"mse", "mae"}, default="mse"
        The function to measure the quality of a split. Supported criteria
        are "mse" for the mean squared error, which is equal to variance
        reduction as feature selection criterion, and "mae" for the mean
        absolute error.

        .. versionadded:: 0.18
           Mean Absolute Error (MAE) criterion.

    max_depth : int, default=None
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a fraction and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required to be at a leaf node.
        A split point at any depth will only be considered if it leaves at
        least ``min_samples_leaf`` training samples in each of the left and
        right branches. This may have the effect of smoothing the model,
        especially in regression.

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a fraction and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.

    max_features : {"auto", "sqrt", "log2"}, int or float, default="auto"
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a fraction and
          `round(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_leaf_nodes : int, default=None
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    min_impurity_decrease : float, default=0.0
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.

        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.

        .. versionadded:: 0.19

    min_impurity_split : float, default=None
        Threshold for early stopping in tree growth. A node will split
        if its impurity is above the threshold, otherwise it is a leaf.

        .. deprecated:: 0.19
           ``min_impurity_split`` has been deprecated in favor of
           ``min_impurity_decrease`` in 0.19. The default value of
           ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
           will be removed in 0.25. Use ``min_impurity_decrease`` instead.

    bootstrap : bool, default=True
        Whether bootstrap samples are used when building trees. If False, the
        whole dataset is used to build each tree.

    oob_score : bool, default=False
        Whether to use out-of-bag samples to estimate
        the R^2 on unseen data.

    n_jobs : int, default=None
        The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,
        :meth:`decision_path` and :meth:`apply` are all parallelized over the
        trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
        context. ``-1`` means using all processors. See :term:`Glossary
        <n_jobs>` for more details.

    random_state : int or RandomState, default=None
        Controls both the randomness of the bootstrapping of the samples used
        when building trees (if ``bootstrap=True``) and the sampling of the
        features to consider when looking for the best split at each node
        (if ``max_features < n_features``).
        See :term:`Glossary <random_state>` for details.

    verbose : int, default=0
        Controls the verbosity when fitting and predicting.

    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest. See :term:`the Glossary <warm_start>`.

    ccp_alpha : non-negative float, default=0.0
        Complexity parameter used for Minimal Cost-Complexity Pruning. The
        subtree with the largest cost complexity that is smaller than
        ``ccp_alpha`` will be chosen. By default, no pruning is performed. See
        :ref:`minimal_cost_complexity_pruning` for details.

        .. versionadded:: 0.22

    max_samples : int or float, default=None
        If bootstrap is True, the number of samples to draw from X
        to train each base estimator.

        - If None (default), then draw `X.shape[0]` samples.
        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples. Thus,
          `max_samples` should be in the interval `(0, 1)`.

        .. versionadded:: 0.22

    Attributes
    ----------
    base_estimator_ : DecisionTreeRegressor
        The child estimator template used to create the collection of fitted
        sub-estimators.

    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.

    feature_importances_ : ndarray of shape (n_features,)
        The impurity-based feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance.

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
        This attribute exists only when ``oob_score`` is True.

    oob_prediction_ : ndarray of shape (n_samples,)
        Prediction computed with out-of-bag estimate on the training set.
        This attribute exists only when ``oob_score`` is True.

    See Also
    --------
    DecisionTreeRegressor, ExtraTreesRegressor

    Notes
    -----
    The default values for the parameters controlling the size of the trees
    (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
    unpruned trees which can potentially be very large on some data sets. To
    reduce memory consumption, the complexity and size of the trees should be
    controlled by setting those parameter values.

    The features are always randomly permuted at each split. Therefore,
    the best found split may vary, even with the same training data,
    ``max_features=n_features`` and ``bootstrap=False``, if the improvement
    of the criterion is identical for several splits enumerated during the
    search of the best split. To obtain a deterministic behaviour during
    fitting, ``random_state`` has to be fixed.

    The default value ``max_features="auto"`` uses ``n_features``
    rather than ``n_features / 3``. The latter was originally suggested in
    [1], whereas the former was more recently justified empirically in [2].

    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.

    .. [2] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized
           trees", Machine Learning, 63(1), 3-42, 2006.

    Examples
    --------
    >>> from sklearn.ensemble import RandomForestRegressor
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_features=4, n_informative=2,
    ...                        random_state=0, shuffle=False)
    >>> regr = RandomForestRegressor(max_depth=2, random_state=0)
    >>> regr.fit(X, y)
    RandomForestRegressor(...)
    >>> print(regr.predict([[0, 0, 0, 0]]))
    [-8.32987858]
    """
    @_deprecate_positional_args
    def __init__(self,
                 n_estimators=100, *,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 min_impurity_decrease=0.,
                 min_impurity_split=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=None,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 ccp_alpha=0.0,
                 max_samples=None):
        # estimator_params names the attributes (set below) that are copied
        # onto every DecisionTreeRegressor when the forest is fitted.
        super().__init__(
            base_estimator=DecisionTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "min_impurity_decrease", "min_impurity_split",
                              "random_state", "ccp_alpha"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            max_samples=max_samples)

        # Tree-specific hyper-parameters are stored verbatim on the forest so
        # that get_params/set_params expose them; they are forwarded to the
        # individual trees via estimator_params above.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.min_impurity_split = min_impurity_split
        self.ccp_alpha = ccp_alpha
class ExtraTreesClassifier(ForestClassifier):
    """
    An extra-trees classifier.

    This class implements a meta estimator that fits a number of
    randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
    and control over-fitting.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : int, default=100
        The number of trees in the forest.

        .. versionchanged:: 0.22
           The default value of ``n_estimators`` changed from 10 to 100
           in 0.22.

    criterion : {"gini", "entropy"}, default="gini"
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.

    max_depth : int, default=None
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a fraction and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required to be at a leaf node.
        A split point at any depth will only be considered if it leaves at
        least ``min_samples_leaf`` training samples in each of the left and
        right branches. This may have the effect of smoothing the model,
        especially in regression.

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a fraction and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.

    max_features : {"auto", "sqrt", "log2"}, int or float, default="auto"
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a fraction and
          `round(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_leaf_nodes : int, default=None
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    min_impurity_decrease : float, default=0.0
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.

        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.

        .. versionadded:: 0.19

    min_impurity_split : float, default=None
        Threshold for early stopping in tree growth. A node will split
        if its impurity is above the threshold, otherwise it is a leaf.

        .. deprecated:: 0.19
           ``min_impurity_split`` has been deprecated in favor of
           ``min_impurity_decrease`` in 0.19. The default value of
           ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
           will be removed in 0.25. Use ``min_impurity_decrease`` instead.

    bootstrap : bool, default=False
        Whether bootstrap samples are used when building trees. If False, the
        whole dataset is used to build each tree.

    oob_score : bool, default=False
        Whether to use out-of-bag samples to estimate
        the generalization accuracy.

    n_jobs : int, default=None
        The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,
        :meth:`decision_path` and :meth:`apply` are all parallelized over the
        trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
        context. ``-1`` means using all processors. See :term:`Glossary
        <n_jobs>` for more details.

    random_state : int, RandomState, default=None
        Controls 3 sources of randomness:

        - the bootstrapping of the samples used when building trees
          (if ``bootstrap=True``)
        - the sampling of the features to consider when looking for the best
          split at each node (if ``max_features < n_features``)
        - the draw of the splits for each of the `max_features`

        See :term:`Glossary <random_state>` for details.

    verbose : int, default=0
        Controls the verbosity when fitting and predicting.

    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest. See :term:`the Glossary <warm_start>`.

    class_weight : {"balanced", "balanced_subsample"}, dict or list of dicts, \
            default=None
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        Note that for multioutput (including multilabel) weights should be
        defined for each class of every column in its own dict. For example,
        for four-class multilabel classification weights should be
        [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
        [{1:1}, {2:5}, {3:1}, {4:1}].

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

        The "balanced_subsample" mode is the same as "balanced" except that
        weights are computed based on the bootstrap sample for every tree
        grown.

        For multi-output, the weights of each column of y will be multiplied.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    ccp_alpha : non-negative float, default=0.0
        Complexity parameter used for Minimal Cost-Complexity Pruning. The
        subtree with the largest cost complexity that is smaller than
        ``ccp_alpha`` will be chosen. By default, no pruning is performed. See
        :ref:`minimal_cost_complexity_pruning` for details.

        .. versionadded:: 0.22

    max_samples : int or float, default=None
        If bootstrap is True, the number of samples to draw from X
        to train each base estimator.

        - If None (default), then draw `X.shape[0]` samples.
        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples. Thus,
          `max_samples` should be in the interval `(0, 1)`.

        .. versionadded:: 0.22

    Attributes
    ----------
    base_estimator_ : ExtraTreeClassifier
        The child estimator template used to create the collection of fitted
        sub-estimators.

    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.

    classes_ : ndarray of shape (n_classes,) or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).

    n_classes_ : int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).

    feature_importances_ : ndarray of shape (n_features,)
        The impurity-based feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance.

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
        This attribute exists only when ``oob_score`` is True.

    oob_decision_function_ : ndarray of shape (n_samples, n_classes)
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN. This attribute exists
        only when ``oob_score`` is True.

    See Also
    --------
    sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
    RandomForestClassifier : Ensemble Classifier based on trees with optimal
        splits.

    Notes
    -----
    The default values for the parameters controlling the size of the trees
    (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
    unpruned trees which can potentially be very large on some data sets. To
    reduce memory consumption, the complexity and size of the trees should be
    controlled by setting those parameter values.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized
           trees", Machine Learning, 63(1), 3-42, 2006.

    Examples
    --------
    >>> from sklearn.ensemble import ExtraTreesClassifier
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_features=4, random_state=0)
    >>> clf = ExtraTreesClassifier(n_estimators=100, random_state=0)
    >>> clf.fit(X, y)
    ExtraTreesClassifier(random_state=0)
    >>> clf.predict([[0, 0, 0, 0]])
    array([1])
    """
    @_deprecate_positional_args
    def __init__(self,
                 n_estimators=100, *,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 min_impurity_decrease=0.,
                 min_impurity_split=None,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=None,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None,
                 ccp_alpha=0.0,
                 max_samples=None):
        # estimator_params names the attributes (set below) that are copied
        # onto every ExtraTreeClassifier when the forest is fitted.
        super().__init__(
            base_estimator=ExtraTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "min_impurity_decrease", "min_impurity_split",
                              "random_state", "ccp_alpha"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight,
            max_samples=max_samples)

        # Tree-specific hyper-parameters are stored verbatim on the forest so
        # that get_params/set_params expose them; they are forwarded to the
        # individual trees via estimator_params above.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.min_impurity_split = min_impurity_split
        self.ccp_alpha = ccp_alpha
class ExtraTreesRegressor(ForestRegressor):
    """
    An extra-trees regressor.

    This class implements a meta estimator that fits a number of
    randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
    and control over-fitting.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : int, default=100
        The number of trees in the forest.

        .. versionchanged:: 0.22
           The default value of ``n_estimators`` changed from 10 to 100
           in 0.22.

    criterion : {"mse", "mae"}, default="mse"
        The function to measure the quality of a split. Supported criteria
        are "mse" for the mean squared error, which is equal to variance
        reduction as feature selection criterion, and "mae" for the mean
        absolute error.

        .. versionadded:: 0.18
           Mean Absolute Error (MAE) criterion.

    max_depth : int, default=None
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a fraction and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required to be at a leaf node.
        A split point at any depth will only be considered if it leaves at
        least ``min_samples_leaf`` training samples in each of the left and
        right branches. This may have the effect of smoothing the model,
        especially in regression.

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a fraction and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.

    max_features : {"auto", "sqrt", "log2"}, int or float, default="auto"
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a fraction and
          `round(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_leaf_nodes : int, default=None
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    min_impurity_decrease : float, default=0.0
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.

        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.

        .. versionadded:: 0.19

    min_impurity_split : float, default=None
        Threshold for early stopping in tree growth. A node will split
        if its impurity is above the threshold, otherwise it is a leaf.

        .. deprecated:: 0.19
           ``min_impurity_split`` has been deprecated in favor of
           ``min_impurity_decrease`` in 0.19. The default value of
           ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
           will be removed in 0.25. Use ``min_impurity_decrease`` instead.

    bootstrap : bool, default=False
        Whether bootstrap samples are used when building trees. If False, the
        whole dataset is used to build each tree.

    oob_score : bool, default=False
        Whether to use out-of-bag samples to estimate the R^2 on unseen data.

    n_jobs : int, default=None
        The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,
        :meth:`decision_path` and :meth:`apply` are all parallelized over the
        trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
        context. ``-1`` means using all processors. See :term:`Glossary
        <n_jobs>` for more details.

    random_state : int or RandomState, default=None
        Controls 3 sources of randomness:

        - the bootstrapping of the samples used when building trees
          (if ``bootstrap=True``)
        - the sampling of the features to consider when looking for the best
          split at each node (if ``max_features < n_features``)
        - the draw of the splits for each of the `max_features`

        See :term:`Glossary <random_state>` for details.

    verbose : int, default=0
        Controls the verbosity when fitting and predicting.

    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest. See :term:`the Glossary <warm_start>`.

    ccp_alpha : non-negative float, default=0.0
        Complexity parameter used for Minimal Cost-Complexity Pruning. The
        subtree with the largest cost complexity that is smaller than
        ``ccp_alpha`` will be chosen. By default, no pruning is performed. See
        :ref:`minimal_cost_complexity_pruning` for details.

        .. versionadded:: 0.22

    max_samples : int or float, default=None
        If bootstrap is True, the number of samples to draw from X
        to train each base estimator.

        - If None (default), then draw `X.shape[0]` samples.
        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples. Thus,
          `max_samples` should be in the interval `(0, 1)`.

        .. versionadded:: 0.22

    Attributes
    ----------
    base_estimator_ : ExtraTreeRegressor
        The child estimator template used to create the collection of fitted
        sub-estimators.

    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.

    feature_importances_ : ndarray of shape (n_features,)
        The impurity-based feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance.

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

    n_features_ : int
        The number of features.

    n_outputs_ : int
        The number of outputs.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
        This attribute exists only when ``oob_score`` is True.

    oob_prediction_ : ndarray of shape (n_samples,)
        Prediction computed with out-of-bag estimate on the training set.
        This attribute exists only when ``oob_score`` is True.

    See Also
    --------
    sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
    RandomForestRegressor: Ensemble regressor using trees with optimal splits.

    Notes
    -----
    The default values for the parameters controlling the size of the trees
    (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
    unpruned trees which can potentially be very large on some data sets. To
    reduce memory consumption, the complexity and size of the trees should be
    controlled by setting those parameter values.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.

    Examples
    --------
    >>> from sklearn.datasets import load_diabetes
    >>> from sklearn.model_selection import train_test_split
    >>> from sklearn.ensemble import ExtraTreesRegressor
    >>> X, y = load_diabetes(return_X_y=True)
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, random_state=0)
    >>> reg = ExtraTreesRegressor(n_estimators=100, random_state=0).fit(
    ...     X_train, y_train)
    >>> reg.score(X_test, y_test)
    0.2708...
    """
    @_deprecate_positional_args
    def __init__(self,
                 n_estimators=100, *,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 min_impurity_decrease=0.,
                 min_impurity_split=None,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=None,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 ccp_alpha=0.0,
                 max_samples=None):
        # Ensemble-level arguments go to the ForestRegressor base class;
        # ``estimator_params`` names the attributes copied onto each tree.
        super().__init__(
            base_estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "min_impurity_decrease", "min_impurity_split",
                              "random_state", "ccp_alpha"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            max_samples=max_samples)

        # Per-tree hyper-parameters kept as public attributes matching the
        # constructor argument names.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.min_impurity_split = min_impurity_split
        self.ccp_alpha = ccp_alpha
class RandomTreesEmbedding(BaseForest):
    """
    An ensemble of totally random trees.

    An unsupervised transformation of a dataset to a high-dimensional
    sparse representation. A datapoint is coded according to which leaf of
    each tree it is sorted into. Using a one-hot encoding of the leaves,
    this leads to a binary coding with as many ones as there are trees in
    the forest.

    The dimensionality of the resulting representation is
    ``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
    the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.

    Read more in the :ref:`User Guide <random_trees_embedding>`.

    Parameters
    ----------
    n_estimators : int, default=100
        Number of trees in the forest.

        .. versionchanged:: 0.22
           The default value of ``n_estimators`` changed from 10 to 100
           in 0.22.

    max_depth : int, default=5
        The maximum depth of each tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a fraction and
          `ceil(min_samples_split * n_samples)` is the minimum
          number of samples for each split.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required to be at a leaf node.
        A split point at any depth will only be considered if it leaves at
        least ``min_samples_leaf`` training samples in each of the left and
        right branches. This may have the effect of smoothing the model,
        especially in regression.

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a fraction and
          `ceil(min_samples_leaf * n_samples)` is the minimum
          number of samples for each node.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.

    max_leaf_nodes : int, default=None
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    min_impurity_decrease : float, default=0.0
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.

        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.

        .. versionadded:: 0.19

    min_impurity_split : float, default=None
        Threshold for early stopping in tree growth. A node will split
        if its impurity is above the threshold, otherwise it is a leaf.

        .. deprecated:: 0.19
           ``min_impurity_split`` has been deprecated in favor of
           ``min_impurity_decrease`` in 0.19. The default value of
           ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
           will be removed in 0.25. Use ``min_impurity_decrease`` instead.

    sparse_output : bool, default=True
        Whether or not to return a sparse CSR matrix, as default behavior,
        or to return a dense array compatible with dense pipeline operators.

    n_jobs : int, default=None
        The number of jobs to run in parallel. :meth:`fit`, :meth:`transform`,
        :meth:`decision_path` and :meth:`apply` are all parallelized over the
        trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
        context. ``-1`` means using all processors. See :term:`Glossary
        <n_jobs>` for more details.

    random_state : int or RandomState, default=None
        Controls the generation of the random `y` used to fit the trees
        and the draw of the splits for each feature at the trees' nodes.
        See :term:`Glossary <random_state>` for details.

    verbose : int, default=0
        Controls the verbosity when fitting and predicting.

    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest. See :term:`the Glossary <warm_start>`.

    Attributes
    ----------
    base_estimator_ : DecisionTreeClassifier instance
        The child estimator template used to create the collection of fitted
        sub-estimators.

    estimators_ : list of DecisionTreeClassifier instances
        The collection of fitted sub-estimators.

    feature_importances_ : ndarray of shape (n_features,)
        The feature importances (the higher, the more important the feature).

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    one_hot_encoder_ : OneHotEncoder instance
        One-hot encoder used to create the sparse embedding.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    .. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
           visual codebooks using randomized clustering forests"
           NIPS 2007

    Examples
    --------
    >>> from sklearn.ensemble import RandomTreesEmbedding
    >>> X = [[0,0], [1,0], [0,1], [-1,0], [0,-1]]
    >>> random_trees = RandomTreesEmbedding(
    ...     n_estimators=5, random_state=0, max_depth=1).fit(X)
    >>> X_sparse_embedding = random_trees.transform(X)
    >>> X_sparse_embedding.toarray()
    array([[0., 1., 1., 0., 1., 0., 0., 1., 1., 0.],
           [0., 1., 1., 0., 1., 0., 0., 1., 1., 0.],
           [0., 1., 0., 1., 0., 1., 0., 1., 0., 1.],
           [1., 0., 1., 0., 1., 0., 1., 0., 1., 0.],
           [0., 1., 1., 0., 1., 0., 0., 1., 1., 0.]])
    """

    # Fixed tree settings: each split is drawn on a single random feature,
    # scored with "mse" against the synthetic random target used in
    # ``fit_transform`` — this is what makes the trees "totally random".
    criterion = 'mse'
    max_features = 1

    @_deprecate_positional_args
    def __init__(self,
                 n_estimators=100, *,
                 max_depth=5,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_leaf_nodes=None,
                 min_impurity_decrease=0.,
                 min_impurity_split=None,
                 sparse_output=True,
                 n_jobs=None,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # bootstrap/oob are hard-disabled: they are meaningless for an
        # unsupervised embedding (see _set_oob_score below).
        super().__init__(
            base_estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "min_impurity_decrease", "min_impurity_split",
                              "random_state"),
            bootstrap=False,
            oob_score=False,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            max_samples=None)

        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.min_impurity_split = min_impurity_split
        self.sparse_output = sparse_output

    def _set_oob_score(self, X, y):
        # There is no meaningful out-of-bag score for an embedding.
        raise NotImplementedError("OOB score not supported by tree embedding")

    def fit(self, X, y=None, sample_weight=None):
        """
        Fit estimator.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csc_matrix`` for maximum efficiency.

        y : Ignored
            Not used, present for API consistency by convention.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        Returns
        -------
        self : object
        """
        # All the work happens in fit_transform; the transformed result is
        # simply discarded here.
        self.fit_transform(X, y, sample_weight=sample_weight)
        return self

    def fit_transform(self, X, y=None, sample_weight=None):
        """
        Fit estimator and transform dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data used to build forests. Use ``dtype=np.float32`` for
            maximum efficiency.

        y : Ignored
            Not used, present for API consistency by convention.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        Returns
        -------
        X_transformed : sparse matrix of shape (n_samples, n_out)
            Transformed dataset.
        """
        X = check_array(X, accept_sparse=['csc'])
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()

        # The trees are fitted against a synthetic uniform-random target:
        # only the induced leaf partitions matter for the embedding.
        rnd = check_random_state(self.random_state)
        y = rnd.uniform(size=X.shape[0])
        super().fit(X, y, sample_weight=sample_weight)

        # ``apply`` yields, per sample, the leaf index reached in every tree;
        # one-hot encoding those indices produces the sparse embedding.
        self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
        return self.one_hot_encoder_.fit_transform(self.apply(X))

    def transform(self, X):
        """
        Transform dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data to be transformed. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csr_matrix`` for maximum efficiency.

        Returns
        -------
        X_transformed : sparse matrix of shape (n_samples, n_out)
            Transformed dataset.
        """
        check_is_fitted(self)
        return self.one_hot_encoder_.transform(self.apply(X))
| bsd-3-clause |
printedheart/simpleai | simpleai/machine_learning/reinforcement_learning.py | 5 | 6345 | # -*- coding: utf-8 -*-
from collections import defaultdict, Counter
import math
import random
from simpleai.search.utils import argmax
import pickle
try:
import matplotlib.pyplot as plt
import numpy
except:
plt = None # lint:ok
numpy = None # lint:ok
def make_at_least_n_times(optimistic_reward, min_n):
    """Build an exploration function that forces every action to be tried.

    Actions chosen fewer than ``min_n`` times are scored with
    ``optimistic_reward`` instead of their learned utility, so the greedy
    pick keeps revisiting them until the minimum trial count is reached.
    """
    def at_least_n_times_exploration(actions, utilities, temperature, action_counter):
        scores = {}
        for action in actions:
            if action_counter[action] < min_n:
                scores[action] = optimistic_reward
            else:
                scores[action] = utilities[action]
        return argmax(actions, lambda action: scores[action])
    return at_least_n_times_exploration
def boltzmann_exploration(actions, utilities, temperature, action_counter):
    """Pick an action with probability given by a Boltzmann (softmax)
    distribution over the actions' utilities.

    Utilities are min-max normalized before the softmax so the temperature
    scale is independent of the utilities' magnitude.  Higher ``temperature``
    flattens the distribution (more exploration); near the 0.01 floor the
    choice is almost greedy.

    ``action_counter`` is accepted for interface compatibility with the
    other exploration functions and is not used here.
    """
    utilities = [utilities[x] for x in actions]
    # Floor the temperature to avoid division-by-zero / exp overflow.
    temperature = max(temperature, 0.01)
    _max = max(utilities)
    _min = min(utilities)
    if _max == _min:
        # All actions look equally good: choose uniformly.
        return random.choice(actions)
    utilities = [math.exp(((u - _min) / (_max - _min)) / temperature)
                 for u in utilities]
    # Hoist the normalizer out of the loop (it was recomputed per element).
    total = sum(utilities)
    probs = [u / total for u in utilities]
    # Inverse-CDF sampling.  BUG FIX: the original loop could increment the
    # index past the last position when floating-point rounding left the
    # cumulative sum slightly below ``r`` (raising IndexError on probs[i]);
    # clamping the index to len(actions) - 1 fixes that.
    r = random.random()
    i = 0
    tot = probs[0]
    while r >= tot and i < len(actions) - 1:
        i += 1
        tot += probs[i]
    return actions[i]
def make_exponential_temperature(initial_temperature, alpha):
    """Return a cooling schedule ``n -> initial_temperature / exp(n * alpha)``.

    For large ``n`` the exponential overflows; the temperature is then
    effectively zero, so a small floor of 0.01 is returned instead.
    """
    def temperature_schedule(n):
        try:
            divisor = math.exp(n * alpha)
        except OverflowError:
            return 0.01
        return initial_temperature / divisor
    return temperature_schedule
class PerformanceCounter(object):
    """Instrument a set of learners to record per-episode statistics.

    Each learner's ``set_reward`` is monkey-patched so that at every
    terminal step the accumulated reward, the number of known states
    (``len(learner.Q)``) and the current exploration temperature are
    appended to lists stored on the learner itself.  ``show_statistics``
    renders the three series (requires matplotlib/numpy to be importable).
    """

    def __init__(self, learners, names=None):
        self.learners = learners
        for i, learner in enumerate(learners):
            # Patch set_reward so statistics are captured transparently.
            self.update_set_reward(learner)
            learner.accumulated_rewards = []
            learner.known_states = []
            learner.temperatures = []
            # Fall back to positional names when none are supplied.
            if names is None:
                learner.name = 'Learner %d' % i
            else:
                learner.name = names[i]

    def update_set_reward(self, learner):
        # Wrap learner.set_reward: log statistics on terminal steps, then
        # delegate to the original implementation (kept as old_set_reward).
        def set_reward(reward, terminal=False):
            if terminal:
                if len(learner.accumulated_rewards) > 0:
                    learner.accumulated_rewards.append(
                        learner.accumulated_rewards[-1] + reward)
                else:
                    learner.accumulated_rewards.append(reward)
                learner.known_states.append(len(learner.Q))
                learner.temperatures.append(
                    learner.temperature_function(learner.trials))
            learner.old_set_reward(reward, terminal)
        learner.old_set_reward = learner.set_reward
        learner.set_reward = set_reward

    def _make_plot(self, ax, data_name):
        # Plot one recorded series per learner on the given axes.
        for learner in self.learners:
            data = numpy.array(getattr(learner, data_name))
            ax.plot(numpy.arange(len(data)), data, label=learner.name)
        nice_name = data_name.replace('_', ' ').capitalize()
        ax.set_title(nice_name)
        ax.legend()

    def show_statistics(self):
        """Show accumulated rewards, known states and temperatures plots."""
        f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)
        self._make_plot(ax1, 'accumulated_rewards')
        self._make_plot(ax2, 'known_states')
        self._make_plot(ax3, 'temperatures')
        plt.show()
class RLProblem(object):
    """Abstract specification of a reinforcement-learning problem."""

    def actions(self, state):
        """Return an iterable over the actions available from ``state``.

        Subclasses must override this.
        """
        raise NotImplementedError()

    def update_state(self, percept, agent):
        """Turn a raw percept into the state seen by ``agent``.

        Default implementation returns the percept unchanged; override it
        when perception must be cleaned up for a given agent.
        """
        return percept
def inverse(n):
    """Return 1/n, mapping n == 0 to 1 (decaying rate/temperature schedule)."""
    return 1 if n == 0 else 1.0 / n
def state_default():
    """Factory for a state's action-value table; unseen actions score 0.

    Kept as a named module-level function (not a lambda) so the Q table
    stays picklable — see ``QLearner.dump``.
    """
    return defaultdict(int)
class QLearner(object):
    """Base class for tabular Q-value reinforcement learners.

    Keeps a table ``Q[state][action]`` (nested defaultdicts; missing
    entries are 0) plus a per-state counter of how often each action was
    tried.  Subclasses supply ``update_rule`` (e.g. Q-learning or SARSA).
    """

    def __init__(self, problem, temperature_function=inverse,
                 discount_factor=1,
                 exploration_function=boltzmann_exploration,
                 learning_rate=inverse):
        # Q[state][action] -> estimated utility, defaulting to 0.
        self.Q = defaultdict(state_default)
        self.problem = problem
        self.discount_factor = discount_factor
        # Maps number of completed trials -> exploration temperature.
        self.temperature_function = temperature_function
        self.exploration_function = exploration_function
        # Maps visit count of (state, action) -> step size.
        self.learning_rate = learning_rate
        # Bookkeeping of the previous step, needed to learn from the
        # transition once the next percept arrives.
        self.last_state = None
        self.last_action = None
        self.last_reward = None
        # counter[state][action] = number of times action was taken in state.
        self.counter = defaultdict(Counter)
        self.trials = 0

    def set_reward(self, reward, terminal=False):
        """Record the reward for the last action.

        On terminal steps the reward is written directly into Q for the
        final (state, action) pair and the trial count is bumped.
        """
        self.last_reward = reward
        if terminal:
            self.trials += 1
            self.Q[self.last_state][self.last_action] = reward

    def program(self, percept):
        """Agent step: observe ``percept``, learn from the previous
        transition, and return the next action (None if no actions)."""
        s = self.last_state
        a = self.last_action
        state = self.problem.update_state(percept, self)
        actions = self.problem.actions(state)
        if len(actions) > 0:
            current_action = self.exploration_function(actions, self.Q[state],
                                                       self.temperature_function(self.trials),
                                                       self.counter[state])
        else:
            current_action = None
        if s is not None and current_action:
            self.counter[s][a] += 1
            # Delegate the actual value update to the subclass.
            self.update_rule(s, a, self.last_reward, state, current_action)
        self.last_state = state
        self.last_action = current_action
        return current_action

    def update_rule(self, s, a, r, cs, ca):
        """Update Q for transition (s, a, r, cs) with chosen next action ca.

        Abstract; implemented by TDQLearner / SARSALearner.
        """
        raise NotImplementedError

    def dump(self, path):
        """Pickle this learner to ``path``.

        The temperature function is reset to ``inverse`` first —
        presumably so non-picklable closures (e.g. those returned by
        make_exponential_temperature) don't block pickling.
        """
        self.temperature_function = inverse
        with open(path, 'wb') as f:
            pickle.dump(self, f)

    @classmethod
    def load(self, path):
        """Unpickle and return a learner previously saved with ``dump``."""
        with open(path, 'rb') as f:
            return pickle.load(f)
class TDQLearner(QLearner):
    """Off-policy temporal-difference learner (Q-learning)."""

    def update_rule(self, s, a, r, cs, ca):
        # Step size decays with how often (s, a) has already been tried.
        lr = self.learning_rate(self.counter[s][a])
        # Q(s,a) += lr * (r + gamma * max_a' Q(cs,a') - Q(s,a)).
        # The actually-chosen next action ``ca`` is ignored (off-policy).
        self.Q[s][a] += lr * (r + self.discount_factor * max(self.Q[cs].values()) - self.Q[s][a])
class SARSALearner(QLearner):
    """On-policy temporal-difference learner (SARSA)."""

    def update_rule(self, s, a, r, cs, ca):
        # Step size decays with how often (s, a) has already been tried.
        lr = self.learning_rate(self.counter[s][a])
        # Q(s,a) += lr * (r + gamma * Q(cs,ca) - Q(s,a)); uses the action
        # ``ca`` actually selected in the next state (on-policy).
        self.Q[s][a] += lr * (r + self.discount_factor * self.Q[cs][ca] - self.Q[s][a])
| mit |
zfrenchee/pandas | pandas/plotting/_core.py | 1 | 99095 | # being a bit too dynamic
# pylint: disable=E1101
from __future__ import division
import warnings
import re
from collections import namedtuple
from distutils.version import LooseVersion
import numpy as np
from pandas.util._decorators import cache_readonly
from pandas.core.base import PandasObject
from pandas.core.config import get_option
from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike
from pandas.core.dtypes.common import (
is_list_like,
is_integer,
is_number,
is_hashable,
is_iterator)
from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame
from pandas.core.common import AbstractMethodError, _try_sort, _any_not_none
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.compat import range, lrange, map, zip, string_types
import pandas.compat as compat
from pandas.io.formats.printing import pprint_thing
from pandas.util._decorators import Appender
from pandas.plotting._compat import (_mpl_ge_1_3_1,
_mpl_ge_1_5_0,
_mpl_ge_2_0_0)
from pandas.plotting._style import (plot_params,
_get_standard_colors)
from pandas.plotting._tools import (_subplots, _flatten, table,
_handle_shared_axes, _get_all_lines,
_get_xlim, _set_ticks_props,
format_date_labels)
try:
from pandas.plotting import _converter
except ImportError:
pass
else:
if get_option('plotting.matplotlib.register_converters'):
_converter.register(explicit=True)
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
def _gca(rc=None):
    """Return matplotlib's current Axes.

    ``rc`` is an optional dict of rcParams applied only while the Axes is
    (possibly) being created by ``gca``.
    """
    import matplotlib.pyplot as plt
    with plt.rc_context(rc):
        return plt.gca()
def _gcf():
    """Return matplotlib's current Figure."""
    import matplotlib.pyplot as plt
    return plt.gcf()
class MPLPlot(object):
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
data :
"""
@property
def _kind(self):
    """Specify kind str. Must be overridden in child class."""
    raise NotImplementedError

# Class-level defaults; concrete plot subclasses override as needed.
_layout_type = 'vertical'   # layout hint passed to _subplots
_default_rot = 0            # default tick-label rotation (degrees)
orientation = None          # plot orientation, set by subclasses

# Keywords popped from **kwds in __init__ and stored as attributes,
# with their fallback values.
_pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
                   'mark_right', 'stacked']
_attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
                  'mark_right': True, 'stacked': False}
def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
             sharey=False, use_index=True,
             figsize=None, grid=None, legend=True, rot=None,
             ax=None, fig=None, title=None, xlim=None, ylim=None,
             xticks=None, yticks=None,
             sort_columns=False, fontsize=None,
             secondary_y=False, colormap=None,
             table=False, layout=None, **kwds):
    # Silence the converter-registration warning (see the module-level
    # _converter handling at the top of the file).
    _converter._WARN = False
    self.data = data
    self.by = by
    self.kind = kind
    self.sort_columns = sort_columns
    self.subplots = subplots

    # sharex default depends on whether the caller supplied an Axes.
    if sharex is None:
        if ax is None:
            self.sharex = True
        else:
            # if we get an axis, the users should do the visibility
            # setting...
            self.sharex = False
    else:
        self.sharex = sharex

    self.sharey = sharey
    self.figsize = figsize
    self.layout = layout

    self.xticks = xticks
    self.yticks = yticks
    self.xlim = xlim
    self.ylim = ylim
    self.title = title
    self.use_index = use_index

    self.fontsize = fontsize

    if rot is not None:
        self.rot = rot
        # need to know for format_date_labels since it's rotated to 30 by
        # default
        self._rot_set = True
    else:
        self._rot_set = False
        self.rot = self._default_rot

    if grid is None:
        # Secondary-y plots default to no grid; otherwise follow rcParams.
        grid = False if secondary_y else self.plt.rcParams['axes.grid']

    self.grid = grid
    self.legend = legend
    self.legend_handles = []
    self.legend_labels = []

    # Pop style keywords (label, logy, stacked, ...) out of kwds and set
    # them as attributes, falling back to _attr_defaults.
    for attr in self._pop_attributes:
        value = kwds.pop(attr, self._attr_defaults.get(attr, None))
        setattr(self, attr, value)

    self.ax = ax
    self.fig = fig
    self.axes = None

    # parse errorbar input if given
    xerr = kwds.pop('xerr', None)
    yerr = kwds.pop('yerr', None)
    self.errors = {}
    for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):
        self.errors[kw] = self._parse_errorbars(kw, err)

    # Normalize scalar secondary_y (e.g. a column label) into a list.
    if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):
        secondary_y = [secondary_y]
    self.secondary_y = secondary_y

    # ugly TypeError if user passes matplotlib's `cmap` name.
    # Probably better to accept either.
    if 'cmap' in kwds and colormap:
        raise TypeError("Only specify one of `cmap` and `colormap`.")
    elif 'cmap' in kwds:
        self.colormap = kwds.pop('cmap')
    else:
        self.colormap = colormap

    self.table = table

    # Remaining keywords are forwarded to matplotlib.
    self.kwds = kwds

    self._validate_color_args()
def _validate_color_args(self):
if 'color' not in self.kwds and 'colors' in self.kwds:
warnings.warn(("'colors' is being deprecated. Please use 'color'"
"instead of 'colors'"))
colors = self.kwds.pop('colors')
self.kwds['color'] = colors
if ('color' in self.kwds and self.nseries == 1 and
not is_list_like(self.kwds['color'])):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds and isinstance(self.kwds['color'], tuple) and
self.nseries == 1 and len(self.kwds['color']) in (3, 4)):
# support RGB and RGBA tuples in series plot
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds or 'colors' in self.kwds) and \
self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
if 'color' in self.kwds and self.style is not None:
if is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
if re.match('^[a-z]+?', s) is not None:
raise ValueError(
"Cannot pass 'style' string with a color "
"symbol and 'color' keyword argument. Please"
" use one or the other or pass 'style' "
"without a color symbol")
def _iter_data(self, data=None, keep_index=False, fillna=None):
    """Iterate over (column label, column values) pairs of ``data``.

    Yields each column's raw ndarray unless ``keep_index`` is True, in
    which case the Series itself (index preserved) is yielded.
    ``fillna`` optionally replaces missing values first.  Defaults to
    ``self.data`` when ``data`` is not given.
    """
    if data is None:
        data = self.data
    if fillna is not None:
        data = data.fillna(fillna)

    # TODO: unused?
    # if self.sort_columns:
    #     columns = _try_sort(data.columns)
    # else:
    #     columns = data.columns

    for col, values in data.iteritems():
        if keep_index is True:
            yield col, values
        else:
            yield col, values.values
@property
def nseries(self):
    """Number of series (columns) to plot; 1 for 1-D (Series) input."""
    return 1 if self.data.ndim == 1 else self.data.shape[1]
def draw(self):
    """Redraw the figure if matplotlib is in interactive mode."""
    self.plt.draw_if_interactive()
def generate(self):
    """Template method running the full plotting pipeline.

    Adjusts arguments, computes the plot data, creates the subplots,
    draws the plot, then adds table/legend/labels and runs the per-axes
    post-processing hooks.
    """
    self._args_adjust()
    self._compute_plot_data()
    self._setup_subplots()
    self._make_plot()
    self._add_table()
    self._make_legend()
    self._adorn_subplots()

    for ax in self.axes:
        self._post_plot_logic_common(ax, self.data)
        self._post_plot_logic(ax, self.data)
def _args_adjust(self):
    """Hook for subclasses to pre-process/validate arguments; no-op here."""
    pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
len(ax.containers) != 0)
def _maybe_right_yaxis(self, ax, axes_num):
    """Return the axes that series number ``axes_num`` should draw on,
    creating a twinx (secondary-y) axes on demand."""
    if not self.on_right(axes_num):
        # secondary axes may be passed via ax kw
        return self._get_ax_layer(ax)

    if hasattr(ax, 'right_ax'):
        # if it has right_ax property, ``ax`` must be left axes
        return ax.right_ax
    elif hasattr(ax, 'left_ax'):
        # if it has left_ax property, ``ax`` must be right axes
        return ax
    else:
        # otherwise, create twin axes
        orig_ax, new_ax = ax, ax.twinx()
        # TODO: use Matplotlib public API when available
        new_ax._get_lines = orig_ax._get_lines
        new_ax._get_patches_for_fill = orig_ax._get_patches_for_fill
        # cross-link the pair so later calls find the existing twin
        orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax

        if not self._has_plotted_object(orig_ax):  # no data on left y
            orig_ax.get_yaxis().set_visible(False)
        return new_ax
def _setup_subplots(self):
    """Create the figure and axes (one axes per series when ``subplots``),
    applying log scales, and store them on ``self.fig``/``self.axes``."""
    if self.subplots:
        fig, axes = _subplots(naxes=self.nseries,
                              sharex=self.sharex, sharey=self.sharey,
                              figsize=self.figsize, ax=self.ax,
                              layout=self.layout,
                              layout_type=self._layout_type)
    else:
        if self.ax is None:
            fig = self.plt.figure(figsize=self.figsize)
            axes = fig.add_subplot(111)
        else:
            # reuse the caller-supplied axes (resizing its figure if asked)
            fig = self.ax.get_figure()
            if self.figsize is not None:
                fig.set_size_inches(self.figsize)
            axes = self.ax

    axes = _flatten(axes)

    if self.logx or self.loglog:
        [a.set_xscale('log') for a in axes]
    if self.logy or self.loglog:
        [a.set_yscale('log') for a in axes]

    self.fig = fig
    self.axes = axes
@property
def result(self):
    """
    Return result axes: for subplots, the collection of axes (reshaped to
    ``layout`` when one was given); otherwise the single primary axes, or
    the secondary (right) axes when everything was drawn on it.
    """
    if self.subplots:
        if self.layout is not None and not is_list_like(self.ax):
            return self.axes.reshape(*self.layout)
        else:
            return self.axes
    else:
        sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
        all_sec = (is_list_like(self.secondary_y) and
                   len(self.secondary_y) == self.nseries)
        if (sec_true or all_sec):
            # if all data is plotted on secondary, return right axes
            return self._get_ax_layer(self.axes[0], primary=False)
        else:
            return self.axes[0]
def _compute_plot_data(self):
    """Coerce ``self.data`` to a DataFrame of plottable columns (numeric
    and datetime-like dtypes), raising TypeError when nothing remains."""
    data = self.data

    if isinstance(data, ABCSeries):
        label = self.label
        if label is None and data.name is None:
            label = 'None'
        data = data.to_frame(name=label)

    # GH16953, _convert is needed as fallback, for ``Series``
    # with ``dtype == object``
    data = data._convert(datetime=True, timedelta=True)
    numeric_data = data.select_dtypes(include=[np.number,
                                               "datetime",
                                               "datetimetz",
                                               "timedelta"])

    try:
        is_empty = numeric_data.empty
    except AttributeError:
        # fall back for objects without an ``empty`` property
        is_empty = not len(numeric_data)

    # no empty frames or series allowed
    if is_empty:
        raise TypeError('Empty {0!r}: no numeric data to '
                        'plot'.format(numeric_data.__class__.__name__))

    self.data = numeric_data
def _make_plot(self):
    """Draw the data onto the axes; must be implemented by subclasses."""
    raise AbstractMethodError(self)
def _add_table(self):
    """Attach a data table to the first axes when ``table`` was requested
    (True draws the transposed plot data, otherwise the given object)."""
    if self.table is False:
        return
    data = self.data.transpose() if self.table is True else self.table
    table(self._get_ax(0), data)
def _post_plot_logic_common(self, ax, data):
    """Common post process for each axes: set index-derived tick labels
    when needed and apply rotation/font size to both axes."""
    def get_label(i):
        # tick positions may fall outside the index; label those blank
        try:
            return pprint_thing(data.index[i])
        except Exception:
            return ''

    if self.orientation == 'vertical' or self.orientation is None:
        if self._need_to_set_index:
            xticklabels = [get_label(x) for x in ax.get_xticks()]
            ax.set_xticklabels(xticklabels)
        self._apply_axis_properties(ax.xaxis, rot=self.rot,
                                    fontsize=self.fontsize)
        self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)

        if hasattr(ax, 'right_ax'):
            self._apply_axis_properties(ax.right_ax.yaxis,
                                        fontsize=self.fontsize)

    elif self.orientation == 'horizontal':
        if self._need_to_set_index:
            yticklabels = [get_label(y) for y in ax.get_yticks()]
            ax.set_yticklabels(yticklabels)
        self._apply_axis_properties(ax.yaxis, rot=self.rot,
                                    fontsize=self.fontsize)
        self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)

        if hasattr(ax, 'right_ax'):
            self._apply_axis_properties(ax.right_ax.yaxis,
                                        fontsize=self.fontsize)
    else:  # pragma no cover
        raise ValueError
def _post_plot_logic(self, ax, data):
    """Post process for each axes. Overridden in child classes"""
    pass
def _adorn_subplots(self):
    """Common post process unrelated to data: shared-axis handling,
    ticks, limits, grid and title(s)."""
    if len(self.axes) > 0:
        all_axes = self._get_subplots()
        nrows, ncols = self._get_axes_layout()
        _handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
                            naxes=nrows * ncols, nrows=nrows,
                            ncols=ncols, sharex=self.sharex,
                            sharey=self.sharey)

    for ax in self.axes:
        if self.yticks is not None:
            ax.set_yticks(self.yticks)

        if self.xticks is not None:
            ax.set_xticks(self.xticks)

        if self.ylim is not None:
            ax.set_ylim(self.ylim)

        if self.xlim is not None:
            ax.set_xlim(self.xlim)

        ax.grid(self.grid)

    if self.title:
        if self.subplots:
            if is_list_like(self.title):
                # one title per subplot requires one entry per column
                if len(self.title) != self.nseries:
                    msg = ('The length of `title` must equal the number '
                           'of columns if using `title` of type `list` '
                           'and `subplots=True`.\n'
                           'length of title = {}\n'
                           'number of columns = {}').format(
                        len(self.title), self.nseries)
                    raise ValueError(msg)

                for (ax, title) in zip(self.axes, self.title):
                    ax.set_title(title)
            else:
                self.fig.suptitle(self.title)
        else:
            if is_list_like(self.title):
                msg = ('Using `title` of type `list` is not supported '
                       'unless `subplots=True` is passed')
                raise ValueError(msg)
            self.axes[0].set_title(self.title)
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
    """Apply rotation and/or font size to every tick label on ``axis``
    (major and minor alike); None leaves a property unchanged."""
    for tick_label in axis.get_majorticklabels() + axis.get_minorticklabels():
        if rot is not None:
            tick_label.set_rotation(rot)
        if fontsize is not None:
            tick_label.set_fontsize(fontsize)
@property
def legend_title(self):
    """Legend title: the columns' name (None when unnamed), or the
    comma-joined level names for MultiIndex columns."""
    columns = self.data.columns
    if isinstance(columns, MultiIndex):
        return ','.join(map(pprint_thing, columns.names))
    name = columns.name
    return pprint_thing(name) if name is not None else None
def _add_legend_handle(self, handle, label, index=None):
    """Record a legend handle/label pair, tagging secondary-axis series
    with a ' (right)' suffix.  Unlabeled handles are ignored."""
    if label is None:
        return
    if self.mark_right and index is not None and self.on_right(index):
        label = label + ' (right)'
    self.legend_handles.append(handle)
    self.legend_labels.append(label)
def _make_legend(self):
    """Build the legend from the collected handles/labels, merging with
    any legend already on the axes; per-axes legends for subplots."""
    ax, leg = self._get_ax_legend(self.axes[0])

    handles = []
    labels = []
    title = ''

    if not self.subplots:
        if leg is not None:
            # preserve entries from a pre-existing legend
            title = leg.get_title().get_text()
            handles = leg.legendHandles
            labels = [x.get_text() for x in leg.get_texts()]

        if self.legend:
            if self.legend == 'reverse':
                self.legend_handles = reversed(self.legend_handles)
                self.legend_labels = reversed(self.legend_labels)

            handles += self.legend_handles
            labels += self.legend_labels

            if self.legend_title is not None:
                title = self.legend_title

        if len(handles) > 0:
            ax.legend(handles, labels, loc='best', title=title)

    elif self.subplots and self.legend:
        for ax in self.axes:
            if ax.get_visible():
                ax.legend(loc='best')
def _get_ax_legend(self, ax):
    """Return ``(axes, legend)``, preferring the twin axes' legend when
    the given axes has none of its own."""
    leg = ax.get_legend()
    twin = getattr(ax, 'left_ax', None) or getattr(ax, 'right_ax', None)
    if leg is None and twin is not None:
        twin_leg = twin.get_legend()
        if twin_leg is not None:
            return twin, twin_leg
    return ax, leg
@cache_readonly
def plt(self):
    """Lazily imported ``matplotlib.pyplot`` module (cached)."""
    import matplotlib.pyplot as plt
    return plt
@staticmethod
def mpl_ge_1_3_1():
    """Return True when matplotlib >= 1.3.1 is installed."""
    return _mpl_ge_1_3_1()
@staticmethod
def mpl_ge_1_5_0():
    """Return True when matplotlib >= 1.5.0 is installed."""
    return _mpl_ge_1_5_0()
_need_to_set_index = False
def _get_xticks(self, convert_period=False):
    """Return the x coordinates to plot against.

    Uses the index directly when it is numeric or datetime-like (sorting
    and dropping missing datetimes first), converts PeriodIndex to
    timestamps when ``convert_period``, and otherwise falls back to
    positional 0..n-1 coordinates, flagging ``_need_to_set_index`` so the
    tick labels get filled in from the index later.
    """
    index = self.data.index
    is_datetype = index.inferred_type in ('datetime', 'date',
                                          'datetime64', 'time')

    if self.use_index:
        if convert_period and isinstance(index, PeriodIndex):
            self.data = self.data.reindex(index=index.sort_values())
            x = self.data.index.to_timestamp()._mpl_repr()
        elif index.is_numeric():
            # Matplotlib supports numeric values or datetime objects as
            # xaxis values. Taking LBYL approach here, by the time
            # matplotlib raises exception when using non numeric/datetime
            # values for xaxis, several actions are already taken by plt.
            x = index._mpl_repr()
        elif is_datetype:
            self.data = self.data[notna(self.data.index)]
            self.data = self.data.sort_index()
            x = self.data.index._mpl_repr()
        else:
            self._need_to_set_index = True
            x = lrange(len(index))
    else:
        x = lrange(len(index))

    return x
@classmethod
def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):
    """Low-level draw: mask NaNs in ``y`` and dispatch to ``ax.errorbar``
    or ``ax.plot``."""
    mask = isna(y)
    if mask.any():
        # masked arrays make matplotlib break the line at missing points
        y = np.ma.array(y)
        y = np.ma.masked_where(mask, y)

    if isinstance(x, Index):
        x = x._mpl_repr()

    if is_errorbar:
        if 'xerr' in kwds:
            kwds['xerr'] = np.array(kwds.get('xerr'))
        if 'yerr' in kwds:
            kwds['yerr'] = np.array(kwds.get('yerr'))
        return ax.errorbar(x, y, **kwds)
    else:
        # prevent style kwarg from going to errorbar, where it is
        # unsupported
        if style is not None:
            args = (x, y, style)
        else:
            args = (x, y)
        return ax.plot(*args, **kwds)
def _get_index_name(self):
    """Return a printable name for the data's index (comma-joined level
    names for a MultiIndex), or None when the index is unnamed."""
    index = self.data.index
    if isinstance(index, MultiIndex):
        names = index.names
        if not _any_not_none(*names):
            return None
        return ','.join(pprint_thing(n) for n in names)

    name = index.name
    return pprint_thing(name) if name is not None else None
@classmethod
def _get_ax_layer(cls, ax, primary=True):
    """get left (primary) or right (secondary) axes"""
    layer_attr = 'left_ax' if primary else 'right_ax'
    # fall back to ``ax`` itself when it has no twin of the requested side
    return getattr(ax, layer_attr, ax)
def _get_ax(self, i):
    """Return the axes for series ``i``, swapping in the twinx axes when
    the series is on the secondary y-axis."""
    # get the twinx ax if appropriate
    if self.subplots:
        ax = self.axes[i]
        ax = self._maybe_right_yaxis(ax, i)
        # remember the (possibly new twin) axes for this slot
        self.axes[i] = ax
    else:
        ax = self.axes[0]
        ax = self._maybe_right_yaxis(ax, i)

    ax.get_yaxis().set_visible(True)
    return ax
def on_right(self, i):
    """Return True when the i-th series should go on the secondary y-axis.

    ``secondary_y`` may be a bool (applies to all series) or a collection
    of column labels.
    """
    sec = self.secondary_y
    if isinstance(sec, bool):
        return sec
    if isinstance(sec, (tuple, list, np.ndarray, Index)):
        return self.data.columns[i] in sec
def _apply_style_colors(self, colors, kwds, col_num, label):
    """
    Manage style and color based on column number and its label.
    Returns tuple of appropriate style and kwds which "color" may be added.
    """
    style = None
    if self.style is not None:
        if isinstance(self.style, list):
            # positional style; columns beyond the list get no style
            try:
                style = self.style[col_num]
            except IndexError:
                pass
        elif isinstance(self.style, dict):
            style = self.style.get(label, style)
        else:
            style = self.style

    has_color = 'color' in kwds or self.colormap is not None
    # a style string starting with a letter already encodes a color
    nocolor_style = style is None or re.match('[a-z]+', style) is None
    if (has_color or self.subplots) and nocolor_style:
        kwds['color'] = colors[col_num % len(colors)]
    return style, kwds
def _get_colors(self, num_colors=None, color_kwds='color'):
    """Resolve the color cycle for this plot (defaults to one color per
    series) via ``_get_standard_colors``."""
    return _get_standard_colors(
        num_colors=self.nseries if num_colors is None else num_colors,
        colormap=self.colormap,
        color=self.kwds.get(color_kwds))
def _parse_errorbars(self, label, err):
    """
    Look for error keyword arguments and return the actual errorbar data
    or return the error DataFrame/dict

    Error bars can be specified in several ways:
        Series: the user provides a pandas.Series object of the same
                length as the data
        ndarray: provides a np.ndarray of the same length as the data
        DataFrame/dict: error values are paired with keys matching the
                key in the plotted DataFrame
        str: the name of the column within the plotted DataFrame
    """

    if err is None:
        return None

    from pandas import DataFrame, Series

    def match_labels(data, e):
        # align the error values with the data's index
        e = e.reindex(data.index)
        return e

    # key-matched DataFrame
    if isinstance(err, DataFrame):

        err = match_labels(self.data, err)
    # key-matched dict
    elif isinstance(err, dict):
        pass

    # Series of error values
    elif isinstance(err, Series):
        # broadcast error series across data
        err = match_labels(self.data, err)
        err = np.atleast_2d(err)
        err = np.tile(err, (self.nseries, 1))

    # errors are a column in the dataframe
    elif isinstance(err, string_types):
        evalues = self.data[err].values
        # the error column is consumed: remove it from the plotted data
        self.data = self.data[self.data.columns.drop(err)]
        err = np.atleast_2d(evalues)
        err = np.tile(err, (self.nseries, 1))

    elif is_list_like(err):
        if is_iterator(err):
            err = np.atleast_2d(list(err))
        else:
            # raw error values
            err = np.atleast_2d(err)

        err_shape = err.shape

        # asymmetrical error bars
        if err.ndim == 3:
            if (err_shape[0] != self.nseries) or \
                    (err_shape[1] != 2) or \
                    (err_shape[2] != len(self.data)):
                msg = "Asymmetrical error bars should be provided " + \
                    "with the shape (%u, 2, %u)" % \
                    (self.nseries, len(self.data))
                raise ValueError(msg)

        # broadcast errors to each data series
        if len(err) == 1:
            err = np.tile(err, (self.nseries, 1))

    elif is_number(err):
        # scalar: the same symmetric error for every point of every series
        err = np.tile([err], (self.nseries, len(self.data)))

    else:
        msg = "No valid {label} detected".format(label=label)
        raise ValueError(msg)

    return err
def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
    """Pick the xerr/yerr values for a single series, matching by column
    ``label`` (for DataFrame/dict errors) or by positional ``index``."""
    from pandas import DataFrame
    errors = {}

    for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):
        if flag:
            err = self.errors[kw]
            # user provided label-matched dataframe of errors
            if isinstance(err, (DataFrame, dict)):
                if label is not None and label in err.keys():
                    err = err[label]
                else:
                    err = None
            elif index is not None and err is not None:
                err = err[index]

            if err is not None:
                errors[kw] = err
    return errors
def _get_subplots(self):
    """Return every Subplot axes living on this plot's figure."""
    from matplotlib.axes import Subplot
    fig = self.axes[0].get_figure()
    return [ax for ax in fig.get_axes() if isinstance(ax, Subplot)]
def _get_axes_layout(self):
    """Estimate the subplot grid shape as ``(nrows, ncols)`` from the
    distinct lower-left corner coordinates of each subplot."""
    x_positions = set()
    y_positions = set()
    for ax in self._get_subplots():
        # check axes coordinates to estimate layout
        corner = ax.get_position().get_points()[0]
        x_positions.add(corner[0])
        y_positions.add(corner[1])
    return (len(y_positions), len(x_positions))
class PlanePlot(MPLPlot):
    """
    Abstract class for plotting on plane, currently scatter and hexbin.

    Requires ``x`` and ``y`` columns (labels or positional indices),
    both of which must contain numeric data.
    """

    _layout_type = 'single'

    def __init__(self, data, x, y, **kwargs):
        MPLPlot.__init__(self, data, **kwargs)
        if x is None or y is None:
            # fixed grammar in the message: "requires an x and y column"
            raise ValueError(self._kind + ' requires an x and y column')
        # positional indices are resolved to labels unless the columns
        # themselves hold integers
        if is_integer(x) and not self.data.columns.holds_integer():
            x = self.data.columns[x]
        if is_integer(y) and not self.data.columns.holds_integer():
            y = self.data.columns[y]
        if len(self.data[x]._get_numeric_data()) == 0:
            raise ValueError(self._kind + ' requires x column to be numeric')
        if len(self.data[y]._get_numeric_data()) == 0:
            raise ValueError(self._kind + ' requires y column to be numeric')

        self.x = x
        self.y = y

    @property
    def nseries(self):
        # plane plots always draw exactly one series
        return 1

    def _post_plot_logic(self, ax, data):
        """Label the axes after the plotted column names."""
        x, y = self.x, self.y
        ax.set_ylabel(pprint_thing(y))
        ax.set_xlabel(pprint_thing(x))
class ScatterPlot(PlanePlot):
    """Scatter plot; ``c`` may be a color, a column label, or a sequence
    of colors/values (colormapped when a column)."""
    _kind = 'scatter'

    def __init__(self, data, x, y, s=None, c=None, **kwargs):
        if s is None:
            # hide the matplotlib default for size, in case we want to change
            # the handling of this argument later
            s = 20
        super(ScatterPlot, self).__init__(data, x, y, s=s, **kwargs)
        if is_integer(c) and not self.data.columns.holds_integer():
            c = self.data.columns[c]
        self.c = c

    def _make_plot(self):
        x, y, c, data = self.x, self.y, self.c, self.data
        ax = self.axes[0]

        c_is_column = is_hashable(c) and c in self.data.columns

        # plot a colorbar only if a colormap is provided or necessary
        cb = self.kwds.pop('colorbar', self.colormap or c_is_column)

        # pandas uses colormap, matplotlib uses cmap.
        cmap = self.colormap or 'Greys'
        cmap = self.plt.cm.get_cmap(cmap)
        color = self.kwds.pop("color", None)
        if c is not None and color is not None:
            raise TypeError('Specify exactly one of `c` and `color`')
        elif c is None and color is None:
            # neither given: fall back to matplotlib's default face color
            c_values = self.plt.rcParams['patch.facecolor']
        elif color is not None:
            c_values = color
        elif c_is_column:
            c_values = self.data[c].values
        else:
            c_values = c

        if self.legend and hasattr(self, 'label'):
            label = self.label
        else:
            label = None
        scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
                             label=label, cmap=cmap, **self.kwds)
        if cb:
            img = ax.collections[0]
            kws = dict(ax=ax)
            if self.mpl_ge_1_3_1():
                kws['label'] = c if c_is_column else ''
            self.fig.colorbar(img, **kws)

        if label is not None:
            self._add_legend_handle(scatter, label)
        else:
            self.legend = False

        errors_x = self._get_errorbars(label=x, index=0, yerr=False)
        errors_y = self._get_errorbars(label=y, index=0, xerr=False)
        if len(errors_x) > 0 or len(errors_y) > 0:
            err_kwds = dict(errors_x, **errors_y)
            # error bars take the scatter's face color
            err_kwds['ecolor'] = scatter.get_facecolor()[0]
            ax.errorbar(data[x].values, data[y].values,
                        linestyle='none', **err_kwds)
class HexBinPlot(PlanePlot):
    """Hexagonal binning plot; optional column ``C`` supplies bin values."""
    _kind = 'hexbin'

    def __init__(self, data, x, y, C=None, **kwargs):
        super(HexBinPlot, self).__init__(data, x, y, **kwargs)
        # resolve a positional C index to its column label
        if is_integer(C) and not self.data.columns.holds_integer():
            C = self.data.columns[C]
        self.C = C

    def _make_plot(self):
        x, y, data, C = self.x, self.y, self.data, self.C
        ax = self.axes[0]
        # pandas uses colormap, matplotlib uses cmap.
        cmap = self.plt.cm.get_cmap(self.colormap or 'BuGn')
        cb = self.kwds.pop('colorbar', True)

        c_values = None if C is None else data[C].values

        ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap,
                  **self.kwds)
        if cb:
            # feed the colorbar from the hexbin collection
            self.fig.colorbar(ax.collections[0], ax=ax)

    def _make_legend(self):
        # hexbin plots carry no legend
        pass
class LinePlot(MPLPlot):
    """Line plot: one line per column, with optional stacking and
    datetime-aware ("dynamic x") plotting."""
    _kind = 'line'
    _default_rot = 0
    orientation = 'vertical'

    def __init__(self, data, **kwargs):
        MPLPlot.__init__(self, data, **kwargs)
        if self.stacked:
            # stacking arithmetic cannot cope with missing values
            self.data = self.data.fillna(value=0)
        self.x_compat = plot_params['x_compat']
        if 'x_compat' in self.kwds:
            self.x_compat = bool(self.kwds.pop('x_compat'))

    def _is_ts_plot(self):
        # this is slightly deceptive
        return not self.x_compat and self.use_index and self._use_dynamic_x()

    def _use_dynamic_x(self):
        from pandas.plotting._timeseries import _use_dynamic_x
        return _use_dynamic_x(self._get_ax(0), self.data)

    def _make_plot(self):
        """Draw each column as a line, time-series aware when possible."""
        if self._is_ts_plot():
            from pandas.plotting._timeseries import _maybe_convert_index
            data = _maybe_convert_index(self._get_ax(0), self.data)

            x = data.index  # dummy, not used
            plotf = self._ts_plot
            it = self._iter_data(data=data, keep_index=True)
        else:
            x = self._get_xticks(convert_period=True)
            plotf = self._plot
            it = self._iter_data()

        stacking_id = self._get_stacking_id()
        is_errorbar = _any_not_none(*self.errors.values())

        colors = self._get_colors()
        for i, (label, y) in enumerate(it):
            ax = self._get_ax(i)
            kwds = self.kwds.copy()
            style, kwds = self._apply_style_colors(colors, kwds, i, label)

            errors = self._get_errorbars(label=label, index=i)
            kwds = dict(kwds, **errors)

            label = pprint_thing(label)  # .encode('utf-8')
            kwds['label'] = label

            newlines = plotf(ax, x, y, style=style, column_num=i,
                             stacking_id=stacking_id,
                             is_errorbar=is_errorbar,
                             **kwds)
            self._add_legend_handle(newlines[0], label, index=i)

            if not _mpl_ge_2_0_0():
                # older matplotlib needs explicit x-limit rescaling
                lines = _get_all_lines(ax)
                left, right = _get_xlim(lines)
                ax.set_xlim(left, right)

    @classmethod
    def _plot(cls, ax, x, y, style=None, column_num=None,
              stacking_id=None, **kwds):
        # column_num is used to get the target column from protf in line and
        # area plots
        if column_num == 0:
            cls._initialize_stacker(ax, stacking_id, len(y))
        y_values = cls._get_stacked_values(ax, stacking_id, y, kwds['label'])
        lines = MPLPlot._plot(ax, x, y_values, style=style, **kwds)
        cls._update_stacker(ax, stacking_id, y)
        return lines

    @classmethod
    def _ts_plot(cls, ax, x, data, style=None, **kwds):
        """Time-series variant of _plot: resample if needed and decorate
        the axes with frequency info before drawing."""
        from pandas.plotting._timeseries import (_maybe_resample,
                                                 _decorate_axes,
                                                 format_dateaxis)
        # accept x to be consistent with normal plot func,
        # x is not passed to tsplot as it uses data.index as x coordinate
        # column_num must be in kwds for stacking purpose
        freq, data = _maybe_resample(data, ax, kwds)

        # Set ax with freq info
        _decorate_axes(ax, freq, kwds)
        # digging deeper
        if hasattr(ax, 'left_ax'):
            _decorate_axes(ax.left_ax, freq, kwds)
        if hasattr(ax, 'right_ax'):
            _decorate_axes(ax.right_ax, freq, kwds)
        ax._plot_data.append((data, cls._kind, kwds))

        lines = cls._plot(ax, data.index, data.values, style=style, **kwds)
        # set date formatter, locators and rescale limits
        format_dateaxis(ax, ax.freq, data.index)
        return lines

    def _get_stacking_id(self):
        # stacker state on the axes is keyed by id(self.data)
        if self.stacked:
            return id(self.data)
        else:
            return None

    @classmethod
    def _initialize_stacker(cls, ax, stacking_id, n):
        """Create zeroed positive/negative running totals on ``ax``."""
        if stacking_id is None:
            return
        if not hasattr(ax, '_stacker_pos_prior'):
            ax._stacker_pos_prior = {}
        if not hasattr(ax, '_stacker_neg_prior'):
            ax._stacker_neg_prior = {}
        ax._stacker_pos_prior[stacking_id] = np.zeros(n)
        ax._stacker_neg_prior[stacking_id] = np.zeros(n)

    @classmethod
    def _get_stacked_values(cls, ax, stacking_id, values, label):
        """Return ``values`` offset by the running stack totals; columns
        must be all-positive or all-negative when stacking."""
        if stacking_id is None:
            return values
        if not hasattr(ax, '_stacker_pos_prior'):
            # stacker may not be initialized for subplots
            cls._initialize_stacker(ax, stacking_id, len(values))

        if (values >= 0).all():
            return ax._stacker_pos_prior[stacking_id] + values
        elif (values <= 0).all():
            return ax._stacker_neg_prior[stacking_id] + values

        raise ValueError('When stacked is True, each column must be either '
                         'all positive or negative.'
                         '{0} contains both positive and negative values'
                         .format(label))

    @classmethod
    def _update_stacker(cls, ax, stacking_id, values):
        """Fold ``values`` into the running positive/negative totals."""
        if stacking_id is None:
            return
        if (values >= 0).all():
            ax._stacker_pos_prior[stacking_id] += values
        elif (values <= 0).all():
            ax._stacker_neg_prior[stacking_id] += values

    def _post_plot_logic(self, ax, data):
        """Rotate/format date tick labels and set the x-axis label."""
        condition = (not self._use_dynamic_x() and
                     data.index.is_all_dates and
                     not self.subplots or
                     (self.subplots and self.sharex))

        index_name = self._get_index_name()

        if condition:
            # irregular TS rotated 30 deg. by default
            # probably a better place to check / set this.
            if not self._rot_set:
                self.rot = 30
            format_date_labels(ax, rot=self.rot)

        if index_name is not None and self.use_index:
            ax.set_xlabel(index_name)
class AreaPlot(LinePlot):
    """Filled area plot; stacked by default."""
    _kind = 'area'

    def __init__(self, data, **kwargs):
        kwargs.setdefault('stacked', True)
        data = data.fillna(value=0)
        LinePlot.__init__(self, data, **kwargs)

        if not self.stacked:
            # use smaller alpha to distinguish overlap
            self.kwds.setdefault('alpha', 0.5)

        if self.logy or self.loglog:
            raise ValueError("Log-y scales are not supported in area plot")

    @classmethod
    def _plot(cls, ax, x, y, style=None, column_num=None,
              stacking_id=None, is_errorbar=False, **kwds):
        """Draw one stacked line, then fill down to the previous level."""
        if column_num == 0:
            cls._initialize_stacker(ax, stacking_id, len(y))
        y_values = cls._get_stacked_values(ax, stacking_id, y, kwds['label'])

        # need to remove label, because subplots uses mpl legend as it is
        line_kwds = kwds.copy()
        if cls.mpl_ge_1_5_0():
            line_kwds.pop('label')
        lines = MPLPlot._plot(ax, x, y_values, style=style, **line_kwds)

        # get data from the line to get coordinates for fill_between
        xdata, y_values = lines[0].get_data(orig=False)

        # unable to use ``_get_stacked_values`` here to get starting point
        if stacking_id is None:
            start = np.zeros(len(y))
        elif (y >= 0).all():
            start = ax._stacker_pos_prior[stacking_id]
        elif (y <= 0).all():
            start = ax._stacker_neg_prior[stacking_id]
        else:
            start = np.zeros(len(y))

        if 'color' not in kwds:
            kwds['color'] = lines[0].get_color()

        rect = ax.fill_between(xdata, start, y_values, **kwds)
        cls._update_stacker(ax, stacking_id, y)

        # LinePlot expects list of artists
        res = [rect] if cls.mpl_ge_1_5_0() else lines
        return res

    def _add_legend_handle(self, handle, label, index=None):
        if not self.mpl_ge_1_5_0():
            from matplotlib.patches import Rectangle
            # Because fill_between isn't supported in legend,
            # specifically add Rectangle handle here
            alpha = self.kwds.get('alpha', None)
            handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(),
                               alpha=alpha)
        LinePlot._add_legend_handle(self, handle, label, index=index)

    def _post_plot_logic(self, ax, data):
        LinePlot._post_plot_logic(self, ax, data)

        # pin y-limits at zero when all data lies on one side of it
        if self.ylim is None:
            if (data >= 0).all().all():
                ax.set_ylim(0, None)
            elif (data <= 0).all().all():
                ax.set_ylim(None, 0)
class BarPlot(MPLPlot):
    """Vertical bar plot supporting stacked and side-by-side layouts."""
    _kind = 'bar'
    _default_rot = 90
    orientation = 'vertical'

    def __init__(self, data, **kwargs):
        # we have to treat a series differently than a
        # 1-column DataFrame w.r.t. color handling
        self._is_series = isinstance(data, ABCSeries)
        self.bar_width = kwargs.pop('width', 0.5)
        pos = kwargs.pop('position', 0.5)
        kwargs.setdefault('align', 'center')
        self.tick_pos = np.arange(len(data))

        self.bottom = kwargs.pop('bottom', 0)
        self.left = kwargs.pop('left', 0)

        self.log = kwargs.pop('log', False)
        MPLPlot.__init__(self, data, **kwargs)

        # compute the offset between tick positions and bar positions
        if self.stacked or self.subplots:
            self.tickoffset = self.bar_width * pos
            if kwargs['align'] == 'edge':
                self.lim_offset = self.bar_width / 2
            else:
                self.lim_offset = 0
        else:
            if kwargs['align'] == 'edge':
                w = self.bar_width / self.nseries
                self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
                self.lim_offset = w * 0.5
            else:
                self.tickoffset = self.bar_width * pos
                self.lim_offset = 0

        self.ax_pos = self.tick_pos - self.tickoffset

    def _args_adjust(self):
        if is_list_like(self.bottom):
            self.bottom = np.array(self.bottom)
        if is_list_like(self.left):
            self.left = np.array(self.left)

    @classmethod
    def _plot(cls, ax, x, y, w, start=0, log=False, **kwds):
        return ax.bar(x, y, w, bottom=start, log=log, **kwds)

    @property
    def _start_base(self):
        # bars grow upward from the ``bottom`` offset
        return self.bottom

    def _make_plot(self):
        import matplotlib as mpl

        colors = self._get_colors()
        ncolors = len(colors)

        # running totals for stacked bars, tracked separately by sign
        pos_prior = neg_prior = np.zeros(len(self.data))
        K = self.nseries

        for i, (label, y) in enumerate(self._iter_data(fillna=0)):
            ax = self._get_ax(i)
            kwds = self.kwds.copy()
            if self._is_series:
                kwds['color'] = colors
            else:
                kwds['color'] = colors[i % ncolors]

            errors = self._get_errorbars(label=label, index=i)
            kwds = dict(kwds, **errors)

            label = pprint_thing(label)

            if (('yerr' in kwds) or ('xerr' in kwds)) \
                    and (kwds.get('ecolor') is None):
                kwds['ecolor'] = mpl.rcParams['xtick.color']

            start = 0
            # log-scale bars cannot start at 0
            if self.log and (y >= 1).all():
                start = 1
            start = start + self._start_base

            if self.subplots:
                w = self.bar_width / 2
                rect = self._plot(ax, self.ax_pos + w, y, self.bar_width,
                                  start=start, label=label,
                                  log=self.log, **kwds)
                ax.set_title(label)
            elif self.stacked:
                mask = y > 0
                start = np.where(mask, pos_prior, neg_prior) + self._start_base
                w = self.bar_width / 2
                rect = self._plot(ax, self.ax_pos + w, y, self.bar_width,
                                  start=start, label=label,
                                  log=self.log, **kwds)
                pos_prior = pos_prior + np.where(mask, y, 0)
                neg_prior = neg_prior + np.where(mask, 0, y)
            else:
                # side-by-side: each series gets a 1/K-width slot
                w = self.bar_width / K
                rect = self._plot(ax, self.ax_pos + (i + 0.5) * w, y, w,
                                  start=start, label=label,
                                  log=self.log, **kwds)
            self._add_legend_handle(rect, label, index=i)

    def _post_plot_logic(self, ax, data):
        if self.use_index:
            str_index = [pprint_thing(key) for key in data.index]
        else:
            str_index = [pprint_thing(key) for key in range(data.shape[0])]
        name = self._get_index_name()

        # pad a quarter category width on each side
        s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
        e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset

        self._decorate_ticks(ax, name, str_index, s_edge, e_edge)

    def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):
        """Set category-axis limits, ticks, labels and axis label."""
        ax.set_xlim((start_edge, end_edge))
        ax.set_xticks(self.tick_pos)
        ax.set_xticklabels(ticklabels)
        if name is not None and self.use_index:
            ax.set_xlabel(name)
class BarhPlot(BarPlot):
    """Horizontal bar plot: reuses BarPlot with the x/y roles swapped."""
    _kind = 'barh'
    _default_rot = 0
    orientation = 'horizontal'

    @property
    def _start_base(self):
        # horizontal bars grow from the ``left`` offset
        return self.left

    @classmethod
    def _plot(cls, ax, x, y, w, start=0, log=False, **kwds):
        return ax.barh(x, y, w, left=start, log=log, **kwds)

    def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):
        # horizontal bars
        ax.set_ylim((start_edge, end_edge))
        ax.set_yticks(self.tick_pos)
        ax.set_yticklabels(ticklabels)
        if name is not None and self.use_index:
            ax.set_ylabel(name)
class HistPlot(LinePlot):
    """Histogram plot; reuses the LinePlot stacking machinery."""
    _kind = 'hist'

    def __init__(self, data, bins=10, bottom=0, **kwargs):
        self.bins = bins  # use mpl default
        self.bottom = bottom
        # Do not call LinePlot.__init__ which may fill nan
        MPLPlot.__init__(self, data, **kwargs)

    def _args_adjust(self):
        """Precompute shared bin edges across all columns."""
        if is_integer(self.bins):
            # create common bin edge
            values = (self.data._convert(datetime=True)._get_numeric_data())
            values = np.ravel(values)
            values = values[~isna(values)]

            hist, self.bins = np.histogram(
                values, bins=self.bins,
                range=self.kwds.get('range', None),
                weights=self.kwds.get('weights', None))

        if is_list_like(self.bottom):
            self.bottom = np.array(self.bottom)

    @classmethod
    def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0,
              stacking_id=None, **kwds):
        if column_num == 0:
            cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
        y = y[~isna(y)]

        # stacked histograms start at the previous columns' bar tops
        base = np.zeros(len(bins) - 1)
        bottom = bottom + \
            cls._get_stacked_values(ax, stacking_id, base, kwds['label'])
        # ignore style
        n, bins, patches = ax.hist(y, bins=bins, bottom=bottom, **kwds)
        cls._update_stacker(ax, stacking_id, n)
        return patches

    def _make_plot(self):
        colors = self._get_colors()
        stacking_id = self._get_stacking_id()

        for i, (label, y) in enumerate(self._iter_data()):
            ax = self._get_ax(i)

            kwds = self.kwds.copy()

            label = pprint_thing(label)
            kwds['label'] = label

            style, kwds = self._apply_style_colors(colors, kwds, i, label)
            if style is not None:
                kwds['style'] = style

            kwds = self._make_plot_keywords(kwds, y)
            artists = self._plot(ax, y, column_num=i,
                                 stacking_id=stacking_id, **kwds)
            self._add_legend_handle(artists[0], label, index=i)

    def _make_plot_keywords(self, kwds, y):
        """merge BoxPlot/KdePlot properties to passed kwds"""
        # y is required for KdePlot
        kwds['bottom'] = self.bottom
        kwds['bins'] = self.bins
        return kwds

    def _post_plot_logic(self, ax, data):
        # label the frequency axis according to orientation
        if self.orientation == 'horizontal':
            ax.set_xlabel('Frequency')
        else:
            ax.set_ylabel('Frequency')

    @property
    def orientation(self):
        if self.kwds.get('orientation', None) == 'horizontal':
            return 'horizontal'
        else:
            return 'vertical'
class KdePlot(HistPlot):
    """Kernel density estimate plot (requires scipy)."""
    _kind = 'kde'
    orientation = 'vertical'

    def __init__(self, data, bw_method=None, ind=None, **kwargs):
        MPLPlot.__init__(self, data, **kwargs)
        self.bw_method = bw_method
        self.ind = ind

    def _args_adjust(self):
        pass

    def _get_ind(self, y):
        """Evaluation grid: 1000 points spanning the data extended by
        half the sample range on each side, unless ``ind`` was given."""
        if self.ind is None:
            # np.nanmax() and np.nanmin() ignores the missing values
            sample_range = np.nanmax(y) - np.nanmin(y)
            ind = np.linspace(np.nanmin(y) - 0.5 * sample_range,
                              np.nanmax(y) + 0.5 * sample_range, 1000)
        else:
            ind = self.ind
        return ind

    @classmethod
    def _plot(cls, ax, y, style=None, bw_method=None, ind=None,
              column_num=None, stacking_id=None, **kwds):
        from scipy.stats import gaussian_kde
        from scipy import __version__ as spv

        y = remove_na_arraylike(y)

        # bw_method keyword only exists in scipy >= 0.11.0
        if LooseVersion(spv) >= '0.11.0':
            gkde = gaussian_kde(y, bw_method=bw_method)
        else:
            gkde = gaussian_kde(y)
            if bw_method is not None:
                msg = ('bw_method was added in Scipy 0.11.0.' +
                       ' Scipy version in use is {spv}.'.format(spv=spv))
                warnings.warn(msg)

        y = gkde.evaluate(ind)
        lines = MPLPlot._plot(ax, ind, y, style=style, **kwds)
        return lines

    def _make_plot_keywords(self, kwds, y):
        kwds['bw_method'] = self.bw_method
        kwds['ind'] = self._get_ind(y)
        return kwds

    def _post_plot_logic(self, ax, data):
        ax.set_ylabel('Density')
class PiePlot(MPLPlot):
    """Pie chart: one pie per column, one wedge per row.

    Negative values are rejected; NaNs are treated as 0.
    """
    _kind = 'pie'
    _layout_type = 'horizontal'

    def __init__(self, data, kind=None, **kwargs):
        data = data.fillna(value=0)
        if (data < 0).any().any():
            raise ValueError("{0} doesn't allow negative values".format(kind))
        MPLPlot.__init__(self, data, kind=kind, **kwargs)

    def _args_adjust(self):
        # axis/scale options are meaningless for pies
        self.grid = False
        self.logy = False
        self.logx = False
        self.loglog = False

    def _validate_color_args(self):
        pass

    def _make_plot(self):
        colors = self._get_colors(
            num_colors=len(self.data), color_kwds='colors')
        self.kwds.setdefault('colors', colors)

        # hoisted out of the loop: both the labeler and the default wedge
        # labels are invariant across columns
        def blank_labeler(label, value):
            # Blank out labels for values of 0 so they don't overlap
            # with nonzero wedges
            if value == 0:
                return ''
            else:
                return label

        idx = [pprint_thing(v) for v in self.data.index]

        for i, (label, y) in enumerate(self._iter_data()):
            ax = self._get_ax(i)
            if label is not None:
                label = pprint_thing(label)
                ax.set_ylabel(label)

            kwds = self.kwds.copy()

            # labels is used for each wedge's labels
            labels = kwds.pop('labels', idx)
            if labels is not None:
                blabels = [blank_labeler(l, value) for
                           l, value in zip(labels, y)]
            else:
                blabels = None
            results = ax.pie(y, labels=blabels, **kwds)

            # ax.pie returns a third element only when autopct is given
            if kwds.get('autopct', None) is not None:
                patches, texts, autotexts = results
            else:
                patches, texts = results
                autotexts = []

            if self.fontsize is not None:
                for t in texts + autotexts:
                    t.set_fontsize(self.fontsize)

            # leglabels is used for legend labels
            leglabels = labels if labels is not None else idx
            for p, l in zip(patches, leglabels):
                self._add_legend_handle(p, l)
class BoxPlot(LinePlot):
    """Box-and-whisker plot: one box per column."""
    _kind = 'box'
    _layout_type = 'horizontal'

    # accepted values for the ``return_type`` constructor argument
    _valid_return_types = (None, 'axes', 'dict', 'both')
    # namedtuple to hold results
    BP = namedtuple("Boxplot", ['ax', 'lines'])
def __init__(self, data, return_type='axes', **kwargs):
    """Validate ``return_type`` before initializing the base plot."""
    # Do not call LinePlot.__init__ which may fill nan
    if return_type not in self._valid_return_types:
        raise ValueError(
            "return_type must be {None, 'axes', 'dict', 'both'}")

    self.return_type = return_type
    MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if self.subplots:
# Disable label ax sharing. Otherwise, all subplots shows last
# column label
if self.orientation == 'vertical':
self.sharex = False
else:
self.sharey = False
@classmethod
def _plot(cls, ax, y, column_num=None, return_type='axes', **kwds):
if y.ndim == 2:
y = [remove_na_arraylike(v) for v in y]
# Boxplot fails with empty arrays, so need to add a NaN
# if any cols are empty
# GH 8181
y = [v if v.size > 0 else np.array([np.nan]) for v in y]
else:
y = remove_na_arraylike(y)
bp = ax.boxplot(y, **kwds)
if return_type == 'dict':
return bp, bp
elif return_type == 'both':
return cls.BP(ax=ax, lines=bp), bp
else:
return ax, bp
def _validate_color_args(self):
if 'color' in self.kwds:
if self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
self.color = self.kwds.pop('color')
if isinstance(self.color, dict):
valid_keys = ['boxes', 'whiskers', 'medians', 'caps']
for key, values in compat.iteritems(self.color):
if key not in valid_keys:
raise ValueError("color dict contains invalid "
"key '{0}' "
"The key must be either {1}"
.format(key, valid_keys))
else:
self.color = None
# get standard colors for default
colors = _get_standard_colors(num_colors=3,
colormap=self.colormap,
color=None)
# use 2 colors by default, for box/whisker and median
# flier colors isn't needed here
# because it can be specified by ``sym`` kw
self._boxes_c = colors[0]
self._whiskers_c = colors[0]
self._medians_c = colors[2]
self._caps_c = 'k' # mpl default
def _get_colors(self, num_colors=None, color_kwds='color'):
pass
def maybe_color_bp(self, bp):
if isinstance(self.color, dict):
boxes = self.color.get('boxes', self._boxes_c)
whiskers = self.color.get('whiskers', self._whiskers_c)
medians = self.color.get('medians', self._medians_c)
caps = self.color.get('caps', self._caps_c)
else:
# Other types are forwarded to matplotlib
# If None, use default colors
boxes = self.color or self._boxes_c
whiskers = self.color or self._whiskers_c
medians = self.color or self._medians_c
caps = self.color or self._caps_c
from matplotlib.artist import setp
setp(bp['boxes'], color=boxes, alpha=1)
setp(bp['whiskers'], color=whiskers, alpha=1)
setp(bp['medians'], color=medians, alpha=1)
setp(bp['caps'], color=caps, alpha=1)
def _make_plot(self):
if self.subplots:
from pandas.core.series import Series
self._return_obj = Series()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
kwds = self.kwds.copy()
ret, bp = self._plot(ax, y, column_num=i,
return_type=self.return_type, **kwds)
self.maybe_color_bp(bp)
self._return_obj[label] = ret
label = [pprint_thing(label)]
self._set_ticklabels(ax, label)
else:
y = self.data.values.T
ax = self._get_ax(0)
kwds = self.kwds.copy()
ret, bp = self._plot(ax, y, column_num=0,
return_type=self.return_type, **kwds)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [l for l, _ in self._iter_data()]
labels = [pprint_thing(l) for l in labels]
if not self.use_index:
labels = [pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
def _set_ticklabels(self, ax, labels):
if self.orientation == 'vertical':
ax.set_xticklabels(labels)
else:
ax.set_yticklabels(labels)
def _make_legend(self):
pass
def _post_plot_logic(self, ax, data):
pass
@property
def orientation(self):
if self.kwds.get('vert', True):
return 'vertical'
else:
return 'horizontal'
@property
def result(self):
if self.return_type is None:
return super(BoxPlot, self).result
else:
return self._return_obj
# kinds supported by both dataframe and series
_common_kinds = ['line', 'bar', 'barh',
                 'kde', 'density', 'area', 'hist', 'box']
# kinds supported by dataframe
_dataframe_kinds = ['scatter', 'hexbin']
# kinds supported only by series or dataframe single column
_series_kinds = ['pie']
_all_kinds = _common_kinds + _dataframe_kinds + _series_kinds
_klasses = [LinePlot, BarPlot, BarhPlot, KdePlot, HistPlot, BoxPlot,
            ScatterPlot, HexBinPlot, AreaPlot, PiePlot]
# Registry mapping each kind string (klass._kind) to its plot class;
# used by _plot() to dispatch.
_plot_klass = {}
for klass in _klasses:
    _plot_klass[klass._kind] = klass
def _plot(data, x=None, y=None, subplots=False,
          ax=None, kind='line', **kwds):
    """Normalize inputs, dispatch to the plot class for ``kind``, and
    return the generated plot's result object."""
    kind = _get_standard_kind(kind.lower().strip())
    if kind in _all_kinds:
        klass = _plot_klass[kind]
    else:
        raise ValueError("%r is not a valid plot kind" % kind)
    if kind in _dataframe_kinds:
        # scatter/hexbin need two columns, so only DataFrames qualify.
        if isinstance(data, ABCDataFrame):
            plot_obj = klass(data, x=x, y=y, subplots=subplots, ax=ax,
                             kind=kind, **kwds)
        else:
            raise ValueError("plot kind %r can only be used for data frames"
                             % kind)
    elif kind in _series_kinds:
        # 'pie' and friends operate on a single column/Series.
        if isinstance(data, ABCDataFrame):
            if y is None and subplots is False:
                msg = "{0} requires either y column or 'subplots=True'"
                raise ValueError(msg.format(kind))
            elif y is not None:
                if is_integer(y) and not data.columns.holds_integer():
                    y = data.columns[y]
                # converted to series actually. copy to not modify
                data = data[y].copy()
                data.index.name = y
        plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
    else:
        if isinstance(data, ABCDataFrame):
            # x selects the index; y selects the column(s) to plot.
            if x is not None:
                if is_integer(x) and not data.columns.holds_integer():
                    x = data.columns[x]
                elif not isinstance(data[x], ABCSeries):
                    raise ValueError("x must be a label or position")
                data = data.set_index(x)
            if y is not None:
                if is_integer(y) and not data.columns.holds_integer():
                    y = data.columns[y]
                elif not isinstance(data[y], ABCSeries):
                    raise ValueError("y must be a label or position")
                label = kwds['label'] if 'label' in kwds else y
                series = data[y].copy() # Don't modify
                series.name = label
                # Resolve error-bar columns referenced by label/position
                # before the frame is narrowed down to a single series.
                for kw in ['xerr', 'yerr']:
                    if (kw in kwds) and \
                        (isinstance(kwds[kw], string_types) or
                            is_integer(kwds[kw])):
                        try:
                            kwds[kw] = data[kwds[kw]]
                        except (IndexError, KeyError, TypeError):
                            pass
                data = series
        plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
    plot_obj.generate()
    plot_obj.draw()
    return plot_obj.result
df_kind = """- 'scatter' : scatter plot
- 'hexbin' : hexbin plot"""
series_kind = ""
df_coord = """x : label or position, default None
y : label or position, default None
Allows plotting of one column versus another"""
series_coord = ""
df_unique = """stacked : boolean, default False in line and
bar plots, and True in area plot. If True, create stacked plot.
sort_columns : boolean, default False
Sort column names to determine plot ordering
secondary_y : boolean or sequence, default False
Whether to plot on the secondary y-axis
If a list/tuple, which columns to plot on secondary y-axis"""
series_unique = """label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right"""
df_ax = """ax : matplotlib axes object, default None
subplots : boolean, default False
Make separate subplots for each column
sharex : boolean, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
is passed in; Be aware, that passing in both an ax and sharex=True
will alter all x axis labels for all axis in a figure!
sharey : boolean, default False
In case subplots=True, share y axis and set some y axis labels to
invisible
layout : tuple (optional)
(rows, columns) for the layout of subplots"""
series_ax = """ax : matplotlib axes object
If not passed, uses gca()"""
df_note = """- If `kind` = 'scatter' and the argument `c` is the name of a dataframe
column, the values of that column are used to color each point.
- If `kind` = 'hexbin', you can control the size of the bins with the
`gridsize` argument. By default, a histogram of the counts around each
`(x, y)` point is computed. You can specify alternative aggregations
by passing values to the `C` and `reduce_C_function` arguments.
`C` specifies the value at each `(x, y)` point and `reduce_C_function`
is a function of one argument that reduces all the values in a bin to
a single number (e.g. `mean`, `max`, `sum`, `std`)."""
series_note = ""
_shared_doc_df_kwargs = dict(klass='DataFrame', klass_obj='df',
klass_kind=df_kind, klass_coord=df_coord,
klass_ax=df_ax, klass_unique=df_unique,
klass_note=df_note)
_shared_doc_series_kwargs = dict(klass='Series', klass_obj='s',
klass_kind=series_kind,
klass_coord=series_coord, klass_ax=series_ax,
klass_unique=series_unique,
klass_note=series_note)
_shared_docs['plot'] = """
Make plots of %(klass)s using matplotlib / pylab.
*New in version 0.17.0:* Each plot kind has a corresponding method on the
``%(klass)s.plot`` accessor:
``%(klass_obj)s.plot(kind='line')`` is equivalent to
``%(klass_obj)s.plot.line()``.
Parameters
----------
data : %(klass)s
%(klass_coord)s
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
%(klass_kind)s
%(klass_ax)s
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
%(klass_unique)s
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
`**kwds` : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
%(klass_note)s
"""
@Appender(_shared_docs['plot'] % _shared_doc_df_kwargs)
def plot_frame(data, x=None, y=None, kind='line', ax=None,
               subplots=False, sharex=None, sharey=False, layout=None,
               figsize=None, use_index=True, title=None, grid=None,
               legend=True, style=None, logx=False, logy=False, loglog=False,
               xticks=None, yticks=None, xlim=None, ylim=None,
               rot=None, fontsize=None, colormap=None, table=False,
               yerr=None, xerr=None,
               secondary_y=False, sort_columns=False,
               **kwds):
    # Thin wrapper: gather every explicit keyword into one mapping and
    # forward the whole lot to the generic _plot dispatcher.
    forwarded = dict(kwds, x=x, y=y, kind=kind, ax=ax, subplots=subplots,
                     sharex=sharex, sharey=sharey, layout=layout,
                     figsize=figsize, use_index=use_index, title=title,
                     grid=grid, legend=legend, style=style, logx=logx,
                     logy=logy, loglog=loglog, xticks=xticks, yticks=yticks,
                     xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize,
                     colormap=colormap, table=table, yerr=yerr, xerr=xerr,
                     secondary_y=secondary_y, sort_columns=sort_columns)
    return _plot(data, **forwarded)
@Appender(_shared_docs['plot'] % _shared_doc_series_kwargs)
def plot_series(data, kind='line', ax=None, # Series unique
                figsize=None, use_index=True, title=None, grid=None,
                legend=False, style=None, logx=False, logy=False, loglog=False,
                xticks=None, yticks=None, xlim=None, ylim=None,
                rot=None, fontsize=None, colormap=None, table=False,
                yerr=None, xerr=None,
                label=None, secondary_y=False, # Series unique
                **kwds):
    import matplotlib.pyplot as plt
    # Mirror matplotlib's state machine: when a figure already exists and
    # no axes were given, draw onto the currently-active axes.
    if ax is None and len(plt.get_fignums()) > 0:
        ax = _gca()
        ax = MPLPlot._get_ax_layer(ax)
    # Collect every explicit keyword and hand off to the dispatcher.
    forwarded = dict(kwds, kind=kind, ax=ax, figsize=figsize,
                     use_index=use_index, title=title, grid=grid,
                     legend=legend, style=style, logx=logx, logy=logy,
                     loglog=loglog, xticks=xticks, yticks=yticks,
                     xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize,
                     colormap=colormap, table=table, yerr=yerr, xerr=xerr,
                     label=label, secondary_y=secondary_y)
    return _plot(data, **forwarded)
_shared_docs['boxplot'] = """
Make a box plot from DataFrame column optionally grouped by some columns or
other inputs
Parameters
----------
data : the pandas object holding the data
column : column name or list of names, or vector
Can be any valid input to groupby
by : string or sequence
Column in the DataFrame to group by
ax : Matplotlib axes object, optional
fontsize : int or string
rot : label rotation angle
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
layout : tuple (optional)
(rows, columns) for the layout of the plot
return_type : {None, 'axes', 'dict', 'both'}, default None
The kind of object to return. The default is ``axes``
'axes' returns the matplotlib axes the boxplot is drawn on;
'dict' returns a dictionary whose values are the matplotlib
Lines of the boxplot;
'both' returns a namedtuple with the axes and dict.
When grouping with ``by``, a Series mapping columns to ``return_type``
is returned, unless ``return_type`` is None, in which case a NumPy
array of axes is returned with the same shape as ``layout``.
See the prose documentation for more.
`**kwds` : Keyword Arguments
All other plotting keyword arguments to be passed to
matplotlib's boxplot function
Returns
-------
lines : dict
ax : matplotlib Axes
(ax, lines): namedtuple
Notes
-----
Use ``return_type='dict'`` when you want to tweak the appearance
of the lines after plotting. In this case a dict containing the Lines
making up the boxes, caps, fliers, medians, and whiskers is returned.
"""
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot(data, column=None, by=None, ax=None, fontsize=None,
            rot=0, grid=True, figsize=None, layout=None, return_type=None,
            **kwds):
    # validate return_type:
    if return_type not in BoxPlot._valid_return_types:
        # BUG FIX: the message previously omitted None, which is both the
        # default and a valid value (see BoxPlot._valid_return_types);
        # now consistent with the message raised by BoxPlot.__init__.
        raise ValueError(
            "return_type must be {None, 'axes', 'dict', 'both'}")
    from pandas import Series, DataFrame
    if isinstance(data, Series):
        # Normalize a Series into a one-column frame so the DataFrame
        # code path below can be reused unchanged.
        data = DataFrame({'x': data})
        column = 'x'

    def _get_colors():
        return _get_standard_colors(color=kwds.get('color'), num_colors=1)

    def maybe_color_bp(bp):
        # Only recolor when the user did not pass an explicit 'color'.
        # NOTE(review): colors[2] assumes _get_standard_colors returns at
        # least 3 entries even with num_colors=1 — verify.
        if 'color' not in kwds:
            from matplotlib.artist import setp
            setp(bp['boxes'], color=colors[0], alpha=1)
            setp(bp['whiskers'], color=colors[0], alpha=1)
            setp(bp['medians'], color=colors[2], alpha=1)

    def plot_group(keys, values, ax):
        # Draw one boxplot per key on ``ax`` after dropping NaNs.
        keys = [pprint_thing(x) for x in keys]
        values = [np.asarray(remove_na_arraylike(v)) for v in values]
        bp = ax.boxplot(values, **kwds)
        if fontsize is not None:
            ax.tick_params(axis='both', labelsize=fontsize)
        if kwds.get('vert', 1):
            ax.set_xticklabels(keys, rotation=rot)
        else:
            ax.set_yticklabels(keys, rotation=rot)
        maybe_color_bp(bp)
        # Return axes in multiplot case, maybe revisit later # 985
        if return_type == 'dict':
            return bp
        elif return_type == 'both':
            return BoxPlot.BP(ax=ax, lines=bp)
        else:
            return ax

    colors = _get_colors()
    if column is None:
        columns = None
    else:
        if isinstance(column, (list, tuple)):
            columns = column
        else:
            columns = [column]
    if by is not None:
        # Prefer array return type for 2-D plots to match the subplot layout
        # https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580
        result = _grouped_plot_by_column(plot_group, data, columns=columns,
                                         by=by, grid=grid, figsize=figsize,
                                         ax=ax, layout=layout,
                                         return_type=return_type)
    else:
        if return_type is None:
            return_type = 'axes'
        if layout is not None:
            raise ValueError("The 'layout' keyword is not supported when "
                             "'by' is None")
        if ax is None:
            rc = {'figure.figsize': figsize} if figsize is not None else {}
            ax = _gca(rc)
        # Non-numeric columns cannot be box-plotted; drop them.
        data = data._get_numeric_data()
        if columns is None:
            columns = data.columns
        else:
            data = data[columns]
        result = plot_group(columns, data.values.T, ax)
        ax.grid(grid)
    return result
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot_frame(self, column=None, by=None, ax=None, fontsize=None, rot=0,
                  grid=True, figsize=None, layout=None,
                  return_type=None, **kwds):
    # DataFrame.boxplot entry point: delegate to the module-level boxplot
    # and then refresh interactive matplotlib backends.
    import matplotlib.pyplot as plt
    _converter._WARN = False
    result = boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize,
                     grid=grid, rot=rot, figsize=figsize, layout=layout,
                     return_type=return_type, **kwds)
    plt.draw_if_interactive()
    return result
def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False,
                 **kwargs):
    """
    Make a scatter plot from two DataFrame columns

    Parameters
    ----------
    data : DataFrame
    x : Column name for the x-axis values
    y : Column name for the y-axis values
    ax : Matplotlib axis object
    figsize : A tuple (width, height) in inches
    grid : Setting this to True will show the grid
    kwargs : other plotting keyword arguments
        To be passed to scatter function

    Returns
    -------
    fig : matplotlib.Figure
    """
    import matplotlib.pyplot as plt
    kwargs.setdefault('edgecolors', 'none')

    def plot_group(group, ax):
        # Scatter one (sub-)frame onto the given axes.
        ax.scatter(group[x].values, group[y].values, **kwargs)
        ax.grid(grid)

    if by is None:
        # Single-axes case: reuse the passed axes or make a new figure.
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(111)
        else:
            fig = ax.get_figure()
        plot_group(data, ax)
        ax.set_ylabel(pprint_thing(y))
        ax.set_xlabel(pprint_thing(x))
        ax.grid(grid)
    else:
        # One subplot per group of ``by``.
        fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
    return fig
def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
               xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
               sharey=False, figsize=None, layout=None, bins=10, **kwds):
    """
    Draw histogram of the DataFrame's series using matplotlib / pylab.
    Parameters
    ----------
    data : DataFrame
    column : string or sequence
        If passed, will be used to limit data to a subset of columns
    by : object, optional
        If passed, then used to form histograms for separate groups
    grid : boolean, default True
        Whether to show axis grid lines
    xlabelsize : int, default None
        If specified changes the x-axis label size
    xrot : float, default None
        rotation of x axis labels
    ylabelsize : int, default None
        If specified changes the y-axis label size
    yrot : float, default None
        rotation of y axis labels
    ax : matplotlib axes object, default None
    sharex : boolean, default True if ax is None else False
        In case subplots=True, share x axis and set some x axis labels to
        invisible; defaults to True if ax is None otherwise False if an ax
        is passed in; Be aware, that passing in both an ax and sharex=True
        will alter all x axis labels for all subplots in a figure!
    sharey : boolean, default False
        In case subplots=True, share y axis and set some y axis labels to
        invisible
    figsize : tuple
        The size of the figure to create in inches by default
    layout : tuple, optional
        Tuple of (rows, columns) for the layout of the histograms
    bins : integer, default 10
        Number of histogram bins to be used
    `**kwds` : other plotting keyword arguments
        To be passed to hist function
    """
    _converter._WARN = False
    # Grouped histograms are delegated entirely to grouped_hist.
    if by is not None:
        axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid,
                            figsize=figsize, sharex=sharex, sharey=sharey,
                            layout=layout, bins=bins, xlabelsize=xlabelsize,
                            xrot=xrot, ylabelsize=ylabelsize,
                            yrot=yrot, **kwds)
        return axes
    if column is not None:
        if not isinstance(column, (list, np.ndarray, Index)):
            column = [column]
        data = data[column]
    # Histograms only make sense for numeric columns.
    data = data._get_numeric_data()
    naxes = len(data.columns)
    fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False,
                          sharex=sharex, sharey=sharey, figsize=figsize,
                          layout=layout)
    _axes = _flatten(axes)
    for i, col in enumerate(_try_sort(data.columns)):
        ax = _axes[i]
        # NaNs would break matplotlib's hist; drop them per column.
        ax.hist(data[col].dropna().values, bins=bins, **kwds)
        ax.set_title(col)
        ax.grid(grid)
    _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
                     ylabelsize=ylabelsize, yrot=yrot)
    fig.subplots_adjust(wspace=0.3, hspace=0.3)
    return axes
def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
                xrot=None, ylabelsize=None, yrot=None, figsize=None,
                bins=10, **kwds):
    """
    Draw histogram of the input series using matplotlib
    Parameters
    ----------
    by : object, optional
        If passed, then used to form histograms for separate groups
    ax : matplotlib axis object
        If not passed, uses gca()
    grid : boolean, default True
        Whether to show axis grid lines
    xlabelsize : int, default None
        If specified changes the x-axis label size
    xrot : float, default None
        rotation of x axis labels
    ylabelsize : int, default None
        If specified changes the y-axis label size
    yrot : float, default None
        rotation of y axis labels
    figsize : tuple, default None
        figure size in inches by default
    bins: integer, default 10
        Number of histogram bins to be used
    `**kwds` : keywords
        To be passed to the actual plotting function
    Notes
    -----
    See matplotlib documentation online for more on this
    """
    import matplotlib.pyplot as plt
    if by is None:
        if kwds.get('layout', None) is not None:
            raise ValueError("The 'layout' keyword is not supported when "
                             "'by' is None")
        # hack until the plotting interface is a bit more unified
        fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else
                       plt.figure(figsize=figsize))
        # Resize an existing figure when an explicit figsize disagrees.
        if (figsize is not None and tuple(figsize) !=
                tuple(fig.get_size_inches())):
            fig.set_size_inches(*figsize, forward=True)
        if ax is None:
            ax = fig.gca()
        elif ax.get_figure() != fig:
            raise AssertionError('passed axis not bound to passed figure')
        values = self.dropna().values
        ax.hist(values, bins=bins, **kwds)
        ax.grid(grid)
        axes = np.array([ax])
        _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
                         ylabelsize=ylabelsize, yrot=yrot)
    else:
        if 'figure' in kwds:
            raise ValueError("Cannot pass 'figure' when using the "
                             "'by' argument, since a new 'Figure' instance "
                             "will be created")
        axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize,
                            bins=bins, xlabelsize=xlabelsize, xrot=xrot,
                            ylabelsize=ylabelsize, yrot=yrot, **kwds)
    # Unwrap a 1-element axes array into a bare Axes for convenience.
    if hasattr(axes, 'ndim'):
        if axes.ndim == 1 and len(axes) == 1:
            return axes[0]
    return axes
def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None,
                 layout=None, sharex=False, sharey=False, rot=90, grid=True,
                 xlabelsize=None, xrot=None, ylabelsize=None, yrot=None,
                 **kwargs):
    """
    Grouped histogram
    Parameters
    ----------
    data: Series/DataFrame
    column: object, optional
    by: object, optional
    ax: axes, optional
    bins: int, default 50
    figsize: tuple, optional
    layout: optional
    sharex: boolean, default False
    sharey: boolean, default False
    rot: int, default 90
    grid: bool, default True
    kwargs: dict, keyword arguments passed to matplotlib.Axes.hist
    Returns
    -------
    axes: collection of Matplotlib Axes
    """
    _converter._WARN = False
    def plot_group(group, ax):
        # One histogram per group, drawn on the group's own subplot.
        ax.hist(group.dropna().values, bins=bins, **kwargs)
    # Fall back to the generic 'rot' when no x-rotation was requested.
    xrot = xrot or rot
    fig, axes = _grouped_plot(plot_group, data, column=column,
                              by=by, sharex=sharex, sharey=sharey, ax=ax,
                              figsize=figsize, layout=layout, rot=rot)
    _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
                     ylabelsize=ylabelsize, yrot=yrot)
    fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9,
                        hspace=0.5, wspace=0.3)
    return axes
def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
                          rot=0, grid=True, ax=None, figsize=None,
                          layout=None, **kwds):
    """
    Make box plots from DataFrameGroupBy data.
    Parameters
    ----------
    grouped : Grouped DataFrame
    subplots :
        * ``False`` - no subplots will be used
        * ``True`` - create a subplot for each group
    column : column name or list of names, or vector
        Can be any valid input to groupby
    fontsize : int or string
    rot : label rotation angle
    grid : Setting this to True will show the grid
    ax : Matplotlib axis object, default None
    figsize : A tuple (width, height) in inches
    layout : tuple (optional)
        (rows, columns) for the layout of the plot
    `**kwds` : Keyword Arguments
        All other plotting keyword arguments to be passed to
        matplotlib's boxplot function
    Returns
    -------
    dict of key/value = group key/DataFrame.boxplot return value
    or DataFrame.boxplot return value in case subplots=figures=False
    Examples
    --------
    >>> import pandas
    >>> import numpy as np
    >>> import itertools
    >>>
    >>> tuples = [t for t in itertools.product(range(1000), range(4))]
    >>> index = pandas.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
    >>> data = np.random.randn(len(index),4)
    >>> df = pandas.DataFrame(data, columns=list('ABCD'), index=index)
    >>>
    >>> grouped = df.groupby(level='lvl1')
    >>> boxplot_frame_groupby(grouped)
    >>>
    >>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1)
    >>> boxplot_frame_groupby(grouped, subplots=False)
    """
    _converter._WARN = False
    if subplots is True:
        # One subplot per group; collect each group's boxplot result.
        naxes = len(grouped)
        fig, axes = _subplots(naxes=naxes, squeeze=False,
                              ax=ax, sharex=False, sharey=True,
                              figsize=figsize, layout=layout)
        axes = _flatten(axes)
        from pandas.core.series import Series
        ret = Series()
        for (key, group), ax in zip(grouped, axes):
            d = group.boxplot(ax=ax, column=column, fontsize=fontsize,
                              rot=rot, grid=grid, **kwds)
            ax.set_title(pprint_thing(key))
            ret.loc[key] = d
        fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1,
                            right=0.9, wspace=0.2)
    else:
        # Recombine the groups into one wide frame and draw a single
        # boxplot over all of them.
        from pandas.core.reshape.concat import concat
        keys, frames = zip(*grouped)
        if grouped.axis == 0:
            df = concat(frames, keys=keys, axis=1)
        else:
            if len(frames) > 1:
                df = frames[0].join(frames[1::])
            else:
                df = frames[0]
        ret = df.boxplot(column=column, fontsize=fontsize, rot=rot,
                         grid=grid, ax=ax, figsize=figsize,
                         layout=layout, **kwds)
    return ret
def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,
                  figsize=None, sharex=True, sharey=True, layout=None,
                  rot=0, ax=None, **kwargs):
    """Group ``data`` by ``by`` and apply ``plotf`` to each group on its
    own subplot; returns (figure, axes)."""
    from pandas import DataFrame
    if figsize == 'default':
        # allowed to specify mpl default with 'default'
        # BUG FIX: the two implicitly-concatenated literals previously
        # rendered as "Specify figuresize by tuple" (missing space).
        warnings.warn("figsize='default' is deprecated. Specify figure "
                      "size by tuple instead", FutureWarning, stacklevel=4)
        figsize = None
    grouped = data.groupby(by)
    if column is not None:
        grouped = grouped[column]
    naxes = len(grouped)
    fig, axes = _subplots(naxes=naxes, figsize=figsize,
                          sharex=sharex, sharey=sharey, ax=ax,
                          layout=layout)
    _axes = _flatten(axes)
    for i, (key, group) in enumerate(grouped):
        ax = _axes[i]
        # Only numeric columns can be plotted for frame groups.
        if numeric_only and isinstance(group, DataFrame):
            group = group._get_numeric_data()
        plotf(group, ax, **kwargs)
        ax.set_title(pprint_thing(key))
    return fig, axes
def _grouped_plot_by_column(plotf, data, columns=None, by=None,
                            numeric_only=True, grid=False,
                            figsize=None, ax=None, layout=None,
                            return_type=None, **kwargs):
    """Apply ``plotf`` column by column, grouping each column by ``by``;
    one subplot per column."""
    grouped = data.groupby(by)
    if columns is None:
        # Default to every numeric column except the grouping key(s).
        if not isinstance(by, (list, tuple)):
            by = [by]
        columns = data._get_numeric_data().columns.difference(by)
    naxes = len(columns)
    fig, axes = _subplots(naxes=naxes, sharex=True, sharey=True,
                          figsize=figsize, ax=ax, layout=layout)
    _axes = _flatten(axes)
    ax_values = []
    for i, col in enumerate(columns):
        ax = _axes[i]
        gp_col = grouped[col]
        keys, values = zip(*gp_col)
        re_plotf = plotf(keys, values, ax, **kwargs)
        ax.set_title(col)
        ax.set_xlabel(pprint_thing(by))
        ax_values.append(re_plotf)
        ax.grid(grid)
    from pandas.core.series import Series
    result = Series(ax_values, index=columns)
    # Return axes in multiplot case, maybe revisit later # 985
    if return_type is None:
        result = axes
    # NOTE(review): when ``columns`` was given, ``by`` may still be a plain
    # string here, so len(by) counts characters, not keys — verify intent.
    byline = by[0] if len(by) == 1 else by
    fig.suptitle('Boxplot grouped by {byline}'.format(byline=byline))
    fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
    return result
class BasePlotMethods(PandasObject):
    """Base class for the ``.plot`` accessors; stores the wrapped object."""
    def __init__(self, data):
        # The Series/DataFrame this accessor is bound to.
        self._data = data
    def __call__(self, *args, **kwargs):
        # Subclasses must implement the callable accessor behavior.
        raise NotImplementedError
class SeriesPlotMethods(BasePlotMethods):
    """Series plotting accessor and method
    Examples
    --------
    >>> s.plot.line()
    >>> s.plot.bar()
    >>> s.plot.hist()
    Plotting methods can also be accessed by calling the accessor as a method
    with the ``kind`` argument:
    ``s.plot(kind='line')`` is equivalent to ``s.plot.line()``
    """
    def __call__(self, kind='line', ax=None,
                 figsize=None, use_index=True, title=None, grid=None,
                 legend=False, style=None, logx=False, logy=False,
                 loglog=False, xticks=None, yticks=None,
                 xlim=None, ylim=None,
                 rot=None, fontsize=None, colormap=None, table=False,
                 yerr=None, xerr=None,
                 label=None, secondary_y=False, **kwds):
        # Forward every argument unchanged to the module-level plot_series.
        return plot_series(self._data, kind=kind, ax=ax, figsize=figsize,
                           use_index=use_index, title=title, grid=grid,
                           legend=legend, style=style, logx=logx, logy=logy,
                           loglog=loglog, xticks=xticks, yticks=yticks,
                           xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize,
                           colormap=colormap, table=table, yerr=yerr,
                           xerr=xerr, label=label, secondary_y=secondary_y,
                           **kwds)
    # Reuse plot_series' (Appender-augmented) docstring for the accessor.
    __call__.__doc__ = plot_series.__doc__
    def line(self, **kwds):
        """
        Line plot
        Parameters
        ----------
        `**kwds` : optional
            Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
        Returns
        -------
        axes : matplotlib.AxesSubplot or np.array of them
        """
        return self(kind='line', **kwds)
    def bar(self, **kwds):
        """
        Vertical bar plot
        Parameters
        ----------
        `**kwds` : optional
            Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
        Returns
        -------
        axes : matplotlib.AxesSubplot or np.array of them
        """
        return self(kind='bar', **kwds)
    def barh(self, **kwds):
        """
        Horizontal bar plot
        Parameters
        ----------
        `**kwds` : optional
            Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
        Returns
        -------
        axes : matplotlib.AxesSubplot or np.array of them
        """
        return self(kind='barh', **kwds)
    def box(self, **kwds):
        """
        Boxplot
        Parameters
        ----------
        `**kwds` : optional
            Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
        Returns
        -------
        axes : matplotlib.AxesSubplot or np.array of them
        """
        return self(kind='box', **kwds)
    def hist(self, bins=10, **kwds):
        """
        Histogram
        Parameters
        ----------
        bins: integer, default 10
            Number of histogram bins to be used
        `**kwds` : optional
            Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
        Returns
        -------
        axes : matplotlib.AxesSubplot or np.array of them
        """
        return self(kind='hist', bins=bins, **kwds)
    def kde(self, **kwds):
        """
        Kernel Density Estimate plot
        Parameters
        ----------
        `**kwds` : optional
            Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
        Returns
        -------
        axes : matplotlib.AxesSubplot or np.array of them
        """
        return self(kind='kde', **kwds)
    # 'density' is an alias for 'kde'.
    density = kde
    def area(self, **kwds):
        """
        Area plot
        Parameters
        ----------
        `**kwds` : optional
            Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
        Returns
        -------
        axes : matplotlib.AxesSubplot or np.array of them
        """
        return self(kind='area', **kwds)
    def pie(self, **kwds):
        """
        Pie chart
        Parameters
        ----------
        `**kwds` : optional
            Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
        Returns
        -------
        axes : matplotlib.AxesSubplot or np.array of them
        """
        return self(kind='pie', **kwds)
class FramePlotMethods(BasePlotMethods):
"""DataFrame plotting accessor and method
Examples
--------
>>> df.plot.line()
>>> df.plot.scatter('x', 'y')
>>> df.plot.hexbin()
These plotting methods can also be accessed by calling the accessor as a
method with the ``kind`` argument:
``df.plot(kind='line')`` is equivalent to ``df.plot.line()``
"""
    def __call__(self, x=None, y=None, kind='line', ax=None,
                 subplots=False, sharex=None, sharey=False, layout=None,
                 figsize=None, use_index=True, title=None, grid=None,
                 legend=True, style=None, logx=False, logy=False, loglog=False,
                 xticks=None, yticks=None, xlim=None, ylim=None,
                 rot=None, fontsize=None, colormap=None, table=False,
                 yerr=None, xerr=None,
                 secondary_y=False, sort_columns=False, **kwds):
        # Forward every argument unchanged to the module-level plot_frame.
        return plot_frame(self._data, kind=kind, x=x, y=y, ax=ax,
                          subplots=subplots, sharex=sharex, sharey=sharey,
                          layout=layout, figsize=figsize, use_index=use_index,
                          title=title, grid=grid, legend=legend, style=style,
                          logx=logx, logy=logy, loglog=loglog, xticks=xticks,
                          yticks=yticks, xlim=xlim, ylim=ylim, rot=rot,
                          fontsize=fontsize, colormap=colormap, table=table,
                          yerr=yerr, xerr=xerr, secondary_y=secondary_y,
                          sort_columns=sort_columns, **kwds)
    # Reuse plot_frame's (Appender-augmented) docstring for the accessor.
    __call__.__doc__ = plot_frame.__doc__
def line(self, x=None, y=None, **kwds):
"""
Line plot
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
`**kwds` : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='line', x=x, y=y, **kwds)
def bar(self, x=None, y=None, **kwds):
"""
Vertical bar plot
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
`**kwds` : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='bar', x=x, y=y, **kwds)
def barh(self, x=None, y=None, **kwds):
"""
Horizontal bar plot
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
`**kwds` : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='barh', x=x, y=y, **kwds)
def box(self, by=None, **kwds):
r"""
Boxplot
Parameters
----------
by : string or sequence
Column in the DataFrame to group by.
`**kwds` : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='box', by=by, **kwds)
def hist(self, by=None, bins=10, **kwds):
"""
Histogram
Parameters
----------
by : string or sequence
Column in the DataFrame to group by.
bins: integer, default 10
Number of histogram bins to be used
`**kwds` : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='hist', by=by, bins=bins, **kwds)
def kde(self, **kwds):
"""
Kernel Density Estimate plot
Parameters
----------
`**kwds` : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='kde', **kwds)
density = kde
def area(self, x=None, y=None, **kwds):
"""
Area plot
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
`**kwds` : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='area', x=x, y=y, **kwds)
def pie(self, y=None, **kwds):
"""
Pie chart
Parameters
----------
y : label or position, optional
Column to plot.
`**kwds` : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='pie', y=y, **kwds)
def scatter(self, x, y, s=None, c=None, **kwds):
"""
Scatter plot
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
s : scalar or array_like, optional
Size of each point.
c : label or position, optional
Color of each point.
`**kwds` : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='scatter', x=x, y=y, c=c, s=s, **kwds)
def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None,
**kwds):
"""
Hexbin plot
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
C : label or position, optional
The value at each `(x, y)` point.
reduce_C_function : callable, optional
Function of one argument that reduces all the values in a bin to
a single number (e.g. `mean`, `max`, `sum`, `std`).
gridsize : int, optional
Number of bins.
`**kwds` : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
if reduce_C_function is not None:
kwds['reduce_C_function'] = reduce_C_function
if gridsize is not None:
kwds['gridsize'] = gridsize
return self(kind='hexbin', x=x, y=y, C=C, **kwds)
| bsd-3-clause |
jungla/ICOM-fluidity-toolbox | Detectors/offline_advection/advect_particles_C_3Db_big.py | 1 | 4082 | import os, sys
import myfun
import numpy as np
import lagrangian_stats
import scipy.interpolate as interpolate
import csv
import matplotlib.pyplot as plt
import advect_functions
import fio
from intergrid import Intergrid
## READ archive (too many points... somehow)
# args: name, dayi, dayf, days
#
# Python 2 script: offline advection of Lagrangian particles through
# pre-computed CG velocity fields stored as per-timestep CSV files.

#label = 'm_25_2_512'
label = 'm_25_1b_particles'
dayi = 60 #10*24*1
dayf = 500 #10*24*4
days = 1

# Command-line override (disabled; parameters are hard-coded above).
#label = sys.argv[1]
#basename = sys.argv[2]
#dayi = int(sys.argv[3])
#dayf = int(sys.argv[4])
#days = int(sys.argv[5])

path = '../../2D/U/Velocity_CG/'

time = range(dayi,dayf,days)

# dimensions archives

# ML exp
#Xlist = np.linspace(0,10000,801)
#Ylist = np.linspace(0,4000,321)
Xlist = np.linspace(0,8000,641)
Ylist = np.linspace(0,8000,641)
# Vertical layer thicknesses; cumulative sum below gives the depth levels.
dl = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]
Zlist = 1.*np.cumsum(dl)
maps = [Xlist,Ylist,Zlist]
# Domain bounds used for the periodic boundary condition and interpolation.
lo = np.array([ 0, 0, 0])
hi = np.array([ 8000, 8000, 50]) # highest lat, highest lon
#lo = np.array([ 0, 0, 0])
#hi = np.array([ 10000, 4000, 50]) # highest lat, highest lon

[X,Y,Z] = myfun.meshgrid2(Xlist,Ylist,Zlist)
Y = np.reshape(Y,(np.size(Y),))
X = np.reshape(X,(np.size(X),))
Z = np.reshape(Z,(np.size(Z),))

xn = len(Xlist)
yn = len(Ylist)
zn = len(Zlist)

dx = np.gradient(Xlist)
dy = np.gradient(Ylist)
dz = np.gradient(Zlist)

# Timestep between archived velocity snapshots, in seconds
# (presumably -- TODO confirm against the model output cadence).
#dt = 360
dt = 1440

time = np.asarray(range(dayi,dayf,days))
print time[0]

# initial particles position: regular horizontal grid at a few depths
x0 = range(0,5010,10)
y0 = range(0,5010,10)
#z0 = [5,10,17]
#x0 = range(3000,4010,10)
#y0 = range(2000,3010,10)
#z0 = range(1,20,4)
z0 = [0,5,10,15]

xp = len(x0)
yp = len(y0)
zp = len(z0)
pt = xp*yp*zp    # total particle count

[z0,y0,x0] = myfun.meshgrid2(z0,y0,x0)
x0 = np.reshape(x0, (np.size(x0)))
y0 = np.reshape(y0, (np.size(y0)))
z0 = np.reshape(z0, (np.size(z0)))

#levels = np.zeros(x0.shape) + 1.
#levels[np.where(z0 != 2)] = np.nan

# Alternative random initial seeding (disabled).
#x0 = lo[0] + np.random.uniform( size=(pt) ) * (hi[0] - lo[0])
#y0 = lo[1] + np.random.uniform( size=(pt) ) * (hi[1] - lo[1])
#z0 = lo[2] + np.random.uniform( size=(pt) ) * (hi[2] - lo[2])
#z0 = z0*0-1.

x = np.zeros((pt))
y = np.zeros((pt))
z = np.zeros((pt))

## ADVECT PARTICLES

kick = 5.    # amplitude of the (disabled) random-walk kick

#filename = './traj_'+label+'_'+str(dayi)+'_'+str(dayf)+'_3D.csv'
filename = './traj_'+label+'_'+str(dayi)+'_'+str(dayf)+'_3D_big.csv'
print filename

fd = open(filename,'wb')

# Write the initial positions (depth is stored negative-down).
for p in range(pt):
    fd.write(str(x0[p])+', '+str(y0[p])+', '+str(-1.*z0[p])+', '+str(time[0])+'\n')

import random

for t in range(len(time)-1):
    print 'time:', time[t]
    # Velocity components at the current snapshot (W sign-flipped so
    # positive is downward in file coordinates).
    file0 = path+'Velocity_CG_0_'+label+'_'+str(time[t])+'.csv'
    file1 = path+'Velocity_CG_1_'+label+'_'+str(time[t])+'.csv'
    file2 = path+'Velocity_CG_2_'+label+'_'+str(time[t])+'.csv'
    Ut0 = fio.read_Scalar(file0,xn,yn,zn)
    Vt0 = fio.read_Scalar(file1,xn,yn,zn)
    Wt0 = -1.*fio.read_Scalar(file2,xn,yn,zn) #0*Ut0
    # Velocity components at the next snapshot, for RK4 time interpolation.
    file0 = path+'Velocity_CG_0_'+label+'_'+str(time[t+1])+'.csv'
    file1 = path+'Velocity_CG_1_'+label+'_'+str(time[t+1])+'.csv'
    file2 = path+'Velocity_CG_2_'+label+'_'+str(time[t+1])+'.csv'
    Ut1 = fio.read_Scalar(file0,xn,yn,zn)
    Vt1 = fio.read_Scalar(file1,xn,yn,zn)
    Wt1 = -1.*fio.read_Scalar(file2,xn,yn,zn) #0*Ut0

    # subcycling (disabled): substep the advection between snapshots
    nt = 20
    ds = 1.*dt / nt
    # for st in range(nt+1):
    # print st
    # Us0 = (Ut1*st + Ut0*(nt-st))/(nt)
    # Us1 = (Ut1*(st+1) + Ut0*(nt-st-1))/(nt)
    # Vs0 = (Vt1*st + Vt0*(nt-st))/(nt)
    # Vs1 = (Vt1*(st+1) + Vt0*(nt-st-1))/(nt)
    # Ws0 = (Wt1*st + Wt0*(nt-st))/(nt)
    # Ws1 = (Wt1*(st+1) + Wt0*(nt-st-1))/(nt)
    # x0,y0,z0 = advect_functions.RK4(x0,y0,z0,Us0,Vs0,Ws0,Us1,Vs1,Ws1,lo,hi,maps,ds)

    # Single RK4 step across the whole snapshot interval.
    x0,y0,z0 = advect_functions.RK4(x0,y0,z0,Ut0,Vt0,Wt0,Ut1,Vt1,Wt1,lo,hi,maps,dt)
    #x0,y0,z0 = advect_functions.EULER(x0,y0,z0,Ut0,Vt0,Wt0,lo,hi,maps,dt)

    # random.seed()
    # random kick (disabled)
    # for i in range(len(x0)):
    # x0[i] = x0[i] + random.uniform(-kick,kick)
    # y0[i] = y0[i] + random.uniform(-kick,kick)

    # Apply periodic boundary conditions.
    x0,y0,z0 = advect_functions.pBC(x0,y0,z0,lo,hi)
    # x1,y1,z1 = x0,y0,z0

    # write the advected positions for this timestep
    for p in range(pt):
        fd.write(str(x0[p])+', '+str(y0[p])+', '+str(-1.*z0[p])+', '+str(time[t+1])+'\n')

fd.close()
| gpl-2.0 |
mkowoods/deep-learning | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
    """
    Display a histogram of the values drawn from a TF distribution tensor.

    Parameters
    ----------
    title : str
        Title for the matplotlib figure.
    distribution_tensor : tf.Tensor
        Tensor whose evaluated values are histogrammed.
    hist_range : tuple of (float, float), optional
        (min, max) range covered by the histogram bin edges.
    """
    with tf.Session() as sess:
        values = sess.run(distribution_tensor)

    plt.title(title)
    # Use floor division: len(values)/2 is a float under Python 3, and
    # np.linspace requires an integer ``num`` (TypeError otherwise).
    plt.hist(values, np.linspace(*hist_range, num=len(values) // 2))
    plt.show()
def _get_loss_acc(dataset, weights):
    """
    Get losses and validation accuracy of example neural network

    Trains a fixed 2-hidden-layer (256/128 unit) ReLU network for 2 epochs
    using the supplied ``weights`` (list of three tf.Variable weight
    matrices) and returns (per-batch training losses, validation accuracy).
    ``dataset`` is assumed to be a tf.contrib-style Datasets object with
    ``train``/``validation`` splits -- TODO confirm against the caller.
    """
    batch_size = 128
    epochs = 2
    learning_rate = 0.001

    features = tf.placeholder(tf.float32)
    labels = tf.placeholder(tf.float32)
    learn_rate = tf.placeholder(tf.float32)

    # Biases are always zero-initialized; only the weight init is compared.
    biases = [
        tf.Variable(tf.zeros([256])),
        tf.Variable(tf.zeros([128])),
        tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
    ]

    # Layers
    layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
    layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
    logits = tf.matmul(layer_2, weights[2]) + biases[2]

    # Training loss
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))

    # Optimizer
    optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)

    # Accuracy
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Measurements use for graphing loss
    loss_batch = []

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        batch_count = int((dataset.train.num_examples / batch_size))

        # The training cycle
        for epoch_i in range(epochs):
            for batch_i in range(batch_count):
                batch_features, batch_labels = dataset.train.next_batch(batch_size)

                # Run optimizer and get loss
                session.run(
                    optimizer,
                    feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
                l = session.run(
                    loss,
                    feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
                loss_batch.append(l)

        valid_acc = session.run(
            accuracy,
            feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})

    # Hack to Reset batches: pokes private attributes so the next call
    # re-iterates the training set from the start.
    dataset.train._index_in_epoch = 0
    dataset.train._epochs_completed = 0

    return loss_batch, valid_acc
def compare_init_weights(dataset, title, weight_init_list, plot_n_batches=100):
    """
    Plot loss and print stats of weights using an example neural network

    Trains the example network once per (weights, label) pair in
    ``weight_init_list``, plots the first ``plot_n_batches`` training
    losses for each, then prints final validation accuracy and loss.
    """
    palette = ['r', 'b', 'g', 'c', 'y', 'k']
    assert len(weight_init_list) <= len(palette), 'Too many inital weights to plot'

    accuracies = []
    final_losses = []
    for color, (weights, label) in zip(palette, weight_init_list):
        loss, val_acc = _get_loss_acc(dataset, weights)
        plt.plot(loss[:plot_n_batches], color, label=label)
        accuracies.append((label, val_acc))
        final_losses.append((label, loss[-1]))

    plt.title(title)
    plt.xlabel('Batches')
    plt.ylabel('Loss')
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.show()

    print('After 858 Batches (2 Epochs):')
    print('Validation Accuracy')
    for label, val_acc in accuracies:
        print('  {:7.3f}% -- {}'.format(val_acc*100, label))
    print('Loss')
    for label, loss in final_losses:
        print('  {:7.3f} -- {}'.format(loss, label))
| mit |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/io/json/json.py | 6 | 24553 | # pylint: disable-msg=E1101,W0613,W0603
import os
import numpy as np
import pandas._libs.json as json
from pandas._libs.tslib import iNaT
from pandas.compat import StringIO, long, u
from pandas import compat, isnull
from pandas import Series, DataFrame, to_datetime, MultiIndex
from pandas.io.common import get_filepath_or_buffer, _get_handle
from pandas.core.common import AbstractMethodError
from pandas.io.formats.printing import pprint_thing
from .normalize import _convert_to_line_delimits
from .table_schema import build_table_schema
from pandas.core.dtypes.common import is_period_dtype
# Shorthands for the C-accelerated JSON codec shipped in pandas._libs.json.
loads = json.loads
dumps = json.dumps

# Version of the Table Schema spec emitted by the 'table' orient.
TABLE_SCHEMA_VERSION = '0.20.0'


# interface to/from
def to_json(path_or_buf, obj, orient=None, date_format='epoch',
            double_precision=10, force_ascii=True, date_unit='ms',
            default_handler=None, lines=False):
    """
    Serialize ``obj`` (Series or DataFrame) to a JSON string and either
    return it (``path_or_buf is None``), write it to the named file, or
    write it to the supplied buffer.
    """
    if lines and orient != 'records':
        raise ValueError(
            "'lines' keyword only valid when 'orient' is records")

    # Table Schema output always works on a frame.
    if orient == 'table' and isinstance(obj, Series):
        obj = obj.to_frame(name=obj.name or 'values')

    # Select the writer class appropriate for the object/orient combination.
    if orient == 'table' and isinstance(obj, DataFrame):
        writer_cls = JSONTableWriter
    elif isinstance(obj, Series):
        writer_cls = SeriesWriter
    elif isinstance(obj, DataFrame):
        writer_cls = FrameWriter
    else:
        raise NotImplementedError("'obj' should be a Series or a DataFrame")

    serialized = writer_cls(
        obj, orient=orient, date_format=date_format,
        double_precision=double_precision, ensure_ascii=force_ascii,
        date_unit=date_unit, default_handler=default_handler).write()

    if lines:
        serialized = _convert_to_line_delimits(serialized)

    if isinstance(path_or_buf, compat.string_types):
        with open(path_or_buf, 'w') as fh:
            fh.write(serialized)
    elif path_or_buf is None:
        return serialized
    else:
        path_or_buf.write(serialized)
class Writer(object):
    """Base serializer turning a pandas object into a JSON string.

    Subclasses define ``_default_orient`` and implement ``_format_axes``
    to validate the object's axes for the requested orient.
    """

    def __init__(self, obj, orient, date_format, double_precision,
                 ensure_ascii, date_unit, default_handler=None):
        self.obj = obj

        # Fall back to the subclass's default layout when none is given.
        self.orient = self._default_orient if orient is None else orient

        self.date_format = date_format
        self.double_precision = double_precision
        self.ensure_ascii = ensure_ascii
        self.date_unit = date_unit
        self.default_handler = default_handler
        self.is_copy = None

        self._format_axes()

    def _format_axes(self):
        # Subclass responsibility.
        raise AbstractMethodError(self)

    def write(self):
        """Return the JSON representation of ``self.obj``."""
        return dumps(self.obj,
                     orient=self.orient,
                     double_precision=self.double_precision,
                     ensure_ascii=self.ensure_ascii,
                     date_unit=self.date_unit,
                     iso_dates=self.date_format == 'iso',
                     default_handler=self.default_handler)
class SeriesWriter(Writer):
    """JSON writer for Series; defaults to the 'index' layout."""

    _default_orient = 'index'

    def _format_axes(self):
        # orient='index' keys entries by index label, so duplicate labels
        # cannot be represented.
        if self.orient == 'index' and not self.obj.index.is_unique:
            raise ValueError("Series index must be unique for orient="
                             "'%s'" % self.orient)
class FrameWriter(Writer):
    """JSON writer for DataFrame; defaults to the 'columns' layout."""

    _default_orient = 'columns'

    def _format_axes(self):
        """Raise if the frame's axes cannot represent the chosen orient."""
        # Row labels become JSON object keys for these orients.
        if self.orient in ('index', 'columns') and \
                not self.obj.index.is_unique:
            raise ValueError("DataFrame index must be unique for orient="
                             "'%s'." % self.orient)
        # Column labels become JSON object keys for these orients.
        if self.orient in ('index', 'columns', 'records') and \
                not self.obj.columns.is_unique:
            raise ValueError("DataFrame columns must be unique for orient="
                             "'%s'." % self.orient)
class JSONTableWriter(FrameWriter):
    _default_orient = 'records'

    def __init__(self, obj, orient, date_format, double_precision,
                 ensure_ascii, date_unit, default_handler=None):
        """
        Adds a `schema` attribute with the Table Schema, resets
        the index (can't do in caller, because the schema inference needs
        to know what the index is), forces orient to records, and forces
        date_format to 'iso'.
        """
        super(JSONTableWriter, self).__init__(
            obj, orient, date_format, double_precision, ensure_ascii,
            date_unit, default_handler=default_handler)

        if date_format != 'iso':
            msg = ("Trying to write with `orient='table'` and "
                   "`date_format='%s'`. Table Schema requires dates "
                   "to be formatted with `date_format='iso'`" % date_format)
            raise ValueError(msg)

        self.schema = build_table_schema(obj)

        # NotImplemented on a column MultiIndex
        if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):
            raise NotImplementedError(
                "orient='table' is not supported for MultiIndex")

        # TODO: Do this timedelta properly in objToJSON.c See GH #15137
        if ((obj.ndim == 1) and (obj.name in set(obj.index.names)) or
                len(obj.columns & obj.index.names)):
            msg = "Overlapping names between the index and columns"
            raise ValueError(msg)

        # Work on a copy: the conversions below mutate the object.
        obj = obj.copy()
        timedeltas = obj.select_dtypes(include=['timedelta']).columns
        if len(timedeltas):
            obj[timedeltas] = obj[timedeltas].applymap(
                lambda x: x.isoformat())

        # Convert PeriodIndex to datetimes before serializing
        if is_period_dtype(obj.index):
            obj.index = obj.index.to_timestamp()

        # Move the index into columns so it round-trips through 'records'.
        self.obj = obj.reset_index()
        self.date_format = 'iso'
        self.orient = 'records'

    def write(self):
        # Wrap the records payload together with the inferred schema.
        data = super(JSONTableWriter, self).write()
        serialized = '{{"schema": {}, "data": {}}}'.format(
            dumps(self.schema), data)
        return serialized
def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
              convert_axes=True, convert_dates=True, keep_default_dates=True,
              numpy=False, precise_float=False, date_unit=None, encoding=None,
              lines=False):
    """
    Convert a JSON string to pandas object

    Parameters
    ----------
    path_or_buf : a valid JSON string or file-like, default: None
        The string could be a URL. Valid URL schemes include http, ftp, s3, and
        file. For file URLs, a host is expected. For instance, a local file
        could be ``file://localhost/path/to/table.json``
    orient : string,
        Indication of expected JSON string format.
        Compatible JSON strings can be produced by ``to_json()`` with a
        corresponding orient value.
        The set of possible orients is:

        - ``'split'`` : dict like
          ``{index -> [index], columns -> [columns], data -> [values]}``
        - ``'records'`` : list like
          ``[{column -> value}, ... , {column -> value}]``
        - ``'index'`` : dict like ``{index -> {column -> value}}``
        - ``'columns'`` : dict like ``{column -> {index -> value}}``
        - ``'values'`` : just the values array

        The allowed and default values depend on the value
        of the `typ` parameter.

        * when ``typ == 'series'``,

          - allowed orients are ``{'split','records','index'}``
          - default is ``'index'``
          - The Series index must be unique for orient ``'index'``.

        * when ``typ == 'frame'``,

          - allowed orients are ``{'split','records','index',
            'columns','values'}``
          - default is ``'columns'``
          - The DataFrame index must be unique for orients ``'index'`` and
            ``'columns'``.
          - The DataFrame columns must be unique for orients ``'index'``,
            ``'columns'``, and ``'records'``.

    typ : type of object to recover (series or frame), default 'frame'
    dtype : boolean or dict, default True
        If True, infer dtypes, if a dict of column to dtype, then use those,
        if False, then don't infer dtypes at all, applies only to the data.
    convert_axes : boolean, default True
        Try to convert the axes to the proper dtypes.
    convert_dates : boolean, default True
        List of columns to parse for dates; If True, then try to parse
        datelike columns default is True; a column label is datelike if

        * it ends with ``'_at'``,
        * it ends with ``'_time'``,
        * it begins with ``'timestamp'``,
        * it is ``'modified'``, or
        * it is ``'date'``

    keep_default_dates : boolean, default True
        If parsing dates, then parse the default datelike columns
    numpy : boolean, default False
        Direct decoding to numpy arrays. Supports numeric data only, but
        non-numeric column and index labels are supported. Note also that the
        JSON ordering MUST be the same for each term if numpy=True.
    precise_float : boolean, default False
        Set to enable usage of higher precision (strtod) function when
        decoding string to double values. Default (False) is to use fast but
        less precise builtin functionality
    date_unit : string, default None
        The timestamp unit to detect if converting dates. The default behaviour
        is to try and detect the correct precision, but if this is not desired
        then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
        milliseconds, microseconds or nanoseconds respectively.
    lines : boolean, default False
        Read the file as a json object per line.

        .. versionadded:: 0.19.0

    encoding : str, default is 'utf-8'
        The encoding to use to decode py3 bytes.

        .. versionadded:: 0.19.0

    Returns
    -------
    result : Series or DataFrame, depending on the value of `typ`.

    See Also
    --------
    DataFrame.to_json

    Examples
    --------
    >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
    ...                   index=['row 1', 'row 2'],
    ...                   columns=['col 1', 'col 2'])

    Encoding/decoding a Dataframe using ``'split'`` formatted JSON:

    >>> df.to_json(orient='split')
    '{"columns":["col 1","col 2"],
      "index":["row 1","row 2"],
      "data":[["a","b"],["c","d"]]}'
    >>> pd.read_json(_, orient='split')
          col 1 col 2
    row 1     a     b
    row 2     c     d

    Encoding/decoding a Dataframe using ``'index'`` formatted JSON:

    >>> df.to_json(orient='index')
    '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
    >>> pd.read_json(_, orient='index')
          col 1 col 2
    row 1     a     b
    row 2     c     d

    Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
    Note that index labels are not preserved with this encoding.

    >>> df.to_json(orient='records')
    '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
    >>> pd.read_json(_, orient='records')
      col 1 col 2
    0     a     b
    1     c     d

    Encoding with Table Schema

    >>> df.to_json(orient='table')
    '{"schema": {"fields": [{"name": "index", "type": "string"},
                            {"name": "col 1", "type": "string"},
                            {"name": "col 2", "type": "string"}],
                 "primaryKey": "index",
                 "pandas_version": "0.20.0"},
      "data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
               {"index": "row 2", "col 1": "c", "col 2": "d"}]}'
    """
    filepath_or_buffer, _, _ = get_filepath_or_buffer(path_or_buf,
                                                      encoding=encoding)
    # Resolve the input to a JSON string: file path, readable buffer, or
    # raw JSON text.
    if isinstance(filepath_or_buffer, compat.string_types):
        try:
            exists = os.path.exists(filepath_or_buffer)

        # if the filepath is too long will raise here
        # 5874
        except (TypeError, ValueError):
            exists = False

        if exists:
            fh, handles = _get_handle(filepath_or_buffer, 'r',
                                      encoding=encoding)
            json = fh.read()
            fh.close()
        else:
            # not a path on disk: treat the string itself as JSON
            json = filepath_or_buffer
    elif hasattr(filepath_or_buffer, 'read'):
        json = filepath_or_buffer.read()
    else:
        json = filepath_or_buffer

    if lines:
        # If given a json lines file, we break the string into lines, add
        # commas and put it in a json list to make a valid json object.
        lines = list(StringIO(json.strip()))
        json = '[' + ','.join(lines) + ']'

    obj = None
    if typ == 'frame':
        obj = FrameParser(json, orient, dtype, convert_axes, convert_dates,
                          keep_default_dates, numpy, precise_float,
                          date_unit).parse()

    # Fall through to a Series parse when requested, or when the frame
    # parse produced nothing.
    if typ == 'series' or obj is None:
        if not isinstance(dtype, bool):
            dtype = dict(data=dtype)
        obj = SeriesParser(json, orient, dtype, convert_axes, convert_dates,
                           keep_default_dates, numpy, precise_float,
                           date_unit).parse()

    return obj
class Parser(object):
    """Base class converting a JSON string back into a pandas object.

    Subclasses provide ``_default_orient``, ``_split_keys`` and the
    ``_parse_numpy`` / ``_parse_no_numpy`` / ``_try_convert_types`` hooks;
    the decoded object accumulates in ``self.obj``.
    """

    _STAMP_UNITS = ('s', 'ms', 'us', 'ns')
    # Smallest plausible epoch timestamp per unit (one year past the
    # epoch); anything below is treated as a plain number, not a date.
    _MIN_STAMPS = {
        's': long(31536000),
        'ms': long(31536000000),
        'us': long(31536000000000),
        'ns': long(31536000000000000)}

    def __init__(self, json, orient, dtype=True, convert_axes=True,
                 convert_dates=True, keep_default_dates=False, numpy=False,
                 precise_float=False, date_unit=None):
        self.json = json

        if orient is None:
            orient = self._default_orient
        self.orient = orient

        self.dtype = dtype

        # the numpy fast path cannot decode 'split' payloads
        if orient == "split":
            numpy = False

        if date_unit is not None:
            date_unit = date_unit.lower()
            if date_unit not in self._STAMP_UNITS:
                raise ValueError('date_unit must be one of %s' %
                                 (self._STAMP_UNITS,))
            self.min_stamp = self._MIN_STAMPS[date_unit]
        else:
            self.min_stamp = self._MIN_STAMPS['s']

        self.numpy = numpy
        self.precise_float = precise_float
        self.convert_axes = convert_axes
        self.convert_dates = convert_dates
        self.date_unit = date_unit
        self.keep_default_dates = keep_default_dates
        self.obj = None

    def check_keys_split(self, decoded):
        "checks that dict has only the appropriate keys for orient='split'"
        bad_keys = set(decoded.keys()).difference(set(self._split_keys))
        if bad_keys:
            bad_keys = ", ".join(bad_keys)
            raise ValueError(u("JSON data had unexpected key(s): %s") %
                             pprint_thing(bad_keys))

    def parse(self):
        """Decode ``self.json`` and return the resulting pandas object."""

        # try numpy
        numpy = self.numpy
        if numpy:
            self._parse_numpy()
        else:
            self._parse_no_numpy()

        if self.obj is None:
            return None
        if self.convert_axes:
            self._convert_axes()
        self._try_convert_types()
        return self.obj

    def _convert_axes(self):
        """ try to convert axes """
        for axis in self.obj._AXIS_NUMBERS.keys():
            new_axis, result = self._try_convert_data(
                axis, self.obj._get_axis(axis), use_dtypes=False,
                convert_dates=True)
            if result:
                setattr(self.obj, axis, new_axis)

    def _try_convert_types(self):
        raise AbstractMethodError(self)

    def _try_convert_data(self, name, data, use_dtypes=True,
                          convert_dates=True):
        """ try to parse a ndarray like into a column by inferring dtype """

        # don't try to coerce, unless a force conversion
        if use_dtypes:
            if self.dtype is False:
                return data, False
            elif self.dtype is True:
                pass
            else:
                # dtype to force
                dtype = (self.dtype.get(name)
                         if isinstance(self.dtype, dict) else self.dtype)
                if dtype is not None:
                    try:
                        dtype = np.dtype(dtype)
                        return data.astype(dtype), True
                    except:
                        return data, False

        if convert_dates:
            new_data, result = self._try_convert_to_date(data)
            if result:
                return new_data, True

        result = False

        if data.dtype == 'object':

            # try float
            try:
                data = data.astype('float64')
                result = True
            except:
                pass

        if data.dtype.kind == 'f':

            if data.dtype != 'float64':

                # coerce floats to 64
                try:
                    data = data.astype('float64')
                    result = True
                except:
                    pass

        # don't coerce 0-len data
        if len(data) and (data.dtype == 'float' or data.dtype == 'object'):

            # coerce ints if we can
            try:
                new_data = data.astype('int64')
                if (new_data == data).all():
                    data = new_data
                    result = True
            except:
                pass

        # coerce ints to 64
        if data.dtype == 'int':

            # coerce floats to 64
            try:
                data = data.astype('int64')
                result = True
            except:
                pass

        return data, result

    def _try_convert_to_date(self, data):
        """ try to parse a ndarray like into a date column
        try to coerce object in epoch/iso formats and
        integer/float in epoch formats, return a boolean if parsing
        was successful """

        # no conversion on empty
        if not len(data):
            return data, False

        new_data = data
        if new_data.dtype == 'object':
            try:
                new_data = data.astype('int64')
            except:
                pass

        # ignore numbers that are out of range
        if issubclass(new_data.dtype.type, np.number):
            in_range = (isnull(new_data.values) | (new_data > self.min_stamp) |
                        (new_data.values == iNaT))
            if not in_range.all():
                return data, False

        # try each unit until one parses; ValueError means "wrong unit,
        # keep trying", any other error aborts the date attempt.
        date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS
        for date_unit in date_units:
            try:
                new_data = to_datetime(new_data, errors='raise',
                                       unit=date_unit)
            except ValueError:
                continue
            except:
                break
            return new_data, True
        return data, False

    def _try_convert_dates(self):
        raise AbstractMethodError(self)
class SeriesParser(Parser):
    """Parser producing a Series; defaults to the 'index' layout."""

    _default_orient = 'index'
    _split_keys = ('name', 'index', 'data')

    def _parse_no_numpy(self):
        json = self.json
        orient = self.orient
        if orient == "split":
            # keys must be strings to be usable as Series() kwargs
            decoded = dict((str(k), v)
                           for k, v in compat.iteritems(loads(
                               json,
                               precise_float=self.precise_float)))
            self.check_keys_split(decoded)
            self.obj = Series(dtype=None, **decoded)
        else:
            self.obj = Series(
                loads(json, precise_float=self.precise_float), dtype=None)

    def _parse_numpy(self):
        # Decode directly into numpy arrays (numeric data only).
        json = self.json
        orient = self.orient
        if orient == "split":
            decoded = loads(json, dtype=None, numpy=True,
                            precise_float=self.precise_float)
            decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))
            self.check_keys_split(decoded)
            self.obj = Series(**decoded)
        elif orient == "columns" or orient == "index":
            # labelled=True yields (values, index) to splat into Series
            self.obj = Series(*loads(json, dtype=None, numpy=True,
                                     labelled=True,
                                     precise_float=self.precise_float))
        else:
            self.obj = Series(loads(json, dtype=None, numpy=True,
                                    precise_float=self.precise_float))

    def _try_convert_types(self):
        if self.obj is None:
            return
        obj, result = self._try_convert_data(
            'data', self.obj, convert_dates=self.convert_dates)
        if result:
            self.obj = obj
class FrameParser(Parser):
    """Parser producing a DataFrame; defaults to the 'columns' layout."""

    _default_orient = 'columns'
    _split_keys = ('columns', 'index', 'data')

    def _parse_numpy(self):
        # Decode directly into numpy arrays (numeric data only).
        json = self.json
        orient = self.orient

        if orient == "columns":
            args = loads(json, dtype=None, numpy=True, labelled=True,
                         precise_float=self.precise_float)
            if args:
                # labelled decode is row-major; transpose and swap the
                # label arrays to match the 'columns' layout.
                args = (args[0].T, args[2], args[1])
            self.obj = DataFrame(*args)
        elif orient == "split":
            decoded = loads(json, dtype=None, numpy=True,
                            precise_float=self.precise_float)
            decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))
            self.check_keys_split(decoded)
            self.obj = DataFrame(**decoded)
        elif orient == "values":
            self.obj = DataFrame(loads(json, dtype=None, numpy=True,
                                       precise_float=self.precise_float))
        else:
            self.obj = DataFrame(*loads(json, dtype=None, numpy=True,
                                        labelled=True,
                                        precise_float=self.precise_float))

    def _parse_no_numpy(self):
        json = self.json
        orient = self.orient

        if orient == "columns":
            self.obj = DataFrame(
                loads(json, precise_float=self.precise_float), dtype=None)
        elif orient == "split":
            # keys must be strings to be usable as DataFrame() kwargs
            decoded = dict((str(k), v)
                           for k, v in compat.iteritems(loads(
                               json,
                               precise_float=self.precise_float)))
            self.check_keys_split(decoded)
            self.obj = DataFrame(dtype=None, **decoded)
        elif orient == "index":
            # 'index' is the transpose of the 'columns' layout
            self.obj = DataFrame(
                loads(json, precise_float=self.precise_float), dtype=None).T
        else:
            self.obj = DataFrame(
                loads(json, precise_float=self.precise_float), dtype=None)

    def _process_converter(self, f, filt=None):
        """ take a conversion function and possibly recreate the frame """

        if filt is None:
            filt = lambda col, c: True

        needs_new_obj = False
        new_obj = dict()
        for i, (col, c) in enumerate(self.obj.iteritems()):
            if filt(col, c):
                new_data, result = f(col, c)
                if result:
                    c = new_data
                    needs_new_obj = True
            new_obj[i] = c

        if needs_new_obj:

            # possibly handle dup columns
            new_obj = DataFrame(new_obj, index=self.obj.index)
            new_obj.columns = self.obj.columns
            self.obj = new_obj

    def _try_convert_types(self):
        if self.obj is None:
            return
        if self.convert_dates:
            self._try_convert_dates()

        self._process_converter(
            lambda col, c: self._try_convert_data(col, c, convert_dates=False))

    def _try_convert_dates(self):
        if self.obj is None:
            return

        # our columns to parse
        convert_dates = self.convert_dates
        if convert_dates is True:
            convert_dates = []
        convert_dates = set(convert_dates)

        def is_ok(col):
            """ return if this col is ok to try for a date parse """
            if not isinstance(col, compat.string_types):
                return False
            col_lower = col.lower()
            if (col_lower.endswith('_at') or
                    col_lower.endswith('_time') or
                    col_lower == 'modified' or
                    col_lower == 'date' or
                    col_lower == 'datetime' or
                    col_lower.startswith('timestamp')):
                return True
            return False

        self._process_converter(
            lambda col, c: self._try_convert_to_date(c),
            lambda col, c: ((self.keep_default_dates and is_ok(col)) or
                            col in convert_dates))
| mit |
ethantang95/DIGITS | digits/dataset/generic/views.py | 3 | 7099 | # Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import caffe_pb2
import flask
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import PIL.Image
from .forms import GenericDatasetForm
from .job import GenericDatasetJob
from digits import extensions, utils
from digits.utils.constants import COLOR_PALETTE_ATTRIBUTE
from digits.utils.routing import request_wants_json, job_from_request
from digits.utils.lmdbreader import DbReader
from digits.webapp import scheduler
blueprint = flask.Blueprint(__name__, __name__)
@blueprint.route('/new/<extension_id>', methods=['GET'])
@utils.auth.requires_login
def new(extension_id):
    """
    Render the creation form for a new GenericDatasetJob.
    """
    form = GenericDatasetForm()
    # Pre-populate fields when cloning an existing job (?clone=<job_id>).
    utils.forms.fill_form_if_cloned(form)

    extension = extensions.data.get_extension(extension_id)
    if extension is None:
        raise ValueError("Unknown extension '%s'" % extension_id)

    extension_form = extension.get_dataset_form()
    # The extension's own fields can be cloned too.
    utils.forms.fill_form_if_cloned(extension_form)

    # Render the extension-specific part of the form and embed it in the page.
    template, context = extension.get_dataset_template(extension_form)
    rendered_extension = flask.render_template_string(template, **context)

    return flask.render_template('datasets/generic/new.html',
                                 extension_title=extension.get_title(),
                                 extension_id=extension_id,
                                 extension_html=rendered_extension,
                                 form=form)
@blueprint.route('/create/<extension_id>.json', methods=['POST'])
@blueprint.route('/create/<extension_id>',
                 methods=['POST'],
                 strict_slashes=False)
@utils.auth.requires_login(redirect=False)
def create(extension_id):
    """
    Creates a new GenericDatasetJob

    Returns JSON when requested: {job_id,name,status} or {errors:[]}
    """
    form = GenericDatasetForm()
    form_valid = form.validate_on_submit()

    extension_class = extensions.data.get_extension(extension_id)
    extension_form = extension_class.get_dataset_form()
    extension_form_valid = extension_form.validate_on_submit()

    # On any validation failure, re-render the page (or JSON) with the
    # combined error set; HTTP 400 either way.
    if not (extension_form_valid and form_valid):
        # merge errors
        errors = form.errors.copy()
        errors.update(extension_form.errors)

        template, context = extension_class.get_dataset_template(
            extension_form)
        rendered_extension = flask.render_template_string(
            template,
            **context)

        if request_wants_json():
            return flask.jsonify({'errors': errors}), 400
        else:
            return flask.render_template(
                'datasets/generic/new.html',
                extension_title=extension_class.get_title(),
                extension_id=extension_id,
                extension_html=rendered_extension,
                form=form,
                errors=errors), 400

    # create instance of extension class
    extension = extension_class(**extension_form.data)

    job = None
    try:
        # create job
        job = GenericDatasetJob(
            username=utils.auth.get_username(),
            name=form.dataset_name.data,
            group=form.group_name.data,
            backend=form.dsopts_backend.data,
            feature_encoding=form.dsopts_feature_encoding.data,
            label_encoding=form.dsopts_label_encoding.data,
            batch_size=int(form.dsopts_batch_size.data),
            num_threads=int(form.dsopts_num_threads.data),
            force_same_shape=form.dsopts_force_same_shape.data,
            extension_id=extension_id,
            extension_userdata=extension.get_user_data(),
        )

        # Save form data with the job so we can easily clone it later.
        utils.forms.save_form_to_job(job, form)
        utils.forms.save_form_to_job(job, extension_form)

        # schedule tasks
        scheduler.add_job(job)

        if request_wants_json():
            return flask.jsonify(job.json_dict())
        else:
            return flask.redirect(flask.url_for(
                'digits.dataset.views.show',
                job_id=job.id()))
    # bare except is deliberate: remove the half-created job on ANY
    # failure (including KeyboardInterrupt), then re-raise unchanged.
    except:
        if job:
            scheduler.delete_job(job)
        raise
@blueprint.route('/explore', methods=['GET'])
def explore():
    """
    Return a gallery page showing images from one of the job's databases.

    Query args:
        db   -- which database of the job to browse
        page -- 0-based page index (default 0)
        size -- images per page (default 25)
    """
    job = job_from_request()

    # Get LMDB
    # NOTE(review): the db name is passed through job.path() twice; this is
    # harmless only if job.path() leaves an already-absolute path unchanged
    # -- verify against the Job implementation.
    db = job.path(flask.request.args.get('db'))
    db_path = job.path(db)

    if (os.path.basename(db_path) == 'labels' and
            COLOR_PALETTE_ATTRIBUTE in job.extension_userdata and
            job.extension_userdata[COLOR_PALETTE_ATTRIBUTE]):
        # assume single-channel 8-bit palette (flat [r,g,b, r,g,b, ...] list)
        palette = job.extension_userdata[COLOR_PALETTE_ATTRIBUTE]
        # floor division keeps the reshape dimension an int under Python 3
        # (plain "/" yields a float there and reshape rejects it)
        palette = np.array(palette).reshape((len(palette) // 3, 3)) / 255.
        # normalize input pixels to [0,1]
        norm = mpl.colors.Normalize(vmin=0, vmax=255)
        # create map
        cmap = plt.cm.ScalarMappable(norm=norm,
                                     cmap=mpl.colors.ListedColormap(palette))
    else:
        cmap = None

    page = int(flask.request.args.get('page', 0))
    size = int(flask.request.args.get('size', 25))

    reader = DbReader(db_path)
    count = 0
    imgs = []

    # pager shows up to 5 pages either side of the current one
    min_page = max(0, page - 5)
    total_entries = reader.total_entries
    # floor division: "/" would make max_page a float under Python 3 and
    # break the range() below
    max_page = min((total_entries - 1) // size, page + 5)
    pages = range(min_page, max_page + 1)

    for key, value in reader.entries():
        # skip entries belonging to earlier pages, then collect one page
        if count >= page * size:
            # entries are serialized caffe Datum protobufs holding an
            # encoded (e.g. PNG/JPEG) image
            datum = caffe_pb2.Datum()
            datum.ParseFromString(value)
            if not datum.encoded:
                raise RuntimeError("Expected encoded database")
            s = StringIO()
            s.write(datum.data)
            s.seek(0)
            img = PIL.Image.open(s)
            if cmap and img.mode in ['L', '1']:
                # colorize single-channel label images with the palette
                data = np.array(img)
                data = cmap.to_rgba(data) * 255
                data = data.astype('uint8')
                # keep RGB values only, remove alpha channel
                data = data[:, :, 0:3]
                img = PIL.Image.fromarray(data)
            imgs.append({"label": None, "b64": utils.image.embed_image_html(img)})
        count += 1
        if len(imgs) >= size:
            break

    return flask.render_template(
        'datasets/images/explore.html',
        page=page, size=size, job=job, imgs=imgs, labels=None,
        pages=pages, label=None, total_entries=total_entries, db=db)
def show(job, related_jobs=None):
    """
    Render the detail page for a generic dataset job.

    Called from digits.dataset.views.show()
    """
    context = {'job': job, 'related_jobs': related_jobs}
    return flask.render_template('datasets/generic/show.html', **context)
def summary(job):
    """
    Return a short HTML summary of a GenericDatasetJob
    """
    template_name = 'datasets/generic/summary.html'
    return flask.render_template(template_name, dataset=job)
| bsd-3-clause |
sunyihuan326/DeltaLab | Andrew_NG_learning/class_two/week_one/syh_01.py | 1 | 8056 | # coding:utf-8
'''
Created on 2017/11/10
@author: sunyihuan
'''
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from class_two.week_one.init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation
from class_two.week_one.init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec
# %matplotlib inline
# Plotting defaults
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

# load image dataset: blue/red dots in circles
train_X, train_Y, test_X, test_Y = load_dataset()
# Model definition
def model(X, Y, learning_rate=0.01, num_iterations=15000, print_cost=True, initialization="he"):
    """
    Train a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
    learning_rate -- step size for gradient descent
    num_iterations -- number of gradient-descent iterations
    print_cost -- if True, print (and record) the cost every 1000 iterations
    initialization -- which weight initializer to use ("zeros", "random" or "he")

    Returns:
    parameters -- parameters learnt by the model
    """
    costs = []  # losses sampled every 1000 iterations (only when print_cost is True)
    layers_dims = [X.shape[0], 10, 5, 1]  # input size from X, two hidden layers, one output unit

    # Pick the requested initialization scheme
    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)

    # Gradient-descent loop: forward pass, loss, backward pass, update
    for iteration in range(num_iterations):
        a3, cache = forward_propagation(X, parameters)
        cost = compute_loss(a3, Y)
        grads = backward_propagation(X, Y, cache)
        parameters = update_parameters(parameters, grads, learning_rate)

        if print_cost and iteration % 1000 == 0:
            print("Cost after iteration {}: {}".format(iteration, cost))
            costs.append(cost)

    # Plot the recorded learning curve
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
# GRADED FUNCTION: initialize_parameters_zeros
def initialize_parameters_zeros(layers_dims):
    """
    Initialize every weight matrix and bias vector to zeros.

    Arguments:
    layers_dims -- python array (list) containing the size of each layer.

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                  Wl -- weight matrix of shape (layers_dims[l], layers_dims[l-1])
                  bl -- bias vector of shape (layers_dims[l], 1)
    """
    parameters = {}
    # Pair each layer size with the one before it; layer numbering starts at 1.
    for layer, (fan_in, fan_out) in enumerate(zip(layers_dims, layers_dims[1:]), start=1):
        parameters['W%d' % layer] = np.zeros((fan_out, fan_in))
        parameters['b%d' % layer] = np.zeros((fan_out, 1))
    return parameters
# Sanity check: initialize_parameters_zeros on a tiny 3->2->1 network
parameters = initialize_parameters_zeros([3,2,1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))

# Train with all-zeros initialization (the exercise demonstrates that this
# fails to break symmetry between neurons)
parameters = model(train_X, train_Y, initialization = "zeros")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
print ("predictions_train = " + str(predictions_train))
print ("predictions_test = " + str(predictions_test))

# Visualize the resulting decision boundary
plt.title("Model with Zeros initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# GRADED FUNCTION: initialize_parameters_random
def initialize_parameters_random(layers_dims):
    """
    Initialize weights from a standard normal scaled by 10; biases to zero.

    The deliberately large factor of 10 is what the exercise uses to show
    the effect of oversized random initialization.

    Arguments:
    layers_dims -- python array (list) containing the size of each layer.

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                  Wl -- weight matrix of shape (layers_dims[l], layers_dims[l-1])
                  bl -- bias vector of shape (layers_dims[l], 1)
    """
    np.random.seed(3)  # fixed seed so every run draws the same "random" numbers
    parameters = {}
    num_layers = len(layers_dims)
    for layer in range(1, num_layers):
        rows, cols = layers_dims[layer], layers_dims[layer - 1]
        parameters['W' + str(layer)] = np.random.randn(rows, cols) * 10
        parameters['b' + str(layer)] = np.zeros((rows, 1))
    return parameters
# Sanity check: initialize_parameters_random on a tiny 3->2->1 network
parameters = initialize_parameters_random([3, 2, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))

# Train with large random initialization (the exercise uses this to show
# the downside of oversized initial weights)
parameters = model(train_X, train_Y, initialization = "random")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
print (predictions_train)
print (predictions_test)

# Visualize the resulting decision boundary
plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# GRADED FUNCTION: initialize_parameters_he
def initialize_parameters_he(layers_dims):
    """
    He initialization: weights drawn from N(0, 1) scaled by sqrt(2 / fan_in),
    biases set to zero.

    Arguments:
    layers_dims -- python array (list) containing the size of each layer.

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                  Wl -- weight matrix of shape (layers_dims[l], layers_dims[l-1])
                  bl -- bias vector of shape (layers_dims[l], 1)
    """
    np.random.seed(3)  # reproducible draws
    parameters = {}
    num_weight_layers = len(layers_dims) - 1
    for layer in range(1, num_weight_layers + 1):
        fan_in = layers_dims[layer - 1]
        fan_out = layers_dims[layer]
        parameters['W' + str(layer)] = np.random.randn(fan_out, fan_in) * np.sqrt(2 / fan_in)
        parameters['b' + str(layer)] = np.zeros((fan_out, 1))
    return parameters
# Sanity check: initialize_parameters_he on a tiny 2->4->1 network
parameters = initialize_parameters_he([2, 4, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))

# Train with He initialization
parameters = model(train_X, train_Y, initialization = "he")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)

# Visualize the resulting decision boundary
plt.title("Model with He initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
| mit |
smartscheduling/scikit-learn-categorical-tree | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from scipy import stats

from sklearn.linear_model import ARDRegression, LinearRegression

###############################################################################
# Generating simulated data with Gaussian weights

# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
# (randint may draw duplicate indices, so up to 10 distinct features are set)
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
    w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise

###############################################################################
# Fit the ARD Regression, plus an OLS baseline for comparison
clf = ARDRegression(compute_score=True)
clf.fit(X, y)

ols = LinearRegression()
ols.fit(X, y)

###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)

# Histogram of the estimated ARD weights, with the truly relevant features marked
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
         'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)

# Objective value over the ARD iterations
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
loli/semisupervisedforests | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD

import sys

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# newer versions import train_test_split from sklearn.model_selection
from sklearn.cross_validation import train_test_split
from sklearn import metrics


# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)

# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
    dataset.data, dataset.target, test_size=0.5)

# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=False)

# TASK: Build a vectorizer / classifier pipeline using the previous analyzer;
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])

# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)

# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)

# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
                                    target_names=dataset.target_names))

# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)

#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()

# Predict the result on some short new sentences:
sentences = [
    u'This is a language detection test.',
    u'Ceci est un test de d\xe9tection de la langue.',
    u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)

for s, p in zip(sentences, predicted):
    print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
mirestrepo/voxels-at-lems | dbrec3d/bof/general/learn_categories/plot_histograms_in_one.py | 1 | 3335 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 7 14:38:46 2011
Plot pca test error vs train error
@author: -
"""
# Computes the gaussian gradients on a boxm_alpha_scene
import os;
import optparse;
import time;
import sys;
import numpy as np
import matplotlib.pyplot as plt
import glob
if __name__ == "__main__":
    # Plot the per-category BoF histograms (one "*hist_plot.txt" file per
    # object class) as a single grouped bar chart and save it as a PDF.
    print("******************************Ploting Class Histograms***************************")

    # NOTE(review): no options are ever added or parsed -- the directories
    # below are hard-coded
    parser = optparse.OptionParser(description='Init Category info')

    # class_histograms_dir = "/Users/isa/Experiments/BOF/helicopter_providence/pmvs/original/bof_cross_validation/trial_6/class_histograms_20"
    class_histograms_dir = "/Users/isa/Experiments/BOF/helicopter_providence/taylor/bof_cross_validation/trial_6/class_histograms_20"
    # class_histograms_dir = "/Users/isa/Experiments/BOF/helicopter_providence/pca_cross_validation/trial_6/bof/class_histograms_20"

    if not os.path.isdir(class_histograms_dir + "/"):
        print("Invalid histogram Dir")
        sys.exit(-1)

    all_scenes_path = class_histograms_dir + "/all_scenes"
    if not os.path.isdir(all_scenes_path + "/"):
        print("Invalid All scenes path")
        sys.exit(-1)

    # One histogram file per object category
    hist_txt_files = glob.glob1(all_scenes_path, '*hist_plot.txt')
    nclasses = len(hist_txt_files)
    print("Ploting " + str(nclasses) + " files")

    i = 0
    colors = ['r', 'b', 'cyan', 'g', 'yellow']
    labels = ['Plane', 'House', 'Building', 'Car', 'Parking Lot']
    width = 0.15  # bar width; class l's bars are offset by width*l within each group

    fig = plt.figure()
    ax = fig.add_subplot(111)
    class_handle = np.zeros(5)  # NOTE(review): unused
    x = np.arange(20)  # one group per histogram bin (20 bins)

    for file in hist_txt_files:
        full_path_file = all_scenes_path + "/" + file
        # close each histogram file as soon as it is read; the original code
        # left every handle open until interpreter exit
        with open(full_path_file, 'r') as f:
            lines = f.readlines()

        # the second line holds the comma-separated bin values P(v_j | C_l)
        y = []
        line1 = lines[1]
        this_line = line1.split(", ")
        for j in this_line:
            y.append(float(j))

        rects = ax.bar(x + width * i, y, width, color=colors[i], alpha=0.5, label=labels[i])
        # NOTE(review): plt.hold() was removed in matplotlib 3.0; kept for
        # compatibility with the old matplotlib this script targets
        plt.hold(True)
        i = i + 1

    print("////////////////////////End Ploting Class Histograms/////////////////////////////")

    ax.set_xlabel('Volumetric word entry', fontsize=18)
    ax.set_ylabel('$P(v_j | C_l)$', fontsize=18)
    ax.set_xticklabels(x, fontsize=14)
    ax.set_xticks(x + 0.37)
    y = np.arange(0, 0.36, 0.05)
    ax.set_yticklabels(y, fontsize=14)
    ax.set_yticks(y)

    plt.hold(False)
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles, labels, 'upper right')
    leg = plt.gca().get_legend()
    ltext = leg.get_texts()  # all the text.Text instances in the legend
    plt.setp(ltext, fontsize=18)  # the legend text fontsize

    plt.show()
    figure_file = all_scenes_path + "/all_classes_hist.pdf"
    plt.savefig(figure_file, transparent=True)
f.close(); | bsd-2-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.