repo_name
stringlengths 7
90
| path
stringlengths 5
191
| copies
stringlengths 1
3
| size
stringlengths 4
6
| content
stringlengths 976
581k
| license
stringclasses 15
values |
|---|---|---|---|---|---|
0asa/scikit-learn
|
examples/cluster/plot_digits_linkage.py
|
369
|
2959
|
"""
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the behavior "rich getting richer" of
agglomerative clustering that tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
that ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
    """Double the dataset by appending a randomly shifted copy of each image.

    Having a larger dataset shows more clearly the behavior of the
    clustering methods, but we only multiply the size by 2, as the cost
    of hierarchical clustering is strongly super-linear in n_samples.

    Parameters
    ----------
    X : array, shape (n_samples, 64) -- flattened 8x8 digit images.
    y : array, shape (n_samples,) -- digit labels.

    Returns
    -------
    X_aug, y_aug : the input stacked with one jittered copy per sample.
    """
    def _random_shift(flat_img):
        # Small random sub-pixel translation of the 8x8 image; draws two
        # normal deviates per image, exactly one call per row.
        jitter = .3 * np.random.normal(size=2)
        shifted = ndimage.shift(flat_img.reshape((8, 8)), jitter,
                                mode='constant')
        return shifted.ravel()

    X_aug = np.concatenate([X, np.apply_along_axis(_random_shift, 1, X)])
    y_aug = np.concatenate([y, y], axis=0)
    return X_aug, y_aug
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
    """Plot the 2D embedding ``X_red`` with points colored by ``labels``.

    Each sample is drawn as its true digit (read from the module-level
    ``y``), colored by its cluster assignment.

    Parameters
    ----------
    X_red : array, shape (n_samples, 2) -- 2D embedding of the data.
    X : array -- original data; unused, kept for interface compatibility.
    labels : array of int -- cluster label per sample, expected in 0..9.
    title : str or None -- optional figure title.
    """
    # Rescale the embedding to the unit square so the text layout is stable.
    x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
    X_red = (X_red - x_min) / (x_max - x_min)

    plt.figure(figsize=(6, 4))
    for i in range(X_red.shape[0]):
        # BUG FIX: ``plt.cm.spectral`` was removed in matplotlib 2.2
        # (renamed ``nipy_spectral``); use the replacement colormap.
        plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
                 color=plt.cm.nipy_spectral(labels[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})

    plt.xticks([])
    plt.yticks([])
    if title is not None:
        plt.title(title, size=17)
    plt.axis('off')
    plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
# Spectral embedding projects the (augmented) digits to 2D; the
# clusterings below run on this embedding rather than the raw pixels.
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")

from sklearn.cluster import AgglomerativeClustering

# Fit each linkage strategy on the same embedding, timing each fit, and
# draw one figure per strategy (shown together by plt.show()).
for linkage in ('ward', 'average', 'complete'):
    clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
    t0 = time()
    clustering.fit(X_red)
    print("%s : %.2fs" % (linkage, time() - t0))
    plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)

plt.show()
|
bsd-3-clause
|
lukebarnard1/bokeh
|
sphinx/source/docs/tutorials/solutions/stocks.py
|
23
|
2799
|
###
### NOTE: This exercise requires a network connection
###
import numpy as np
import pandas as pd
from bokeh.plotting import figure, output_file, show, VBox

# Here is some code to read in some stock data from the Yahoo Finance API.
# NOTE(review): the ichart.yahoo.com CSV endpoint was discontinued in 2017,
# so these downloads will fail on a modern network -- TODO confirm / replace
# with a local fixture if this tutorial is revived.
AAPL = pd.read_csv(
    "http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000&d=0&e=1&f=2010",
    parse_dates=['Date'])
MSFT = pd.read_csv(
    "http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000&d=0&e=1&f=2010",
    parse_dates=['Date'])
IBM = pd.read_csv(
    "http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000&d=0&e=1&f=2010",
    parse_dates=['Date'])

# All output is written to a standalone HTML file.
output_file("stocks.html", title="stocks.py example")

# create a figure with a datetime x-axis
p1 = figure(title="Stocks",
            x_axis_label="Date",
            y_axis_label="Close price",
            x_axis_type="datetime")
# Custom date tick formats for year/month/day zoom levels.
p1.below[0].formatter.formats = dict(years=['%Y'],
                                     months=['%b %Y'],
                                     days=['%d %b %Y'])

# EXERCISE: finish this line plot, and add more for the other stocks. Each one should
# have a legend, and its own color.
p1.line(
    AAPL['Date'],       # x coordinates
    AAPL['Adj Close'],  # y coordinates
    color='#A6CEE3',    # set a color for the line
    legend='AAPL',      # attach a legend label
)
p1.line(IBM['Date'], IBM['Adj Close'], color='#33A02C', legend='IBM')
p1.line(MSFT['Date'], MSFT['Adj Close'], color='#FB9A99', legend='MSFT')

# EXERCISE: style the plot, set a title, lighten the gridlines, etc.
p1.title = "Stock Closing Prices"
p1.grid.grid_line_alpha = 0.3

# EXERCISE: start a new figure
p2 = figure(title="AAPL average",
            x_axis_label="Date",
            y_axis_label="Close price",
            x_axis_type="datetime")
p2.below[0].formatter.formats = dict(years=['%Y'],
                                     months=['%b %Y'],
                                     days=['%d %b %Y'])

# Here is some code to compute the 30-day moving average for AAPL.
aapl = AAPL['Adj Close']
aapl_dates = AAPL['Date']
window_size = 30
# Uniform averaging window; mode 'same' keeps the output aligned with the
# input dates (values near the edges are biased where the window overhangs).
window = np.ones(window_size)/float(window_size)
aapl_avg = np.convolve(aapl, window, 'same')

# EXERCISE: plot a scatter of circles for the individual AAPL prices with legend
# 'close'. Remember to set the x axis type and tools on the first renderer
p2.scatter(aapl_dates, aapl, size=4, color='#A6CEE3', legend='close')

# EXERCISE: plot a line of the AAPL moving average data with the legend 'avg'
p2.line(aapl_dates, aapl_avg, color='red', legend='avg')

# EXERCISE: style the plot, set a title, lighten the gridlines, etc.
p2.title = "AAPL One-Month Average"
p2.grid.grid_line_alpha = 0.3

show(VBox(p1, p2))  # open a browser
|
bsd-3-clause
|
pravsripad/mne-python
|
tutorials/stats-source-space/plot_stats_cluster_time_frequency_repeated_measures_anova.py
|
16
|
10098
|
"""
.. _tut-timefreq-twoway-anova:
====================================================================
Mass-univariate twoway repeated measures ANOVA on single trial power
====================================================================
This script shows how to conduct a mass-univariate repeated measures
ANOVA. As the model to be fitted assumes two fully crossed factors,
we will study the interplay between perceptual modality
(auditory VS visual) and the location of stimulus presentation
(left VS right). Here we use single trials as replications
(subjects) while iterating over time slices plus frequency bands
to fit our mass-univariate model. For the sake of simplicity we
will confine this analysis to one single channel of which we know
that it exposes a strong induced response. We will then visualize
each effect by creating a corresponding mass-univariate effect
image. We conclude with accounting for multiple comparisons by
performing a permutation clustering test using the ANOVA as
clustering function. The final results will be compared to multiple
comparisons using False Discovery Rate correction.
"""
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import f_threshold_mway_rm, f_mway_rm, fdr_correction
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
tmin, tmax = -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443'] # bads
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
ch_name = 'MEG 1332'
# Load conditions
reject = dict(grad=4000e-13, eog=150e-6)
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=reject)
epochs.pick_channels([ch_name]) # restrict example to one channel
###############################################################################
# We have to make sure all conditions have the same counts, as the ANOVA
# expects a fully balanced data matrix and does not forgive imbalances that
# generously (risk of type-I error).
epochs.equalize_event_counts(event_id)
# Factor to down-sample the temporal dimension of the TFR computed by
# tfr_morlet.
decim = 2
freqs = np.arange(7, 30, 3) # define frequencies of interest
n_cycles = freqs / freqs[0]
zero_mean = False # don't correct morlet wavelet to be of mean zero
# To have a true wavelet zero_mean should be True but here for illustration
# purposes it helps to spot the evoked response.
###############################################################################
# Create TFR representations for all conditions
# ---------------------------------------------
epochs_power = list()
for condition in [epochs[k] for k in event_id]:
this_tfr = tfr_morlet(condition, freqs, n_cycles=n_cycles,
decim=decim, average=False, zero_mean=zero_mean,
return_itc=False)
this_tfr.apply_baseline(mode='ratio', baseline=(None, 0))
this_power = this_tfr.data[:, 0, :, :] # we only have one channel.
epochs_power.append(this_power)
###############################################################################
# Setup repeated measures ANOVA
# -----------------------------
#
# We will tell the ANOVA how to interpret the data matrix in terms of factors.
# This is done via the factor levels argument which is a list of the number
# factor levels for each factor.
n_conditions = len(epochs.event_id)
n_replications = epochs.events.shape[0] // n_conditions
factor_levels = [2, 2] # number of levels in each factor
effects = 'A*B' # this is the default signature for computing all effects
# Other possible options are 'A' or 'B' for the corresponding main effects
# or 'A:B' for the interaction effect only (this notation is borrowed from the
# R formula language)
n_freqs = len(freqs)
times = 1e3 * epochs.times[::decim]
n_times = len(times)
###############################################################################
# Now we'll assemble the data matrix and swap axes so the trial replications
# are the first dimension and the conditions are the second dimension.
data = np.swapaxes(np.asarray(epochs_power), 1, 0)
# reshape last two dimensions in one mass-univariate observation-vector
data = data.reshape(n_replications, n_conditions, n_freqs * n_times)
# so we have replications * conditions * observations:
print(data.shape)
###############################################################################
# While the iteration scheme used above for assembling the data matrix
# makes sure the first two dimensions are organized as expected (with A =
# modality and B = location):
#
# .. table:: Sample data layout
#
# ===== ==== ==== ==== ====
#    trial  A1B1 A1B2 A2B1 A2B2
# ===== ==== ==== ==== ====
# 1 1.34 2.53 0.97 1.74
# ... ... ... ... ...
# 56 2.45 7.90 3.09 4.76
# ===== ==== ==== ==== ====
#
# Now we're ready to run our repeated measures ANOVA.
#
# Note. As we treat trials as subjects, the test only accounts for
# time locked responses despite the 'induced' approach.
# For analysis for induced power at the group level averaged TRFs
# are required.
fvals, pvals = f_mway_rm(data, factor_levels, effects=effects)
effect_labels = ['modality', 'location', 'modality by location']
# let's visualize our effects by computing f-images
for effect, sig, effect_label in zip(fvals, pvals, effect_labels):
plt.figure()
# show naive F-values in gray
plt.imshow(effect.reshape(8, 211), cmap=plt.cm.gray, extent=[times[0],
times[-1], freqs[0], freqs[-1]], aspect='auto',
origin='lower')
# create mask for significant Time-frequency locations
effect[sig >= 0.05] = np.nan
plt.imshow(effect.reshape(8, 211), cmap='RdBu_r', extent=[times[0],
times[-1], freqs[0], freqs[-1]], aspect='auto',
origin='lower')
plt.colorbar()
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title(r"Time-locked response for '%s' (%s)" % (effect_label, ch_name))
plt.show()
###############################################################################
# Account for multiple comparisons using FDR versus permutation clustering test
# -----------------------------------------------------------------------------
#
# First we need to slightly modify the ANOVA function to be suitable for
# the clustering procedure. Also want to set some defaults.
# Let's first override effects to confine the analysis to the interaction
effects = 'A:B'
###############################################################################
# A stat_fun must deal with a variable number of input arguments.
# Inside the clustering function each condition will be passed as flattened
# array, necessitated by the clustering procedure. The ANOVA however expects an
# input array of dimensions: subjects X conditions X observations (optional).
# The following function catches the list input and swaps the first and
# the second dimension and finally calls the ANOVA function.
def stat_fun(*args):
    """ANOVA wrapper usable as a stat_fun for permutation_cluster_test.

    The clustering routine passes one flattened array per condition; stack
    them and swap the first two axes so the data matrix matches the
    (subjects, conditions, observations) layout expected by f_mway_rm.
    f_mway_rm returns (f-values, p-values); only the f-values are kept.
    Relies on the module-level ``factor_levels`` and ``effects`` bindings
    (``effects`` is 'A:B' -- interaction only -- at the point of use).
    """
    return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
                     effects=effects, return_pvals=False)[0]
# The ANOVA returns a tuple f-values and p-values, we will pick the former.
pthresh = 0.001 # set threshold rather high to save some time
f_thresh = f_threshold_mway_rm(n_replications, factor_levels, effects,
pthresh)
tail = 1 # f-test, so tail > 0
n_permutations = 256 # Save some time (the test won't be too sensitive ...)
T_obs, clusters, cluster_p_values, h0 = mne.stats.permutation_cluster_test(
epochs_power, stat_fun=stat_fun, threshold=f_thresh, tail=tail, n_jobs=1,
n_permutations=n_permutations, buffer_size=None, out_type='mask')
###############################################################################
# Create new stats image with only significant clusters:
good_clusters = np.where(cluster_p_values < .05)[0]
T_obs_plot = T_obs.copy()
T_obs_plot[~clusters[np.squeeze(good_clusters)]] = np.nan
plt.figure()
for f_image, cmap in zip([T_obs, T_obs_plot], [plt.cm.gray, 'RdBu_r']):
plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
freqs[0], freqs[-1]], aspect='auto',
origin='lower')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title("Time-locked response for 'modality by location' (%s)\n"
" cluster-level corrected (p <= 0.05)" % ch_name)
plt.show()
###############################################################################
# Now using FDR:
mask, _ = fdr_correction(pvals[2])
T_obs_plot2 = T_obs.copy()
T_obs_plot2[~mask.reshape(T_obs_plot.shape)] = np.nan
plt.figure()
for f_image, cmap in zip([T_obs, T_obs_plot2], [plt.cm.gray, 'RdBu_r']):
if np.isnan(f_image).all():
continue # nothing to show
plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
freqs[0], freqs[-1]], aspect='auto',
origin='lower')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title("Time-locked response for 'modality by location' (%s)\n"
" FDR corrected (p <= 0.05)" % ch_name)
plt.show()
###############################################################################
# Both cluster level and FDR correction help get rid of
# potential spots we saw in the naive f-images.
|
bsd-3-clause
|
YihaoLu/statsmodels
|
statsmodels/sandbox/tests/test_predict_functional.py
|
29
|
12873
|
from statsmodels.sandbox.predict_functional import predict_functional
import numpy as np
import pandas as pd
import statsmodels.api as sm
from numpy.testing import dec
# If true, the output is written to a multi-page pdf file.
pdf_output = False
try:
import matplotlib.pyplot as plt
import matplotlib
have_matplotlib = True
except ImportError:
have_matplotlib = False
def pctl(q):
    """Return a summary function computing the 100*q'th percentile.

    The returned callable maps an array-like of values to
    ``np.percentile(values, 100 * q)``.
    """
    def _percentile(values):
        return np.percentile(values, 100 * q)
    return _percentile
class TestPredFunc(object):
@classmethod
def setup_class(cls):
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
cls.pdf = PdfPages("predict_functional.pdf")
@classmethod
def teardown_class(cls):
if pdf_output:
cls.pdf.close()
    def close_or_save(self, fig):
        """Save ``fig`` into the multi-page PDF when ``pdf_output`` is set,
        otherwise close it to free figure memory between tests."""
        # ``pdf_output`` is a module-level flag; ``self.pdf`` only exists
        # when setup_class created it under that same flag.
        if pdf_output:
            self.pdf.savefig(fig)
        else:
            plt.close(fig)
@dec.skipif(not have_matplotlib)
def test_formula(self):
np.random.seed(542)
n = 500
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.normal(size=n)
x4 = np.random.randint(0, 5, size=n)
x4 = np.asarray(["ABCDE"[i] for i in x4])
x5 = np.random.normal(size=n)
y = 0.3*x2**2 + (x4 == "B") + 0.1*(x4 == "B")*x2**2 + x5 + np.random.normal(size=n)
df = pd.DataFrame({"y": y, "x1": x1, "x2": x2, "x3": x3, "x4": x4, "x5": x5})
fml = "y ~ x1 + bs(x2, df=4) + x3 + x2*x3 + I(x1**2) + C(x4) + C(x4)*bs(x2, df=4) + x5"
model = sm.OLS.from_formula(fml, data=df)
result = model.fit()
summaries = {"x1": np.mean, "x3": pctl(0.75), "x5": np.mean}
values = {"x4": "B"}
pr1, ci1, fvals1 = predict_functional(result, "x2", summaries, values)
values = {"x4": "C"}
pr2, ci2, fvals2 = predict_functional(result, "x2", summaries, values)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x4=B')
plt.plot(fvals2, pr2, '-', label='x4=C')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
plt.ylabel("Fitted mean", size=15)
plt.title("Linear model prediction")
self.close_or_save(fig)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x4=B')
plt.fill_between(fvals1, ci1[:, 0], ci1[:, 1], color='grey')
plt.plot(fvals2, pr2, '-', label='x4=C')
plt.fill_between(fvals2, ci2[:, 0], ci2[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
plt.ylabel("Fitted mean", size=15)
plt.title("Linear model prediction")
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_lm_contrast(self):
np.random.seed(542)
n = 200
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.normal(size=n)
y = x1 + 2*x2 + x3 - x1*x2 + x2*x3 + np.random.normal(size=n)
df = pd.DataFrame({"y": y, "x1": x1, "x2": x2, "x3": x3})
fml = "y ~ x1 + x2 + x3 + x1*x2 + x2*x3"
model = sm.OLS.from_formula(fml, data=df)
result = model.fit()
values = {"x2": 1, "x3": 1} # y = 4
values2 = {"x2": 0, "x3": 0} # y = x1
pr, cb, fvals = predict_functional(result, "x1", values=values,
values2=values2, ci_method='scheffe')
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.67, 0.8])
plt.plot(fvals, pr, '-', label="Estimate", color='orange', lw=4)
plt.plot(fvals, 4 - fvals, '-', label="Truth", color='lime', lw=4)
plt.fill_between(fvals, cb[:, 0], cb[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
leg = plt.figlegend(ha, lb, "center right")
leg.draw_frame(False)
plt.xlabel("Focus variable", size=15)
plt.ylabel("Mean contrast", size=15)
plt.title("Linear model contrast")
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_glm_formula_contrast(self):
np.random.seed(542)
n = 50
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.normal(size=n)
mn = 5 + 0.1*x1 + 0.1*x2 + 0.1*x3 - 0.1*x1*x2
y = np.random.poisson(np.exp(mn), size=len(mn))
df = pd.DataFrame({"y": y, "x1": x1, "x2": x2, "x3": x3})
fml = "y ~ x1 + x2 + x3 + x1*x2"
model = sm.GLM.from_formula(fml, data=df, family=sm.families.Poisson())
result = model.fit()
values = {"x2": 1, "x3": 1} # y = 5.2
values2 = {"x2": 0, "x3": 0} # y = 5 + 0.1*x1
pr, cb, fvals = predict_functional(result, "x1", values=values,
values2=values2, ci_method='simultaneous')
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.67, 0.8])
plt.plot(fvals, pr, '-', label="Estimate", color='orange', lw=4)
plt.plot(fvals, 0.2 - 0.1*fvals, '-', label="Truth", color='lime', lw=4)
plt.fill_between(fvals, cb[:, 0], cb[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
leg = plt.figlegend(ha, lb, "center right")
leg.draw_frame(False)
plt.xlabel("Focus variable", size=15)
plt.ylabel("Linear predictor contrast", size=15)
plt.title("Poisson regression contrast")
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_scb(self):
np.random.seed(473)
n = 100
x = np.random.normal(size=(n,4))
x[:, 0] = 1
for fam_name in "poisson", "binomial", "gaussian":
if fam_name == "poisson":
y = np.random.poisson(20, size=n)
fam = sm.families.Poisson()
true_mean = 20
true_lp = np.log(20)
elif fam_name == "binomial":
y = 1 * (np.random.uniform(size=n) < 0.5)
fam = sm.families.Binomial()
true_mean = 0.5
true_lp = 0
elif fam_name == "gaussian":
y = np.random.normal(size=n)
fam = sm.families.Gaussian()
true_mean = 0
true_lp = 0
model = sm.GLM(y, x, family=fam)
result = model.fit()
# CB is for linear predictor or mean response
for linear in False, True:
true = true_lp if linear else true_mean
values = {'const': 1, "x2": 0}
summaries = {"x3": np.mean}
pred1, cb1, fvals1 = predict_functional(result, "x1",
values=values, summaries=summaries, linear=linear)
pred2, cb2, fvals2 = predict_functional(result, "x1",
values=values, summaries=summaries,
ci_method='simultaneous', linear=linear)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.58, 0.8])
plt.plot(fvals1, pred1, '-', color='black', label='Estimate')
plt.plot(fvals1, true * np.ones(len(pred1)), '-', color='purple',
label='Truth')
plt.plot(fvals1, cb1[:, 0], color='blue', label='Pointwise CB')
plt.plot(fvals1, cb1[:, 1], color='blue')
plt.plot(fvals2, cb2[:, 0], color='green', label='Simultaneous CB')
plt.plot(fvals2, cb2[:, 1], color='green')
ha, lb = ax.get_legend_handles_labels()
leg = plt.figlegend(ha, lb, "center right")
leg.draw_frame(False)
plt.xlabel("Focus variable", size=15)
if linear:
plt.ylabel("Linear predictor", size=15)
else:
plt.ylabel("Fitted mean", size=15)
plt.title("%s family prediction" % fam_name.capitalize())
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_glm_formula(self):
np.random.seed(542)
n = 500
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.randint(0, 3, size=n)
x3 = np.asarray(["ABC"[i] for i in x3])
lin_pred = -1 + 0.5*x1**2 + (x3 == "B")
prob = 1 / (1 + np.exp(-lin_pred))
y = 1 * (np.random.uniform(size=n) < prob)
df = pd.DataFrame({"y": y, "x1": x1, "x2": x2, "x3": x3})
fml = "y ~ x1 + I(x1**2) + x2 + C(x3)"
model = sm.GLM.from_formula(fml, family=sm.families.Binomial(), data=df)
result = model.fit()
summaries = {"x2": np.mean}
for linear in False, True:
values = {"x3": "B"}
pr1, ci1, fvals1 = predict_functional(result, "x1", summaries, values, linear=linear)
values = {"x3": "C"}
pr2, ci2, fvals2 = predict_functional(result, "x1", summaries, values, linear=linear)
exact1 = -1 + 0.5*fvals1**2 + 1
exact2 = -1 + 0.5*fvals2**2
if not linear:
exact1 = 1 / (1 + np.exp(-exact1))
exact2 = 1 / (1 + np.exp(-exact2))
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x3=B')
plt.plot(fvals2, pr2, '-', label='x3=C')
plt.plot(fvals1, exact1, '-', label='x3=B (exact)')
plt.plot(fvals2, exact2, '-', label='x3=C (exact)')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
if linear:
plt.ylabel("Fitted linear predictor", size=15)
else:
plt.ylabel("Fitted probability", size=15)
plt.title("Binomial GLM prediction")
self.close_or_save(fig)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x3=B', color='orange')
plt.fill_between(fvals1, ci1[:, 0], ci1[:, 1], color='grey')
plt.plot(fvals2, pr2, '-', label='x3=C', color='lime')
plt.fill_between(fvals2, ci2[:, 0], ci2[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
if linear:
plt.ylabel("Fitted linear predictor", size=15)
else:
plt.ylabel("Fitted probability", size=15)
plt.title("Binomial GLM prediction")
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_noformula_prediction(self):
np.random.seed(6434)
n = 200
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.normal(size=n)
y = x1 - x2 + np.random.normal(size=n)
exog = np.vstack((x1, x2, x3)).T
model = sm.OLS(y, exog)
result = model.fit()
summaries = {"x3": pctl(0.75)}
values = {"x2": 1}
pr1, ci1, fvals1 = predict_functional(result, "x1", summaries, values)
values = {"x2": -1}
pr2, ci2, fvals2 = predict_functional(result, "x1", summaries, values)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x2=1', lw=4, alpha=0.6, color='orange')
plt.plot(fvals2, pr2, '-', label='x2=-1', lw=4, alpha=0.6, color='lime')
ha, lb = ax.get_legend_handles_labels()
leg = plt.figlegend(ha, lb, "center right")
leg.draw_frame(False)
plt.xlabel("Focus variable", size=15)
plt.ylabel("Fitted mean", size=15)
plt.title("Linear model prediction")
self.close_or_save(fig)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x2=1', lw=4, alpha=0.6, color='orange')
plt.fill_between(fvals1, ci1[:, 0], ci1[:, 1], color='grey')
plt.plot(fvals1, pr2, '-', label='x2=1', lw=4, alpha=0.6, color='lime')
plt.fill_between(fvals2, ci2[:, 0], ci2[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
plt.ylabel("Fitted mean", size=15)
plt.title("Linear model prediction")
self.close_or_save(fig)
|
bsd-3-clause
|
pubnub/Zopkio
|
zopkio/test_runner.py
|
1
|
22763
|
# Copyright 2015 LinkedIn Corp.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Runs tests.
"""
import logging
import threading
import time
import traceback
import webbrowser
from pkgutil import iter_modules
from naarad import Naarad
import zopkio.constants as constants
import zopkio.error_messages as error_messages
# from zopkio.reporter import Reporter
from reporters import junit_reporter, html_reporter
import zopkio.runtime as runtime
import zopkio.test_runner_helper as test_runner_helper
import zopkio.utils as utils
logger = logging.getLogger(__name__)
class FailureHandler(object):
    """
    Maintains failure state to manage what to do after a non-test failure occurs
    """
    #: Sentinel threshold meaning "never abort, no matter how many failures".
    _NO_ABORT = -1
    #: Consecutive failures tolerated before aborting, when not overridden.
    _DEFAULT_FAILURES_BEFORE_ABORT = 2

    def __init__(self, failures_before_abort=None):
        # None (e.g. key absent from the master config) selects the default.
        if failures_before_abort is None:
            failures_before_abort = FailureHandler._DEFAULT_FAILURES_BEFORE_ABORT
        self._failures_before_abort = failures_before_abort
        self._failure_count = 0

    def notify_failure(self):
        """Record one more consecutive failure."""
        self._failure_count += 1

    def notify_success(self):
        """Reset the consecutive-failure counter after a clean run."""
        self._failure_count = 0

    def get_abort_status(self):
        """Return True while execution may continue; False once the
        consecutive-failure count exceeds the threshold (unless aborting
        is disabled via ``_NO_ABORT``)."""
        abort_enabled = self._failures_before_abort != FailureHandler._NO_ABORT
        over_limit = self._failure_count > self._failures_before_abort
        return not (abort_enabled and over_limit)
class TestRunner(object):
"""
Runs tests with the information given in the testfile
"""
    def __init__(self, *args, **kwargs):
        """Dispatch between the two supported construction styles.

        Keyword form: ``TestRunner(ztestsuite=..., reporter_type=...,
        config_overrides=...)``.  Legacy positional form:
        ``TestRunner(testfile, tests_to_run, config_overrides[,
        reporter_type=...])``.  With fewer than 3 positional args and no
        ``ztestsuite`` keyword, nothing is initialized.

        :param kwargs:
        :return:
        """
        # if ('reporter_type' in kwargs):
        #   reporter_type = kwargs['reporter_type']
        if ('ztestsuite' in kwargs):
            # NOTE(review): method name carries a typo ("constuctor");
            # kept as-is -- renaming is out of scope for a doc-only change.
            self._new_constuctor(**kwargs)
        elif (len(args) >= 3 and 'reporter_type' not in kwargs):
            self._old_constructor(args[0], args[1], args[2])
        elif (len(args) >= 3 and 'reporter_type' in kwargs):
            self._old_constructor(args[0], args[1], args[2], reporter_type=kwargs['reporter_type'])
def _old_constructor(self, testfile, tests_to_run, config_overrides, reporter_type=None):
self.testfile = testfile
self.deployment_module, self.dynamic_config_module, self.tests, self.master_config, self.configs = \
test_runner_helper.get_modules(testfile, tests_to_run, config_overrides)
self.directory_info = None
self.reporter_type = reporter_type
self.reporter = None
def _new_constuctor(self, **kwargs):
ztestsuite = kwargs['ztestsuite']
self.testfile = ztestsuite.__class__.__name__
self.deployment_module = ztestsuite
self.dynamic_config_module = ztestsuite
self.tests = ztestsuite.get_tests()
if ('reporter_type' in kwargs):
self.reporter_type = kwargs['reporter_type']
else:
self.reporter_type = None
self.master_config, self.configs = test_runner_helper.load_configs_from_directory(ztestsuite.config_dir,
kwargs.get("config_overrides", {}))
self.directory_info = None
self.reporter = None
def run(self):
"""
This is the main executable function that will run the test
"""
self._setup()
failure_handler = FailureHandler(self.master_config.mapping.get("max_suite_failures_before_abort"))
naarad_obj = Naarad()
for config in self.configs:
config.mapping.iterkeys()
self._reset_tests()
if not failure_handler.get_abort_status():
config.result = constants.SKIPPED
config.message += error_messages.CONFIG_ABORT
self._skip_all_tests()
logger.debug("Skipping " + config.name + "due to too many setup_suite/teardown_suite failures")
else:
runtime.set_active_config(config)
setup_fail = False
if not self.master_config.mapping.get("no_perf", False):
config.naarad_id = naarad_obj.signal_start(self.dynamic_config_module.naarad_config())
config.start_time = time.time()
logger.info("Setting up configuration: " + config.name)
try:
if hasattr(self.deployment_module, 'setup_suite'):
self.deployment_module.setup_suite()
except BaseException:
config.result = constants.SKIPPED
config.message += error_messages.SETUP_SUITE_FAILED + traceback.format_exc()
self._skip_all_tests()
setup_fail = True
failure_handler.notify_failure()
logger.error("Aborting {0} due to setup_suite failure:\n{1}".format(config.name, traceback.format_exc()))
else:
try:
logger.debug("Running tests for configuration: " + config.name)
self._execute_run(config, naarad_obj)
logger.debug("Tearing down configuration: " + config.name)
finally:
try:
if hasattr(self.deployment_module, 'teardown_suite'):
self.deployment_module.teardown_suite()
if not setup_fail:
failure_handler.notify_success()
except BaseException:
config.message += error_messages.TEARDOWN_SUITE_FAILED + traceback.format_exc()
if not setup_fail:
failure_handler.notify_failure()
logger.error("{0} failed teardown_suite(). {1}".format(config.name, traceback.format_exc()))
#kill all orphaned process
if (runtime.get_active_config("cleanup_pending_process",True)):
for deployer in runtime.get_deployers():
deployer.kill_all_process()
config.end_time = time.time()
logger.info("Execution of configuration: {0} complete".format(config.name))
tests = [test for test in self.tests if not isinstance(test, list)] +\
[individual_test for test in self.tests if isinstance(test, list) for individual_test in test]
runtime.get_collector().collect(config, tests)
# log results of tests so that it can be used easily via command-line
self._log_results(tests)
# analysis.generate_diff_reports()
self.reporter.data_source.end_time = time.time()
self.reporter.generate()
if not self.master_config.mapping.get("no-display", False):
self._display_results()
def _convert_naarad_slas_to_list(self, naarad_sla_obj):
"""
Returns a list of SLA objects
:param naarad_sla_obj: the object returned by get_sla_data from the naarad API
"""
sla_objs = []
for a in naarad_sla_obj.values():
for b in a.values():
for c in b.values():
for sla_obj in c.values():
sla_objs.append(sla_obj)
return sla_objs
def _copy_logs(self):
"""
Copy logs from remote machines to local destination
"""
if "LOGS_DIRECTORY" in self.master_config.mapping:
logs_dir = self.master_config.mapping.get("LOGS_DIRECTORY")
else:
logs_dir = self.dynamic_config_module.LOGS_DIRECTORY
utils.makedirs(logs_dir)
for deployer in runtime.get_deployers():
for process in deployer.get_processes():
logs = []
if (hasattr(self.dynamic_config_module, "process_logs")):
logs += self.dynamic_config_module.process_logs(process.servicename)
if (hasattr(self.dynamic_config_module, "machine_logs")):
logs += self.dynamic_config_module.machine_logs().get(process.unique_id, [])
if (hasattr(self.dynamic_config_module, "naarad_logs")):
logs += self.dynamic_config_module.naarad_logs().get(process.unique_id, [])
if hasattr(self.dynamic_config_module, 'log_patterns'):
pattern = self.dynamic_config_module.log_patterns().get(process.unique_id, '^$')
else:
pattern = '^$'
deployer.get_logs(process.unique_id, logs, logs_dir, pattern)
  def _execute_performance(self, naarad_obj):
    """
    Executes naarad analysis over the collected logs and attaches the
    resulting stats and SLA data to each test.
    :param naarad_obj: naarad API object that recorded the measurement signals
    :return:
    """
    # resolve logs/output directories, preferring master-config overrides
    if "LOGS_DIRECTORY" in self.master_config.mapping:
      logs_dir = self.master_config.mapping.get("LOGS_DIRECTORY")
    else:
      logs_dir = self.dynamic_config_module.LOGS_DIRECTORY
    if "OUTPUT_DIRECTORY" in self.master_config.mapping:
      output_dir = self.master_config.mapping.get("OUTPUT_DIRECTORY")
    else:
      output_dir = self.dynamic_config_module.OUTPUT_DIRECTORY
    naarad_obj.analyze(logs_dir, output_dir)
    # diff reports require matplotlib and only make sense with >1 config
    if ('matplotlib' in [tuple_[1] for tuple_ in iter_modules()]) and len(self.configs) > 1:
      prevConfig = self.configs[0]
      if naarad_obj._output_directory is None:
        naarad_obj._output_directory = output_dir
      # diff each config against the previous one in declaration order
      for curConfig in self.configs[1:]:
        if not curConfig.naarad_id is None:
          naarad_obj.diff(curConfig.naarad_id, prevConfig.naarad_id)
        prevConfig = curConfig
    # flatten single tests and parallel-test groups into one list
    tests = [test for test in self.tests if not isinstance(test, list)] +\
            [individual_test for test in self.tests if isinstance(test, list) for individual_test in test]
    for test in tests:
      # only tests that had a measurement window get stats/SLA data attached
      if test.naarad_id is not None:
        test.naarad_stats = naarad_obj.get_stats_data(test.naarad_id)
        test.sla_objs = self._convert_naarad_slas_to_list(naarad_obj.get_sla_data(test.naarad_id))
def _execute_parallel_tests(self, config, failure_handler, naarad_obj, tests):
"""
Evaluates a single test
:param config:
:param failure_handler:
:param naarad_obj:
:param test:
:return:
"""
if not failure_handler.get_abort_status():
for test in tests:
test.result = constants.SKIPPED
test.message += error_messages.TEST_ABORT
logger.debug("Skipping {0} due to too many setup/teardown failures".format(test.name for test in tests))
else:
setup_fail = False
if not self.master_config.mapping.get("no-perf", False):
for test in tests:
test.naarad_config = self.dynamic_config_module.naarad_config()
test.naarad_id = naarad_obj.signal_start(test.naarad_config)
for test in tests:
test.start_time = time.time()
logger.debug("Setting up tests: {0}".format([test.name for test in tests]))
try:
if hasattr(self.deployment_module, 'setup'):
self.deployment_module.setup()
except BaseException:
for test in tests:
test.result = constants.SKIPPED
test.message += error_messages.SETUP_FAILED + traceback.format_exc()
setup_fail = True
failure_handler.notify_failure()
for test in tests:
logger.debug("Aborting {0} due to setup failure:\n{1}".format(test.name, traceback.format_exc()))
else:
logger.debug("Executing tests: {0}".format([test.name for test in tests]))
def run_test_command(test):
while (test.current_iteration < test.total_number_iterations):
test.current_iteration = test.current_iteration + 1
#verify if the test has previously failed. If so then don't try to run again
#unless the config asks for it
if ( (test.result != constants.FAILED)
or (runtime.get_active_config("consecutive_failures_per_test",0) > test.consecutive_failures)
):
self._run_and_verify_test(test)
#if each test is run for number of required iterations before moving to next test
#test.total_number_iterations can be 4 if TEST_ITER for test module is set to 2 and loop_all_test is 2
#in that case each test will be run twice before moving to next test and the whole suite twice
if ((test.current_iteration % (test.total_number_iterations/int(runtime.get_active_config("loop_all_tests",1))))== 0):
break
threads = [threading.Thread(target=run_test_command, args=[test]) for test in tests]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
logger.debug("Tearing down tests: {0}".format([test.name for test in tests]))
try:
if hasattr(self.deployment_module, 'teardown'):
self.deployment_module.teardown()
if not setup_fail:
failure_handler.notify_success()
except BaseException:
for test in tests:
test.message += error_messages.TEARDOWN_FAILED + traceback.format_exc()
if not setup_fail:
failure_handler.notify_failure()
logger.debug("{0} failed teardown():\n{1}".format([test.name for test in tests], traceback.format_exc()))
for test in tests:
test.end_time = time.time()
if not self.master_config.mapping.get("no-display", False):
naarad_obj.signal_stop(test.naarad_id)
logger.debug("Execution of test: {0} complete".format([test.name for test in tests]))
def _execute_single_test(self, config, failure_handler, naarad_obj, test):
"""
Evaluates a single test
:param config:
:param failure_handler:
:param naarad_obj:
:param test:
:return:
"""
if not failure_handler.get_abort_status():
test.result = constants.SKIPPED
test.message += error_messages.TEST_ABORT
logger.debug("Skipping" + test.name + "due to too many setup/teardown failures")
else:
setup_fail = False
if not self.master_config.mapping.get("no-perf", False):
test.naarad_config = self.dynamic_config_module.naarad_config()
test.naarad_id = naarad_obj.signal_start(test.naarad_config)
test.start_time = time.time()
logger.debug("Setting up test: " + test.name)
try:
if hasattr(self.deployment_module, 'setup'):
self.deployment_module.setup()
except BaseException:
test.result = constants.SKIPPED
test.message += error_messages.SETUP_FAILED + traceback.format_exc()
setup_fail = True
failure_handler.notify_failure()
logger.debug("Aborting {0} due to setup failure:\n{1}".format(test.name, traceback.format_exc()))
else:
logger.debug("Executing test: " + test.name)
# 2 ways of loop 1. loop each test (Default) or 2.loop after the entire suite
while (test.current_iteration < test.total_number_iterations):
test.current_iteration = test.current_iteration + 1
#verify if the test has previously failed. If so then don't try to run again
#unless the config asks for it
if ( (test.result != constants.FAILED)
or (runtime.get_active_config("consecutive_failures_per_test",0) > test.consecutive_failures)
):
self._run_and_verify_test(test)
#if each test is run for number of required iterations before moving to next test
#test.total_number_iterations can be 4 if TEST_ITER for test module is set to 2 and loop_all_test is 2
#in that case each test will be run twice before moving to next test and the whole suite twice
if ((test.current_iteration % (test.total_number_iterations/int(runtime.get_active_config("loop_all_tests",1))))== 0):
break
logger.debug("Tearing down test: " + test.name)
try:
if hasattr(self.deployment_module, 'teardown'):
self.deployment_module.teardown()
if not setup_fail:
failure_handler.notify_success()
except BaseException:
test.message += error_messages.TEARDOWN_FAILED + traceback.format_exc()
if not setup_fail:
failure_handler.notify_failure()
logger.debug(test.name + "failed teardown():\n{0}".format(traceback.format_exc()))
test.end_time = time.time()
if not self.master_config.mapping.get("no-display", False):
naarad_obj.signal_stop(test.naarad_id)
logger.debug("Execution of test: " + test.name + " complete")
  def _run_and_verify_test(self,test):
    """
    Runs one iteration of a test and, when configured, validates the result
    immediately afterwards. Updates the per-iteration result map and the
    consecutive-failure counter used to decide whether to keep retrying.
    :param test: the test whose current iteration should be executed
    :return:
    """
    if(test.total_number_iterations > 1):
      logger.debug("Executing iteration:" + str(test.current_iteration))
    try:
      test.func_start_time = time.time()
      test.function()
      test.func_end_time = time.time()
      test.iteration_results[test.current_iteration] = constants.PASSED
      #The final iteration result. Useful to make sure the tests recover in case of error injection
      test.result = constants.PASSED
    except BaseException as e:
      test.result = constants.FAILED
      test.iteration_results[test.current_iteration] = constants.FAILED
      test.exception = e
      test.message = traceback.format_exc()
    else:
      #If verify_after_each_test flag is set we can verify after each test even for single iteration
      if ((test.total_number_iterations > 1) or (runtime.get_active_config("verify_after_each_test",False))):
        test.end_time = time.time()
        # logs must be copied locally before per-test verification can inspect them
        self._copy_logs()
        self._execute_singletest_verification(test)
    # track consecutive failures so retries can be capped via config
    if (test.result == constants.FAILED):
      test.consecutive_failures = test.consecutive_failures + 1
    else:
      test.consecutive_failures = 0
def _execute_run(self, config, naarad_obj):
"""
Executes tests for a single config
"""
failure_handler = FailureHandler(config.mapping.get("max_failures_per_suite_before_abort"))
loop_all_tests = int(runtime.get_active_config("loop_all_tests",1))
self.compute_total_iterations_per_test()
#iterate through the test_suite based on config settings
for i in xrange(loop_all_tests):
for tests in self.tests:
if not isinstance(tests, list) or len(tests) == 1:
if isinstance(tests, list):
test = tests[0]
else:
test = tests
self._execute_single_test(config, failure_handler, naarad_obj, test)
else:
self._execute_parallel_tests(config, failure_handler, naarad_obj, tests)
self._copy_logs()
if not self.master_config.mapping.get("no_perf", False):
naarad_obj.signal_stop(config.naarad_id)
self._execute_performance(naarad_obj)
self._execute_verification()
def _execute_verification(self):
"""
Executes verification methods for the tests
:return:
"""
tests = [test for test in self.tests if not isinstance(test, list)] +\
[individual_test for test in self.tests if isinstance(test, list) for individual_test in test]
for test in tests:
if (test.result != constants.SKIPPED
and test.validation_function is not None
and (test.total_number_iterations <= 1)
and not (runtime.get_active_config("verify_after_each_test",False))
and hasattr(test.validation_function, '__call__')):
try:
test.validation_function()
except BaseException as e:
test.result = constants.FAILED
test.exception = e
def _execute_singletest_verification(self,test):
"""
Performs validation for a single test
:param test:
:return:
"""
if (test.result == constants.PASSED
and test.validation_function is not None
and hasattr(test.validation_function, '__call__')):
try:
test.validation_function()
except BaseException as e:
test.result = constants.FAILED
test.exception = e
if (test.total_number_iterations > 1):
test.iteration_results[test.current_iteration] = constants.FAILED
def compute_total_iterations_per_test(self):
"""
Factor in loop_all_tests config into iteration count of each test
Each test has an tests_iteration associated with them from the test module.
The loop_all_tests is set in config that repeats the entire suite after each
tests necessary iterations is repeated
:return:
"""
loop_all_tests = int(runtime.get_active_config("loop_all_tests",1))
if (loop_all_tests <= 1):
return
else:
for tests in self.tests:
if isinstance(tests, list):
for test in tests:
test.total_number_iterations = test.total_number_iterations * loop_all_tests
else:
tests.total_number_iterations = tests.total_number_iterations * loop_all_tests
  def _display_results(self):
    """
    Opens the generated report in the default web browser via a file:// URL.
    :return:
    """
    webbrowser.open("file://" + self.reporter.get_report_location())
def _get_reporter(self):
"""
Gets a Report object used to display results
:return:
"""
if "OUTPUT_DIRECTORY" in self.master_config.mapping:
output_dir = self.master_config.mapping.get("OUTPUT_DIRECTORY")
else:
output_dir = self.dynamic_config_module.OUTPUT_DIRECTORY
if self.reporter_type == 'junit_reporter':
reporter = junit_reporter.Reporter(self.directory_info["report_name"], self.directory_info["results_dir"],
self.directory_info["logs_dir"], output_dir)
else:
reporter = html_reporter.Reporter(self.directory_info["report_name"], self.directory_info["results_dir"],
self.directory_info["logs_dir"], output_dir)
return reporter
def _log_results(self, tests):
for test in tests:
logger.info("{0}----{1}".format(test.name, test.result))
if test.result == constants.FAILED:
logger.info(traceback.format_exception_only(type(test.exception), test.exception))
def _reset_tests(self):
for test in self.tests:
if isinstance(test, list):
for individual_test in test:
individual_test.reset()
else:
test.reset()
  def _setup(self):
    """
    Sets up output directories and the reporter.
    :return:
    """
    # create the results/logs directory layout for this run; must happen
    # before _get_reporter(), which reads self.directory_info
    self.directory_info = test_runner_helper.directory_setup(self.testfile,
                                                             self.dynamic_config_module,
                                                             self.master_config)
    self.reporter = self._get_reporter()
    # publish the test list so other modules can look it up at runtime
    runtime.set_active_tests(self.tests)
def _skip_all_tests(self):
for test in self.tests:
if isinstance(test, list):
for individual_test in test:
individual_test.result = constants.SKIPPED
else:
test.result = constants.SKIPPED
|
apache-2.0
|
Sentient07/scikit-learn
|
sklearn/metrics/classification.py
|
7
|
72557
|
"""Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# Bernardo Stein <bernardovstein@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import assert_all_finite
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
    """Check that y_true and y_pred belong to the same classification task
    This converts multiclass or binary types to a common shape, and raises a
    ValueError for a mix of multilabel and multiclass targets, a mix of
    multilabel formats, for the presence of continuous-valued or multioutput
    targets, or for targets of different lengths.
    Column vectors are squeezed to 1d, while multilabel formats are returned
    as CSR sparse label indicators.
    Parameters
    ----------
    y_true : array-like
    y_pred : array-like
    Returns
    -------
    type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
        The type of the true target data, as output by
        ``utils.multiclass.type_of_target``
    y_true : array or indicator matrix
    y_pred : array or indicator matrix
    """
    check_consistent_length(y_true, y_pred)
    type_true = type_of_target(y_true)
    type_pred = type_of_target(y_pred)
    y_type = set([type_true, type_pred])
    # binary is a special case of multiclass, so a binary/multiclass mix is
    # safely promoted to multiclass rather than rejected below
    if y_type == set(["binary", "multiclass"]):
        y_type = set(["multiclass"])
    if len(y_type) > 1:
        raise ValueError("Can't handle mix of {0} and {1}"
                         "".format(type_true, type_pred))
    # We can't have more than one value on y_type => The set is no more needed
    y_type = y_type.pop()
    # No metrics support "multiclass-multioutput" format
    if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
        raise ValueError("{0} is not supported".format(y_type))
    if y_type in ["binary", "multiclass"]:
        # squeeze column vectors down to 1d arrays
        y_true = column_or_1d(y_true)
        y_pred = column_or_1d(y_pred)
    if y_type.startswith('multilabel'):
        # normalize any multilabel representation to a CSR indicator matrix
        y_true = csr_matrix(y_true)
        y_pred = csr_matrix(y_pred)
        y_type = 'multilabel-indicator'
    return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
    """Accuracy classification score.

    For multilabel input this computes subset accuracy: a sample counts as
    correct only when its whole predicted label set exactly matches the
    corresponding set in ``y_true``.

    Read more in the :ref:`User Guide <accuracy_score>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.
    normalize : bool, optional (default=True)
        If ``False``, return the number of correctly classified samples.
        Otherwise, return the fraction of correctly classified samples.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    score : float
        The fraction of correctly classified samples if ``normalize == True``
        (best value 1), else the number of correctly classified samples
        (best value ``n_samples``).

    See also
    --------
    jaccard_similarity_score, hamming_loss, zero_one_loss

    Notes
    -----
    In binary and multiclass classification, this function is equal
    to the ``jaccard_similarity_score`` function.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import accuracy_score
    >>> y_pred = [0, 2, 1, 3]
    >>> y_true = [0, 1, 2, 3]
    >>> accuracy_score(y_true, y_pred)
    0.5
    >>> accuracy_score(y_true, y_pred, normalize=False)
    2

    In the multilabel case with binary label indicators:

    >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
    0.5
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if y_type.startswith('multilabel'):
        # a sample is correct only when no label column differs
        per_sample_correct = count_nonzero(y_true - y_pred, axis=1) == 0
    else:
        per_sample_correct = y_true == y_pred
    return _weighted_sum(per_sample_correct, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
    """Compute confusion matrix to evaluate the accuracy of a classification

    By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
    is equal to the number of observations known to be in group :math:`i` but
    predicted to be in group :math:`j`.
    Thus in binary classification, the count of true negatives is
    :math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is
    :math:`C_{1,1}` and false positives is :math:`C_{0,1}`.

    Read more in the :ref:`User Guide <confusion_matrix>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        Ground truth (correct) target values.
    y_pred : array, shape = [n_samples]
        Estimated targets as returned by a classifier.
    labels : array, shape = [n_classes], optional
        List of labels to index the matrix. This may be used to reorder
        or select a subset of labels.
        If none is given, those that appear at least once
        in ``y_true`` or ``y_pred`` are used in sorted order.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    C : array, shape = [n_classes, n_classes]
        Confusion matrix

    References
    ----------
    .. [1] `Wikipedia entry for the Confusion matrix
           <https://en.wikipedia.org/wiki/Confusion_matrix>`_

    Examples
    --------
    >>> from sklearn.metrics import confusion_matrix
    >>> y_true = [2, 0, 2, 2, 0, 1]
    >>> y_pred = [0, 0, 2, 2, 0, 2]
    >>> confusion_matrix(y_true, y_pred)
    array([[2, 0, 0],
           [0, 0, 1],
           [1, 0, 2]])
    >>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
    >>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
    >>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
    array([[2, 0, 0],
           [0, 0, 1],
           [1, 0, 2]])

    In the binary case, we can extract true positives, etc as follows:

    >>> tn, fp, fn, tp = confusion_matrix([0, 1, 0, 1], [1, 1, 1, 0]).ravel()
    >>> (tn, fp, fn, tp)
    (0, 2, 1, 1)
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if y_type not in ("binary", "multiclass"):
        raise ValueError("%s is not supported" % y_type)
    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)
        if np.all([l not in y_true for l in labels]):
            raise ValueError("At least one label specified must be in y_true")
    if sample_weight is None:
        # BUG FIX: `np.int` is a deprecated (now removed) alias for the
        # builtin `int`; using `int` directly is semantically identical
        sample_weight = np.ones(y_true.shape[0], dtype=int)
    else:
        sample_weight = np.asarray(sample_weight)
    check_consistent_length(sample_weight, y_true, y_pred)
    n_labels = labels.size
    # map each label to its row/column index in the matrix
    label_to_ind = {y: x for x, y in enumerate(labels)}
    # convert yt, yp into index; unknown labels map past the valid range
    y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
    y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
    # intersect y_pred, y_true with labels, eliminate items not in labels
    ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]
    # also eliminate weights of eliminated items
    sample_weight = sample_weight[ind]
    # accumulate weights at (true, pred) coordinates; coo_matrix sums
    # duplicate coordinates, which builds the counts in one pass
    CM = coo_matrix((sample_weight, (y_true, y_pred)),
                    shape=(n_labels, n_labels)
                    ).toarray()
    return CM
def cohen_kappa_score(y1, y2, labels=None, weights=None, sample_weight=None):
    r"""Cohen's kappa: a statistic that measures inter-annotator agreement.

    This function computes Cohen's kappa [1]_, a score that expresses the level
    of agreement between two annotators on a classification problem. It is
    defined as

    .. math::
        \kappa = (p_o - p_e) / (1 - p_e)

    where :math:`p_o` is the empirical probability of agreement on the label
    assigned to any sample (the observed agreement ratio), and :math:`p_e` is
    the expected agreement when both annotators assign labels randomly.
    :math:`p_e` is estimated using a per-annotator empirical prior over the
    class labels [2]_.

    Read more in the :ref:`User Guide <cohen_kappa>`.

    Parameters
    ----------
    y1 : array, shape = [n_samples]
        Labels assigned by the first annotator.
    y2 : array, shape = [n_samples]
        Labels assigned by the second annotator. The kappa statistic is
        symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
    labels : array, shape = [n_classes], optional
        List of labels to index the matrix. This may be used to select a
        subset of labels. If None, all labels that appear at least once in
        ``y1`` or ``y2`` are used.
    weights : str, optional
        List of weighting type to calculate the score. None means no weighted;
        "linear" means linear weighted; "quadratic" means quadratic weighted.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    kappa : float
        The kappa statistic, which is a number between -1 and 1. The maximum
        value means complete agreement; zero or lower means chance agreement.

    References
    ----------
    .. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
           Educational and Psychological Measurement 20(1):37-46.
           doi:10.1177/001316446002000104.
    .. [2] `R. Artstein and M. Poesio (2008). "Inter-coder agreement for
           computational linguistics". Computational Linguistics 34(4):555-596.
           <http://www.mitpressjournals.org/doi/abs/10.1162/coli.07-034-R2#.V0J1MJMrIWo>`_
    .. [3] `Wikipedia entry for the Cohen's kappa.
            <https://en.wikipedia.org/wiki/Cohen%27s_kappa>`_
    """
    # BUG FIX: the docstring is now a raw string — it contains ``\kappa``,
    # and ``\k`` is an invalid escape sequence in a normal string literal
    # (DeprecationWarning, an error on newer Python versions).
    confusion = confusion_matrix(y1, y2, labels=labels,
                                 sample_weight=sample_weight)
    n_classes = confusion.shape[0]
    sum0 = np.sum(confusion, axis=0)
    sum1 = np.sum(confusion, axis=1)
    # outer product of the marginals over the total count -> chance-agreement
    # matrix (per-annotator empirical priors)
    expected = np.outer(sum0, sum1) / np.sum(sum0)
    if weights is None:
        # BUG FIX: `np.int` is a deprecated (now removed) alias for the
        # builtin `int`; using `int` directly is semantically identical
        w_mat = np.ones([n_classes, n_classes], dtype=int)
        w_mat.flat[:: n_classes + 1] = 0
    elif weights == "linear" or weights == "quadratic":
        w_mat = np.zeros([n_classes, n_classes], dtype=int)
        w_mat += np.arange(n_classes)
        if weights == "linear":
            w_mat = np.abs(w_mat - w_mat.T)
        else:
            w_mat = (w_mat - w_mat.T) ** 2
    else:
        raise ValueError("Unknown kappa weighting type.")
    k = np.sum(w_mat * confusion) / np.sum(w_mat * expected)
    return 1 - k
def jaccard_similarity_score(y_true, y_pred, normalize=True,
                             sample_weight=None):
    """Jaccard similarity coefficient score

    The Jaccard index [1], or Jaccard similarity coefficient, defined as
    the size of the intersection divided by the size of the union of two label
    sets, is used to compare set of predicted labels for a sample to the
    corresponding set of labels in ``y_true``.

    Read more in the :ref:`User Guide <jaccard_similarity_score>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.
    normalize : bool, optional (default=True)
        If ``False``, return the sum of the Jaccard similarity coefficient
        over the sample set. Otherwise, return the average of Jaccard
        similarity coefficient.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    score : float
        The average Jaccard similarity coefficient if ``normalize == True``
        (best value 1), else its sum over the sample set (best value
        ``n_samples``).

    See also
    --------
    accuracy_score, hamming_loss, zero_one_loss

    Notes
    -----
    In binary and multiclass classification, this function is equivalent
    to the ``accuracy_score``. It differs in the multilabel classification
    problem.

    References
    ----------
    .. [1] `Wikipedia entry for the Jaccard index
           <https://en.wikipedia.org/wiki/Jaccard_index>`_

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import jaccard_similarity_score
    >>> y_pred = [0, 2, 1, 3]
    >>> y_true = [0, 1, 2, 3]
    >>> jaccard_similarity_score(y_true, y_pred)
    0.5
    >>> jaccard_similarity_score(y_true, y_pred, normalize=False)
    2

    In the multilabel case with binary label indicators:

    >>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
        np.ones((2, 2)))
    0.75
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if not y_type.startswith('multilabel'):
        # binary/multiclass: per-sample Jaccard degenerates to exact match
        score = y_true == y_pred
    else:
        with np.errstate(divide='ignore', invalid='ignore'):
            # oddly, we may get an "invalid" rather than a "divide" error here
            union_size = count_nonzero(y_true + y_pred, axis=1)
            intersection_size = count_nonzero(y_true.multiply(y_pred), axis=1)
            score = intersection_size / union_size
            # both label sets empty -> perfect agreement by convention
            score[union_size == 0.0] = 1.0
    return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred, sample_weight=None):
    """Compute the Matthews correlation coefficient (MCC) for binary classes

    The Matthews correlation coefficient is used in machine learning as a
    measure of the quality of binary (two-class) classifications. It takes into
    account true and false positives and negatives and is generally regarded as
    a balanced measure which can be used even if the classes are of very
    different sizes. The MCC is in essence a correlation coefficient value
    between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
    an average random prediction and -1 an inverse prediction. The statistic
    is also known as the phi coefficient. [source: Wikipedia]

    Only in the binary case does this relate to information about true and
    false positives and negatives. See references below.

    Read more in the :ref:`User Guide <matthews_corrcoef>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        Ground truth (correct) target values.
    y_pred : array, shape = [n_samples]
        Estimated targets as returned by a classifier.
    sample_weight : array-like of shape = [n_samples], default None
        Sample weights.

    Returns
    -------
    mcc : float
        The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 and inverse
        prediction).

    References
    ----------
    .. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
       accuracy of prediction algorithms for classification: an overview
       <http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
    .. [2] `Wikipedia entry for the Matthews Correlation Coefficient
       <https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_

    Examples
    --------
    >>> from sklearn.metrics import matthews_corrcoef
    >>> y_true = [+1, +1, +1, -1]
    >>> y_pred = [+1, -1, +1, +1]
    >>> matthews_corrcoef(y_true, y_pred)  # doctest: +ELLIPSIS
    -0.33...
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if y_type != "binary":
        raise ValueError("%s is not supported" % y_type)
    # encode both label vectors with one shared 0/1 mapping
    encoder = LabelEncoder()
    encoder.fit(np.hstack([y_true, y_pred]))
    y_true = encoder.transform(y_true)
    y_pred = encoder.transform(y_pred)
    # MCC is the (weighted) Pearson correlation of the encoded labels
    centered_true = y_true - np.average(y_true, weights=sample_weight)
    centered_pred = y_pred - np.average(y_pred, weights=sample_weight)
    cov_tp = np.average(centered_true * centered_pred, weights=sample_weight)
    var_true = np.average(centered_true ** 2, weights=sample_weight)
    var_pred = np.average(centered_pred ** 2, weights=sample_weight)
    mcc = cov_tp / np.sqrt(var_true * var_pred)
    # a zero-variance side yields nan; report 0 (no correlation) in that case
    return 0. if np.isnan(mcc) else mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
    """Zero-one classification loss.

    If normalize is ``True``, return the fraction of misclassifications
    (float), else it returns the number of misclassifications (int). The best
    performance is 0.

    Read more in the :ref:`User Guide <zero_one_loss>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.
    normalize : bool, optional (default=True)
        If ``False``, return the number of misclassifications.
        Otherwise, return the fraction of misclassifications.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float or int,
        If ``normalize == True``, return the fraction of misclassifications
        (float), else it returns the number of misclassifications (int).

    Notes
    -----
    In multilabel classification, the zero_one_loss function corresponds to
    the subset zero-one loss: for each sample, the entire set of labels must be
    correctly predicted, otherwise the loss for that sample is equal to one.

    See also
    --------
    accuracy_score, hamming_loss, jaccard_similarity_score

    Examples
    --------
    >>> from sklearn.metrics import zero_one_loss
    >>> y_pred = [1, 2, 3, 4]
    >>> y_true = [2, 2, 3, 4]
    >>> zero_one_loss(y_true, y_pred)
    0.25
    >>> zero_one_loss(y_true, y_pred, normalize=False)
    1

    In the multilabel case with binary label indicators:

    >>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
    0.5
    """
    score = accuracy_score(y_true, y_pred, normalize=normalize,
                           sample_weight=sample_weight)
    if normalize:
        return 1 - score
    # un-normalized: misclassification count is the (weighted) sample total
    # minus the (weighted) count of correct samples
    if sample_weight is None:
        n_samples = _num_samples(y_true)
    else:
        n_samples = np.sum(sample_weight)
    return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
             sample_weight=None):
    """Compute the F1 score, also known as balanced F-score or F-measure.

    The F1 score is the harmonic mean of precision and recall, reaching its
    best value at 1 and its worst at 0.  Precision and recall contribute
    equally::

        F1 = 2 * (precision * recall) / (precision + recall)

    In the multi-class and multi-label case, this is the weighted average of
    the F1 score of each class.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``.  Labels present in the data can be
        excluded (e.g. to ignore a majority negative class), while labels not
        present in the data give 0 components in a macro average.  For
        multilabel targets, labels are column indices.  By default, all
        labels in ``y_true`` and ``y_pred`` are used in sorted order.

        .. versionchanged:: 0.17
           parameter *labels* improved for multiclass problem.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is binary.
        Ignored for multiclass/multilabel data; use ``labels=[pos_label]``
        with ``average != 'binary'`` to report scores for one label only.

    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
                       'weighted']
        Required for multiclass/multilabel targets.  If ``None``, per-class
        scores are returned.  Otherwise:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``
            (binary targets only).
        ``'micro'``:
            Count total true positives, false negatives and false positives
            globally.
        ``'macro'``:
            Unweighted mean of per-label metrics; ignores label imbalance.
        ``'weighted'``:
            Mean of per-label metrics weighted by support; can yield an
            F-score that is not between precision and recall.
        ``'samples'``:
            Average metrics over instances (multilabel only).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    f1_score : float or array of float, shape = [n_unique_labels]
        F1 score of the positive class in binary classification or weighted
        average of the F1 scores of each class for the multiclass task.

    References
    ----------
    .. [1] `Wikipedia entry for the F1-score
           <https://en.wikipedia.org/wiki/F1_score>`_

    Examples
    --------
    >>> from sklearn.metrics import f1_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> f1_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
    0.26...
    >>> f1_score(y_true, y_pred, average='micro')  # doctest: +ELLIPSIS
    0.33...
    >>> f1_score(y_true, y_pred, average='weighted')  # doctest: +ELLIPSIS
    0.26...
    >>> f1_score(y_true, y_pred, average=None)
    array([ 0.8,  0. ,  0. ])
    """
    # F1 is exactly the beta=1 special case of the F-beta score.
    return fbeta_score(y_true, y_pred, beta=1, labels=labels,
                       pos_label=pos_label, average=average,
                       sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
                average='binary', sample_weight=None):
    """Compute the F-beta score.

    The F-beta score is the weighted harmonic mean of precision and recall,
    reaching its optimal value at 1 and its worst value at 0.

    The ``beta`` parameter sets the weight of precision in the combined
    score: ``beta < 1`` lends more weight to precision, ``beta > 1`` favors
    recall (``beta -> 0`` considers only precision, ``beta -> inf`` only
    recall).

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    beta : float
        Weight of precision in harmonic mean.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``.  Labels present in the data can be
        excluded (e.g. to ignore a majority negative class), while labels not
        present in the data give 0 components in a macro average.  For
        multilabel targets, labels are column indices.  By default, all
        labels in ``y_true`` and ``y_pred`` are used in sorted order.

        .. versionchanged:: 0.17
           parameter *labels* improved for multiclass problem.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is binary.
        Ignored for multiclass/multilabel data; use ``labels=[pos_label]``
        with ``average != 'binary'`` to report scores for one label only.

    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
                       'weighted']
        Required for multiclass/multilabel targets.  If ``None``, per-class
        scores are returned.  Otherwise:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``
            (binary targets only).
        ``'micro'``:
            Count total true positives, false negatives and false positives
            globally.
        ``'macro'``:
            Unweighted mean of per-label metrics; ignores label imbalance.
        ``'weighted'``:
            Mean of per-label metrics weighted by support; can yield an
            F-score that is not between precision and recall.
        ``'samples'``:
            Average metrics over instances (multilabel only).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        F-beta score of the positive class in binary classification or
        weighted average of the F-beta score of each class for the multiclass
        task.

    References
    ----------
    .. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
           Modern Information Retrieval. Addison Wesley, pp. 327-328.

    .. [2] `Wikipedia entry for the F1-score
           <https://en.wikipedia.org/wiki/F1_score>`_

    Examples
    --------
    >>> from sklearn.metrics import fbeta_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
    ... # doctest: +ELLIPSIS
    0.23...
    >>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
    ... # doctest: +ELLIPSIS
    0.33...
    >>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
    ... # doctest: +ELLIPSIS
    0.23...
    >>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
    ... # doctest: +ELLIPSIS
    array([ 0.71...,  0.        ,  0.        ])
    """
    # Delegate all the counting/averaging work and keep only the F component;
    # restrict warnings to the f-score since that is all we report here.
    _, _, fscore, _ = precision_recall_fscore_support(
        y_true, y_pred, beta=beta, labels=labels, pos_label=pos_label,
        average=average, warn_for=('f-score',), sample_weight=sample_weight)
    return fscore
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
                                    pos_label=1, average=None,
                                    warn_for=('precision', 'recall',
                                              'f-score'),
                                    sample_weight=None):
    """Compute precision, recall, F-measure and support for each class

    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
    true positives and ``fp`` the number of false positives. The precision is
    intuitively the ability of the classifier not to label as positive a sample
    that is negative.

    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives. The recall is
    intuitively the ability of the classifier to find all the positive samples.

    The F-beta score can be interpreted as a weighted harmonic mean of
    the precision and recall, where an F-beta score reaches its best
    value at 1 and worst score at 0.

    The F-beta score weights recall more than precision by a factor of
    ``beta``. ``beta == 1.0`` means recall and precision are equally important.

    The support is the number of occurrences of each class in ``y_true``.

    If ``pos_label is None`` and in binary classification, this function
    returns the average precision, recall and F-measure if ``average``
    is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    beta : float, 1.0 by default
        The strength of recall versus precision in the F-score.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is binary.
        If the data are multiclass or multilabel, this will be ignored;
        setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
        scores for that label only.

    average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
                       'weighted']
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).

    warn_for : tuple or set, for internal use
        This determines which warnings will be made in the case that this
        function is being used to return only one of its metrics.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]

    recall : float (if average is not None) or array of float, , shape =\
        [n_unique_labels]

    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]

    support : int (if average is not None) or array of int, shape =\
        [n_unique_labels]
        The number of occurrences of each label in ``y_true``.

    References
    ----------
    .. [1] `Wikipedia entry for the Precision and recall
           <https://en.wikipedia.org/wiki/Precision_and_recall>`_

    .. [2] `Wikipedia entry for the F1-score
           <https://en.wikipedia.org/wiki/F1_score>`_

    .. [3] `Discriminative Methods for Multi-labeled Classification Advances
           in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
           Godbole, Sunita Sarawagi
           <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`

    Examples
    --------
    >>> from sklearn.metrics import precision_recall_fscore_support
    >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
    >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
    >>> precision_recall_fscore_support(y_true, y_pred, average='macro')
    ... # doctest: +ELLIPSIS
    (0.22..., 0.33..., 0.26..., None)
    >>> precision_recall_fscore_support(y_true, y_pred, average='micro')
    ... # doctest: +ELLIPSIS
    (0.33..., 0.33..., 0.33..., None)
    >>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
    ... # doctest: +ELLIPSIS
    (0.22..., 0.33..., 0.26..., None)

    It is possible to compute per-label precisions, recalls, F1-scores and
    supports instead of averaging:

    >>> precision_recall_fscore_support(y_true, y_pred, average=None,
    ... labels=['pig', 'dog', 'cat'])
    ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
    (array([ 0. ,  0. ,  0.66...]),
     array([ 0.,  0.,  1.]),
     array([ 0. ,  0. ,  0.8]),
     array([2, 2, 2]))
    """
    # Validate the averaging mode first; 'binary' is accepted in addition to
    # the documented options but handled specially below.
    average_options = (None, 'micro', 'macro', 'weighted', 'samples')
    if average not in average_options and average != 'binary':
        raise ValueError('average has to be one of ' +
                         str(average_options))
    if beta <= 0:
        raise ValueError("beta should be >0 in the F-beta score")
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    present_labels = unique_labels(y_true, y_pred)
    # 'binary' averaging is implemented by restricting the label set to the
    # single positive class; it is only valid for truly binary targets.
    if average == 'binary':
        if y_type == 'binary':
            if pos_label not in present_labels:
                if len(present_labels) < 2:
                    # Only negative labels
                    return (0., 0., 0., 0)
                else:
                    raise ValueError("pos_label=%r is not a valid label: %r" %
                                     (pos_label, present_labels))
            labels = [pos_label]
        else:
            raise ValueError("Target is %s but average='binary'. Please "
                             "choose another average setting." % y_type)
    elif pos_label not in (None, 1):
        # pos_label has no effect outside 'binary' averaging; tell the user.
        warnings.warn("Note that pos_label (set to %r) is ignored when "
                      "average != 'binary' (got %r). You may use "
                      "labels=[pos_label] to specify a single positive class."
                      % (pos_label, average), UserWarning)
    if labels is None:
        labels = present_labels
        n_labels = None
    else:
        # Remember how many labels were explicitly requested; extra labels
        # present in the data are appended so the per-label counting below
        # covers every observed class, then sliced away via labels[:n_labels].
        n_labels = len(labels)
        labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
                                                 assume_unique=True)])
    # Calculate tp_sum, pred_sum, true_sum ###
    if y_type.startswith('multilabel'):
        # For 'samples' averaging the statistics are accumulated per row
        # (sample); otherwise per column (label).
        sum_axis = 1 if average == 'samples' else 0
        # All labels are index integers for multilabel.
        # Select labels:
        if not np.all(labels == present_labels):
            if np.max(labels) > np.max(present_labels):
                raise ValueError('All labels must be in [0, n labels). '
                                 'Got %d > %d' %
                                 (np.max(labels), np.max(present_labels)))
            if np.min(labels) < 0:
                raise ValueError('All labels must be in [0, n labels). '
                                 'Got %d < 0' % np.min(labels))
            y_true = y_true[:, labels[:n_labels]]
            y_pred = y_pred[:, labels[:n_labels]]
        # calculate weighted counts
        true_and_pred = y_true.multiply(y_pred)
        tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
                               sample_weight=sample_weight)
        pred_sum = count_nonzero(y_pred, axis=sum_axis,
                                 sample_weight=sample_weight)
        true_sum = count_nonzero(y_true, axis=sum_axis,
                                 sample_weight=sample_weight)
    elif average == 'samples':
        raise ValueError("Sample-based precision, recall, fscore is "
                         "not meaningful outside multilabel "
                         "classification. See the accuracy_score instead.")
    else:
        # Multiclass / binary path: encode labels to 0..n-1 so bincount can
        # be used for all three sufficient statistics.
        le = LabelEncoder()
        le.fit(labels)
        y_true = le.transform(y_true)
        y_pred = le.transform(y_pred)
        sorted_labels = le.classes_
        # labels are now from 0 to len(labels) - 1 -> use bincount
        tp = y_true == y_pred
        tp_bins = y_true[tp]
        if sample_weight is not None:
            tp_bins_weights = np.asarray(sample_weight)[tp]
        else:
            tp_bins_weights = None
        if len(tp_bins):
            tp_sum = bincount(tp_bins, weights=tp_bins_weights,
                              minlength=len(labels))
        else:
            # Pathological case
            true_sum = pred_sum = tp_sum = np.zeros(len(labels))
        if len(y_pred):
            pred_sum = bincount(y_pred, weights=sample_weight,
                                minlength=len(labels))
        if len(y_true):
            true_sum = bincount(y_true, weights=sample_weight,
                                minlength=len(labels))
        # Retain only selected labels
        indices = np.searchsorted(sorted_labels, labels[:n_labels])
        tp_sum = tp_sum[indices]
        true_sum = true_sum[indices]
        pred_sum = pred_sum[indices]
    if average == 'micro':
        # Micro-averaging pools the counts over all labels before dividing.
        tp_sum = np.array([tp_sum.sum()])
        pred_sum = np.array([pred_sum.sum()])
        true_sum = np.array([true_sum.sum()])
    # Finally, we have all our sufficient statistics. Divide! #
    beta2 = beta ** 2
    with np.errstate(divide='ignore', invalid='ignore'):
        # Divide, and on zero-division, set scores to 0 and warn:
        # Oddly, we may get an "invalid" rather than a "divide" error
        # here.
        precision = _prf_divide(tp_sum, pred_sum,
                                'precision', 'predicted', average, warn_for)
        recall = _prf_divide(tp_sum, true_sum,
                             'recall', 'true', average, warn_for)
        # Don't need to warn for F: either P or R warned, or tp == 0 where pos
        # and true are nonzero, in which case, F is well-defined and zero
        f_score = ((1 + beta2) * precision * recall /
                   (beta2 * precision + recall))
        f_score[tp_sum == 0] = 0.0
    # Average the results
    if average == 'weighted':
        weights = true_sum
        if weights.sum() == 0:
            return 0, 0, 0, None
    elif average == 'samples':
        weights = sample_weight
    else:
        weights = None
    if average is not None:
        assert average != 'binary' or len(precision) == 1
        precision = np.average(precision, weights=weights)
        recall = np.average(recall, weights=weights)
        f_score = np.average(f_score, weights=weights)
        true_sum = None  # return no support
    return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
                    average='binary', sample_weight=None):
    """Compute the precision.

    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number
    of true positives and ``fp`` the number of false positives.  It is
    intuitively the ability of the classifier not to label as positive a
    sample that is negative.

    The best value is 1 and the worst value is 0.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``.  Labels present in the data can be
        excluded (e.g. to ignore a majority negative class), while labels not
        present in the data give 0 components in a macro average.  For
        multilabel targets, labels are column indices.  By default, all
        labels in ``y_true`` and ``y_pred`` are used in sorted order.

        .. versionchanged:: 0.17
           parameter *labels* improved for multiclass problem.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is binary.
        Ignored for multiclass/multilabel data; use ``labels=[pos_label]``
        with ``average != 'binary'`` to report scores for one label only.

    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
                       'weighted']
        Required for multiclass/multilabel targets.  If ``None``, per-class
        scores are returned.  Otherwise:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``
            (binary targets only).
        ``'micro'``:
            Count total true positives, false negatives and false positives
            globally.
        ``'macro'``:
            Unweighted mean of per-label metrics; ignores label imbalance.
        ``'weighted'``:
            Mean of per-label metrics weighted by support; can yield an
            F-score that is not between precision and recall.
        ``'samples'``:
            Average metrics over instances (multilabel only).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Precision of the positive class in binary classification or weighted
        average of the precision of each class for the multiclass task.

    Examples
    --------
    >>> from sklearn.metrics import precision_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> precision_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
    0.22...
    >>> precision_score(y_true, y_pred, average='micro')  # doctest: +ELLIPSIS
    0.33...
    >>> precision_score(y_true, y_pred, average='weighted')
    ... # doctest: +ELLIPSIS
    0.22...
    >>> precision_score(y_true, y_pred, average=None)  # doctest: +ELLIPSIS
    array([ 0.66...,  0.        ,  0.        ])
    """
    # Delegate to the shared helper and keep only the precision component;
    # restrict warnings to precision since that is all we report here.
    precision, _, _, _ = precision_recall_fscore_support(
        y_true, y_pred, labels=labels, pos_label=pos_label, average=average,
        warn_for=('precision',), sample_weight=sample_weight)
    return precision
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
                 sample_weight=None):
    """Compute the recall.

    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives.  It is
    intuitively the ability of the classifier to find all the positive
    samples.

    The best value is 1 and the worst value is 0.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``.  Labels present in the data can be
        excluded (e.g. to ignore a majority negative class), while labels not
        present in the data give 0 components in a macro average.  For
        multilabel targets, labels are column indices.  By default, all
        labels in ``y_true`` and ``y_pred`` are used in sorted order.

        .. versionchanged:: 0.17
           parameter *labels* improved for multiclass problem.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is binary.
        Ignored for multiclass/multilabel data; use ``labels=[pos_label]``
        with ``average != 'binary'`` to report scores for one label only.

    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
                       'weighted']
        Required for multiclass/multilabel targets.  If ``None``, per-class
        scores are returned.  Otherwise:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``
            (binary targets only).
        ``'micro'``:
            Count total true positives, false negatives and false positives
            globally.
        ``'macro'``:
            Unweighted mean of per-label metrics; ignores label imbalance.
        ``'weighted'``:
            Mean of per-label metrics weighted by support; can yield an
            F-score that is not between precision and recall.
        ``'samples'``:
            Average metrics over instances (multilabel only).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Recall of the positive class in binary classification or weighted
        average of the recall of each class for the multiclass task.

    Examples
    --------
    >>> from sklearn.metrics import recall_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> recall_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
    0.33...
    >>> recall_score(y_true, y_pred, average='micro')  # doctest: +ELLIPSIS
    0.33...
    >>> recall_score(y_true, y_pred, average='weighted')  # doctest: +ELLIPSIS
    0.33...
    >>> recall_score(y_true, y_pred, average=None)
    array([ 1.,  0.,  0.])
    """
    # Delegate to the shared helper and keep only the recall component;
    # restrict warnings to recall since that is all we report here.
    recall = precision_recall_fscore_support(
        y_true, y_pred, labels=labels, pos_label=pos_label, average=average,
        warn_for=('recall',), sample_weight=sample_weight)[1]
    return recall
def classification_report(y_true, y_pred, labels=None, target_names=None,
                          sample_weight=None, digits=2):
    """Build a text report showing the main classification metrics.

    Read more in the :ref:`User Guide <classification_report>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : array, shape = [n_labels]
        Optional list of label indices to include in the report.

    target_names : list of strings
        Optional display names matching the labels (same order).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    digits : int
        Number of digits for formatting output floating point values.

    Returns
    -------
    report : string
        Text summary of the precision, recall, F1 score for each class.
        The reported averages are a prevalence-weighted macro-average across
        classes (equivalent to :func:`precision_recall_fscore_support` with
        ``average='weighted'``).

        Note that in binary classification, recall of the positive class is
        also known as "sensitivity"; recall of the negative class is
        "specificity".

    Examples
    --------
    >>> from sklearn.metrics import classification_report
    >>> y_true = [0, 1, 2, 2, 2]
    >>> y_pred = [0, 0, 2, 2, 1]
    >>> target_names = ['class 0', 'class 1', 'class 2']
    >>> print(classification_report(y_true, y_pred, target_names=target_names))
                 precision    recall  f1-score   support
    <BLANKLINE>
        class 0       0.50      1.00      0.67         1
        class 1       0.00      0.00      0.00         1
        class 2       1.00      0.67      0.80         3
    <BLANKLINE>
    avg / total       0.70      0.60      0.61         5
    <BLANKLINE>
    """
    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)
    # A mismatch between labels and their display names is tolerated, but the
    # user is warned since rows will pair up incorrectly.
    if target_names is not None and len(labels) != len(target_names):
        warnings.warn(
            "labels size, {0}, does not match size of target_names, {1}"
            .format(len(labels), len(target_names))
        )
    last_line_heading = 'avg / total'
    if target_names is None:
        target_names = [u'%s' % l for l in labels]
    # The first column is wide enough for the longest name, the summary-row
    # heading, or the requested float precision.
    longest_name = max(len(name) for name in target_names)
    width = max(longest_name, len(last_line_heading), digits)
    headers = ["precision", "recall", "f1-score", "support"]
    head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)
    row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\n'
    pieces = [head_fmt.format(u'', *headers, width=width), u'\n\n']
    p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
                                                  labels=labels,
                                                  average=None,
                                                  sample_weight=sample_weight)
    # One line per class.
    for name, prec, rec, fscore, support in zip(target_names, p, r, f1, s):
        pieces.append(row_fmt.format(name, prec, rec, fscore, support,
                                     width=width, digits=digits))
    pieces.append(u'\n')
    # Support-weighted averages (same as average='weighted').
    pieces.append(row_fmt.format(last_line_heading,
                                 np.average(p, weights=s),
                                 np.average(r, weights=s),
                                 np.average(f1, weights=s),
                                 np.sum(s),
                                 width=width, digits=digits))
    return u''.join(pieces)
def hamming_loss(y_true, y_pred, labels=None, sample_weight=None,
                 classes=None):
    """Compute the average Hamming loss.

    The Hamming loss is the fraction of labels that are incorrectly
    predicted.

    Read more in the :ref:`User Guide <hamming_loss>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    labels : array, shape = [n_labels], optional (default=None)
        Integer array of labels.  If not provided, labels will be inferred
        from y_true and y_pred.

        .. versionadded:: 0.18

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

        .. versionadded:: 0.18

    classes : array, shape = [n_labels], optional
        (deprecated) Integer array of labels.  This parameter has been
        renamed to ``labels`` in version 0.18 and will be removed in 0.20.

    Returns
    -------
    loss : float or int,
        Return the average Hamming loss between element of ``y_true`` and
        ``y_pred``.

    See Also
    --------
    accuracy_score, jaccard_similarity_score, zero_one_loss

    Notes
    -----
    In multiclass classification, the Hamming loss corresponds to the Hamming
    distance between ``y_true`` and ``y_pred``, which is equivalent to the
    subset ``zero_one_loss`` function.

    In multilabel classification, the Hamming loss differs from the subset
    zero-one loss: the latter counts a sample as wrong unless its entire
    label set matches exactly, whereas the Hamming loss penalizes individual
    labels.  The Hamming loss is upperbounded by the subset zero-one loss,
    and when normalized over samples it always lies between 0 and 1.

    References
    ----------
    .. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
           An Overview. International Journal of Data Warehousing & Mining,
           3(3), 1-13, July-September 2007.

    .. [2] `Wikipedia entry on the Hamming distance
           <https://en.wikipedia.org/wiki/Hamming_distance>`_

    Examples
    --------
    >>> from sklearn.metrics import hamming_loss
    >>> y_pred = [1, 2, 3, 4]
    >>> y_true = [2, 2, 3, 4]
    >>> hamming_loss(y_true, y_pred)
    0.25

    In the multilabel case with binary label indicators:

    >>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
    0.75
    """
    # Honour the deprecated 'classes' alias for 'labels'.
    if classes is not None:
        warnings.warn("'classes' was renamed to 'labels' in version 0.18 and "
                      "will be removed in 0.20.", DeprecationWarning)
        labels = classes
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    labels = (unique_labels(y_true, y_pred) if labels is None
              else np.asarray(labels))
    # The mean weight rescales the denominator so that weighting is a
    # no-op when all weights are equal.
    weight_average = 1. if sample_weight is None else np.mean(sample_weight)
    if y_type.startswith('multilabel'):
        n_differences = count_nonzero(y_true - y_pred,
                                      sample_weight=sample_weight)
        return (n_differences /
                (y_true.shape[0] * len(labels) * weight_average))
    if y_type in ["binary", "multiclass"]:
        return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
    raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
             labels=None):
    """Log loss, aka logistic loss or cross-entropy loss.

    The negative log-likelihood of the true labels under a probabilistic
    classifier's predicted probabilities; this is the loss minimized by
    (multinomial) logistic regression and neural networks.  It is only
    defined for two or more labels.  For a single sample with true label
    yt in {0,1} and estimated probability yp that yt = 1,

        -log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))

    Read more in the :ref:`User Guide <log_loss>`.

    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) labels for n_samples samples.

    y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)
        Predicted probabilities, as returned by a classifier's
        predict_proba method.  A 1-d ``y_pred`` is interpreted as the
        probability of the positive class.  The columns of ``y_pred`` are
        assumed to be ordered alphabetically, as done by
        :class:`preprocessing.LabelBinarizer`.

    eps : float
        Log loss is undefined for p=0 or p=1, so probabilities are
        clipped to max(eps, min(1 - eps, p)).

    normalize : bool, optional (default=True)
        If true, return the mean loss per sample.
        Otherwise, return the sum of the per-sample losses.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    labels : array-like, optional (default=None)
        If not provided, labels will be inferred from y_true. If ``labels``
        is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
        assumed to be binary and are inferred from ``y_true``.

        .. versionadded:: 0.18

    Returns
    -------
    loss : float

    Examples
    --------
    >>> log_loss(["spam", "ham", "ham", "spam"],  # doctest: +ELLIPSIS
    ...          [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
    0.21616...

    References
    ----------
    C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
    p. 209.

    Notes
    -----
    The logarithm used is the natural logarithm (base-e).
    """
    y_pred = check_array(y_pred, ensure_2d=False)
    check_consistent_length(y_pred, y_true)

    # Binarize the ground truth against the provided (or inferred) labels.
    lb = LabelBinarizer()
    lb.fit(y_true if labels is None else labels)

    if len(lb.classes_) == 1:
        # A single class makes the loss ill-defined; the message depends
        # on whether labels were supplied explicitly.
        if labels is None:
            raise ValueError('y_true contains only one label ({0}). Please '
                             'provide the true labels explicitly through the '
                             'labels argument.'.format(lb.classes_[0]))
        raise ValueError('The labels array needs to contain at least two '
                         'labels for log_loss, '
                         'got {0}.'.format(lb.classes_))

    transformed_labels = lb.transform(y_true)
    if transformed_labels.shape[1] == 1:
        # Binary case: expand the single indicator column into the
        # (negative, positive) two-column layout.
        transformed_labels = np.append(1 - transformed_labels,
                                       transformed_labels, axis=1)

    # Keep the logarithm finite by clipping probabilities away from 0 and 1.
    y_pred = np.clip(y_pred, eps, 1 - eps)

    # If y_pred is of single dimension, assume y_true to be binary
    # and expand it to two columns as well.
    if y_pred.ndim == 1:
        y_pred = y_pred[:, np.newaxis]
    if y_pred.shape[1] == 1:
        y_pred = np.append(1 - y_pred, y_pred, axis=1)

    # Check if dimensions are consistent.
    transformed_labels = check_array(transformed_labels)
    if len(lb.classes_) != y_pred.shape[1]:
        if labels is None:
            raise ValueError("y_true and y_pred contain different number of "
                             "classes {0}, {1}. Please provide the true "
                             "labels explicitly through the labels argument. "
                             "Classes found in "
                             "y_true: {2}".format(transformed_labels.shape[1],
                                                  y_pred.shape[1],
                                                  lb.classes_))
        raise ValueError('The number of classes in labels is different '
                         'from that in y_pred. Classes found in '
                         'labels: {0}'.format(lb.classes_))

    # Renormalize so each row of predicted probabilities sums to one.
    y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
    loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)

    return _weighted_sum(loss, sample_weight, normalize)
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
    """Average hinge loss (non-regularized)

    In the binary case, assuming labels in y_true are encoded with +1 and
    -1, a prediction mistake gives a negative
    ``margin = y_true * pred_decision`` (the signs disagree), so
    ``1 - margin`` exceeds 1.  The cumulated hinge loss is therefore an
    upper bound on the number of mistakes made by the classifier.

    In the multiclass case, either all labels must appear in y_true or an
    optional labels argument must list them.  The multilabel margin is
    calculated according to Crammer-Singer's method.  As in the binary
    case, the cumulated hinge loss upper-bounds the number of mistakes.

    Read more in the :ref:`User Guide <hinge_loss>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True target, consisting of integers of two values. The positive
        label must be greater than the negative label.

    pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
        Predicted decisions, as output by decision_function (floats).

    labels : array, optional, default None
        Contains all the labels for the problem. Used in multiclass hinge
        loss.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float

    References
    ----------
    .. [1] `Wikipedia entry on the Hinge loss
           <https://en.wikipedia.org/wiki/Hinge_loss>`_

    .. [2] Koby Crammer, Yoram Singer. On the Algorithmic
           Implementation of Multiclass Kernel-based Vector
           Machines. Journal of Machine Learning Research 2,
           (2001), 265-292

    .. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
           by Robert C. Moore, John DeNero.
           <http://www.ttic.edu/sigml/symposium2011/papers/
           Moore+DeNero_Regularization.pdf>`_

    Examples
    --------
    >>> from sklearn import svm
    >>> from sklearn.metrics import hinge_loss
    >>> X = [[0], [1]]
    >>> y = [-1, 1]
    >>> est = svm.LinearSVC(random_state=0)
    >>> est.fit(X, y)
    LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
         intercept_scaling=1, loss='squared_hinge', max_iter=1000,
         multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
         verbose=0)
    >>> pred_decision = est.decision_function([[-2], [3], [0.5]])
    >>> pred_decision  # doctest: +ELLIPSIS
    array([-2.18...,  2.36...,  0.09...])
    >>> hinge_loss([-1, 1, 1], pred_decision)  # doctest: +ELLIPSIS
    0.30...
    """
    check_consistent_length(y_true, pred_decision, sample_weight)
    pred_decision = check_array(pred_decision, ensure_2d=False)
    y_true = column_or_1d(y_true)
    observed_labels = np.unique(y_true)

    if observed_labels.size > 2:
        # Multiclass: Crammer-Singer margin = score of the true class minus
        # the best score among the remaining classes.
        if (labels is None and pred_decision.ndim > 1 and
                (np.size(observed_labels) != pred_decision.shape[1])):
            raise ValueError("Please include all labels in y_true "
                             "or pass labels as third argument")
        if labels is None:
            labels = observed_labels
        encoder = LabelEncoder()
        encoder.fit(labels)
        y_true = encoder.transform(y_true)
        # Mask out the column belonging to the true label of each sample.
        mask = np.ones_like(pred_decision, dtype=bool)
        mask[np.arange(y_true.shape[0]), y_true] = False
        margin = pred_decision[~mask]
        margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
                         axis=1)
    else:
        # Binary case: this code assumes that positive and negative labels
        # are encoded as +1 and -1 respectively.
        pred_decision = np.ravel(column_or_1d(pred_decision))
        binarizer = LabelBinarizer(neg_label=-1)
        y_true = binarizer.fit_transform(y_true)[:, 0]
        try:
            margin = y_true * pred_decision
        except TypeError:
            raise TypeError("pred_decision should be an array of floats.")

    losses = 1 - margin
    # The hinge_loss doesn't penalize good enough predictions.
    losses[losses <= 0] = 0
    return np.average(losses, weights=sample_weight)
def _check_binary_probabilistic_predictions(y_true, y_prob):
    """Check that y_true is binary and y_prob contains valid probabilities"""
    check_consistent_length(y_true, y_prob)

    observed = np.unique(y_true)
    if len(observed) > 2:
        raise ValueError("Only binary classification is supported. "
                         "Provided labels %s." % observed)

    # Probabilities must lie in [0, 1].
    if y_prob.max() > 1:
        raise ValueError("y_prob contains values greater than 1.")
    if y_prob.min() < 0:
        raise ValueError("y_prob contains values less than 0.")

    # Return the 0/1 indicator column for the positive class.
    return label_binarize(y_true, observed)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
    """Compute the Brier score.

    The smaller the Brier score, the better, hence the naming with "loss".
    Over a set of N predictions it is the mean squared difference between
    the predicted probability assigned to the possible outcomes for item i
    and the actual outcome.  Lower scores mean better-calibrated
    predictions.  The score always lies in [0, 1], the largest possible
    difference between a probability (in [0, 1]) and an outcome (0 or 1).

    The Brier score is appropriate for binary and categorical outcomes
    that can be structured as true or false, but not for ordinal variables
    with three or more values (it assumes all outcomes are equivalently
    "distant" from one another).  Which label is considered positive is
    controlled via ``pos_label``, which defaults to 1.

    Read more in the :ref:`User Guide <calibration>`.

    Parameters
    ----------
    y_true : array, shape (n_samples,)
        True targets.

    y_prob : array, shape (n_samples,)
        Probabilities of the positive class.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    pos_label : int or str, default=None
        Label of the positive class. If None, the maximum label is used as
        positive class

    Returns
    -------
    score : float
        Brier score

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import brier_score_loss
    >>> y_true = np.array([0, 1, 1, 0])
    >>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
    >>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    >>> brier_score_loss(y_true, y_prob)  # doctest: +ELLIPSIS
    0.037...
    >>> brier_score_loss(y_true, 1-y_prob, pos_label=0)  # doctest: +ELLIPSIS
    0.037...
    >>> brier_score_loss(y_true_categorical, y_prob, \
                         pos_label="ham")  # doctest: +ELLIPSIS
    0.037...
    >>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
    0.0

    References
    ----------
    .. [1] `Wikipedia entry for the Brier score.
            <https://en.wikipedia.org/wiki/Brier_score>`_
    """
    y_true = column_or_1d(y_true)
    y_prob = column_or_1d(y_prob)
    assert_all_finite(y_true)
    assert_all_finite(y_prob)

    # Default positive class: the largest label value.
    if pos_label is None:
        pos_label = y_true.max()
    # Map the targets onto {0, 1}, with 1 marking the positive class.
    y_true = np.array(y_true == pos_label, int)
    y_true = _check_binary_probabilistic_predictions(y_true, y_prob)

    squared_diff = (y_true - y_prob) ** 2
    return np.average(squared_diff, weights=sample_weight)
|
bsd-3-clause
|
jsamoocha/pysweat
|
pysweat/persistence/activities.py
|
1
|
2104
|
from pymongo import UpdateOne
import pandas as pd
import numpy as np
import json
import logging
from pymongo.errors import BulkWriteError
def load_activities(mongo, **query):
    """Fetch the activities matching *query* and return them as a DataFrame."""
    cursor = mongo.db.activities.find(query)
    return pd.DataFrame(list(cursor))
def __should_write_field(key, value):
    """Return True when *key*/*value* should be included in a $set update.

    The 'strava_id' key is excluded (it is the match key), as are NaN
    values; non-numeric values (for which np.isnan raises TypeError) are
    always written.
    """
    if key == 'strava_id':
        return False
    try:
        value_is_nan = np.isnan(value)
    except TypeError:
        # Non-numeric value (e.g. a string): always persist it.
        return True
    return not value_is_nan
def save_activities(mongo, activities_df):
    """Upsert every row of *activities_df* into the activities collection.

    Rows are matched on 'strava_id'; only fields accepted by
    __should_write_field (non-NaN, not the match key) are written.
    Bulk-write failures are logged rather than raised (best-effort
    persistence, preserving the original contract).
    """
    try:
        mongo.db.activities.bulk_write([
            UpdateOne({'strava_id': record['strava_id']},
                      {'$set': {key: value for (key, value) in record.items()
                                if __should_write_field(key, value)}},
                      upsert=True)
            # 'records' is the documented orient spelling; the abbreviated
            # 'record' was deprecated and then removed in modern pandas.
            for record in activities_df.to_dict(orient='records')
        ])
    except BulkWriteError as bwe:
        logging.error('Failed to persist (updated) activities: %s', str(bwe.details))
def get_activity_types(mongo):
    """Return the distinct activity type values present in the collection."""
    collection = mongo.db.activities
    return collection.find().distinct('type')
def get_first_activity_without_feature_for_type(mongo, feature_name, activity_type='Run', athlete_id=None):
    """Returns the datetime of the first activity (in time) of the given type for which the given feature does not
    exist. If athlete_id is provided, returns only the datetime for that athlete.

    The $match stage is built as a plain dict instead of interpolating into
    a JSON string: the old string-building broke for string athlete_ids
    (unquoted value -> invalid JSON) and for feature names containing
    JSON-special characters.
    """
    match = {
        'type': activity_type,
        'suspicious': {'$exists': False},
        'flagged': False,
        feature_name: {'$exists': False},
    }
    # Truthiness test kept from the original: a falsy athlete_id (None, 0)
    # means "all athletes".
    if athlete_id:
        match['athlete_id'] = athlete_id
    return mongo.db.activities.aggregate([
        {'$match': match},
        {'$group': {'_id': '$athlete_id',
                    'first_date': {'$min': '$start_date_local'}}}])
|
apache-2.0
|
shahankhatch/scikit-learn
|
examples/missing_values.py
|
233
|
3056
|
"""
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)

dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]

# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)

# Add missing values in 75% of the lines
missing_rate = 0.75
# np.floor returns a float; cast to int because it is used below as an
# array length (np.zeros/np.ones) and a sample count (randint), which
# modern NumPy requires to be integers.
n_missing_samples = int(np.floor(n_samples * missing_rate))
# Boolean mask: True marks a sample that will receive a missing value.
# (np.bool was a removed alias of the builtin bool.)
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
                                      dtype=bool),
                             np.ones(n_missing_samples,
                                     dtype=bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)

# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)

# Estimate the score after imputation of the missing values
# (0 is used as the sentinel for "missing" in this example).
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
                                          strategy="mean",
                                          axis=0)),
                      ("forest", RandomForestRegressor(random_state=0,
                                                       n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
|
bsd-3-clause
|
tynano/slicematrixIO-python
|
slicematrixIO/matrices.py
|
1
|
3516
|
from core import BasePipeline
from utils import rando_name
from uuid import uuid4
import pandas as pd
class DistanceMatrixPipeline(BasePipeline):
    """Reusable backend pipeline that computes a 'dist_matrix' model.

    Wraps BasePipeline with the kernel configuration needed to build a
    (optionally geodesic) pairwise distance matrix.
    """
    def __init__(self, name, kernel = "euclidean", geodesic = False, K = 5, kernel_params = {}, client = None):
        # Bundle the kernel configuration the backend expects.
        params = {}
        params["k"] = K
        params["kernel"] = kernel
        params["kernel_params"] = kernel_params
        params["geodesic"] = geodesic
        BasePipeline.__init__(self, name, "dist_matrix", client, params)

    def run(self, dataset, model):
        """Execute the pipeline on *dataset*, storing the result as *model*."""
        return BasePipeline.run(self, dataset = dataset, model = model)
class DistanceMatrix():
    """Client-side handle for a remote distance-matrix model.

    Construct with a ``dataset`` to create a brand new model on the
    backend, or with only ``name`` to lazily attach to an
    already-persisted model.

    NOTE(review): this codebase targets Python 2 — StandardError is kept
    because callers catch it; it does not exist on Python 3.
    """
    def __init__(self, dataset = None, name = None, pipeline = None, K = 5, kernel = "euclidean", geodesic = False, kernel_params = {}, client = None):
        # kernel_params mutable default kept for interface compatibility;
        # it is only passed through, never mutated here.
        self.client = client
        self.type = "dist_matrix"
        if dataset is not None:
            self.__full_init__(dataset, name, pipeline, K, kernel, geodesic, kernel_params, client)
        else:
            self.__lazy_init__(name)

    # full construction, i.e. start from zero and create it all...
    def __full_init__(self, dataset, name = None, pipeline = None, K = 5, kernel = "euclidean", geodesic = False, kernel_params = {}, client = None):
        if name is None:
            name = rando_name()
        self.name = name
        self.dataset = dataset
        self.pipeline = pipeline
        self.kernel = kernel
        self.kernel_params = kernel_params
        self.K = K
        self.geodesic = geodesic
        if self.pipeline is None:
            pipeline_name = rando_name()
            self.pipeline = DistanceMatrixPipeline(name = pipeline_name,
                                                   K = K,
                                                   kernel = kernel,
                                                   geodesic = geodesic,
                                                   kernel_params = kernel_params,
                                                   client = client)
        self.response = self.pipeline.run(self.dataset, self.name)
        try:
            # model will be key if success
            model = self.response['model']
            self.name = model.split("/")[-1]
        except Exception:
            # something went wrong creating the model; surface the raw
            # backend response.  (Narrowed from a bare except so that
            # KeyboardInterrupt/SystemExit are no longer swallowed.)
            raise StandardError(self.response)

    # lazy loading for already persisted models
    def __lazy_init__(self, model_name):
        self.name = model_name

    def rankDist(self, target, page = 0):
        """Rank all points by distance to *target*; returns a sorted DataFrame."""
        extra_params = {"target": target,
                        "page": page}
        response = self.client.call_model(model = self.name,
                                          type = self.type,
                                          method = "rankDist",
                                          extra_params = extra_params)
        try:
            ranked = pd.DataFrame(response['rankDist'], index = ['distance']).T
            # DataFrame.sort() was removed in pandas 0.20; sort_values is
            # the supported equivalent (same ascending-by-column behavior).
            return ranked.sort_values(by = "distance")
        except Exception:
            raise StandardError(response)

    def getKeys(self):
        """Return the row/column keys of the remote distance matrix."""
        response = self.client.call_model(model = self.name,
                                          type = self.type,
                                          method = "getKeys",
                                          extra_params = {})
        try:
            return response['getKeys']
        except Exception:
            raise StandardError(response)
|
mit
|
houghb/savvy
|
savvy/plotting.py
|
2
|
22776
|
"""
This module creates plots for visualizing sensitivity analysis dataframes.
`make_plot()` creates a radial plot of the first and total order indices.
`make_second_order_heatmap()` creates a square heat map showing the second
order interactions between model parameters.
"""
from collections import OrderedDict
import numpy as np
import pandas as pd
from bokeh.plotting import figure, ColumnDataSource
from bokeh.models import HoverTool
from bokeh.charts import Bar
def make_plot(dataframe=pd.DataFrame(), highlight=[],
              top=100, minvalues=0.01, stacked=True, lgaxis=True,
              errorbar=True, showS1=True, showST=True):
    """
    Basic method to plot first and total order sensitivity indices.

    This is the method to generate a Bokeh plot similar to the burtin example
    template at the Bokeh website. For clarification, parameters refer to an
    input being measured (Tmax, C, k2, etc.) and stats refer to the 1st or
    total order sensitivity index.

    Parameters
    -----------
    dataframe : pandas dataframe
        Dataframe containing sensitivity analysis results to be plotted.
        Must contain columns 'S1', 'ST', 'Parameter', 'S1_conf', 'ST_conf'.
    highlight : lst, optional
        List of strings indicating which parameter wedges will be
        highlighted.  (Mutable default is safe here: it is only read,
        never mutated.)
    top : int, optional
        Integer indicating the number of parameters to display
        (highest sensitivity values) (after minimum cutoff is applied).
    minvalues : float, optional
        Cutoff minimum for which parameters should be plotted.
        Applies to total order only.
    stacked : bool, optional
        Boolean indicating if bars should be stacked for each
        parameter (True) or unstacked (False).
    lgaxis : bool, optional
        Boolean indicating if log axis should be used (True) or if a
        linear axis should be used (False).
    errorbar : bool, optional
        Boolean indicating if error bars are shown (True) or are
        omitted (False).
    showS1 : bool, optional
        Boolean indicating whether 1st order sensitivity indices
        will be plotted (True) or omitted (False).
    showST : bool, optional
        Boolean indicating whether total order sensitivity indices
        will be plotted (True) or omitted (False).

        **Note if showS1 and showST are both false, the plot will
        default to showing ST data only instead of a blank plot**

    Returns
    --------
    p : bokeh figure
        A Bokeh figure of the data to be plotted
    """
    df = dataframe
    top = int(top)

    # Initialize boolean checks and check dataframe structure
    if (('S1' not in df) or ('ST' not in df) or ('Parameter' not in df) or
            ('ST_conf' not in df) or ('S1_conf' not in df)):
        raise Exception('Dataframe not formatted correctly')

    # Remove rows which have values less than cutoff values
    df = df[df['ST'] > minvalues]
    df = df.dropna()

    # Only keep top values indicated by variable top
    df = df.sort_values('ST', ascending=False)
    df = df.head(top)
    df = df.reset_index(drop=True)

    # Create arrays of colors and order labels for plotting
    colors = ["#a1d99b", "#31a354", "#546775", "#225ea8"]
    s1color = np.array(["#31a354"]*df.S1.size)
    sTcolor = np.array(["#a1d99b"]*df.ST.size)
    errs1color = np.array(["#225ea8"]*df.S1.size)
    errsTcolor = np.array(["#546775"]*df.ST.size)
    firstorder = np.array(["1st (S1)"]*df.S1.size)
    totalorder = np.array(["Total (ST)"]*df.S1.size)

    # Add column indicating which parameters should be highlighted
    tohighlight = df.Parameter.isin(highlight)
    df['highlighted'] = tohighlight

    # Wedge background color: darker for highlighted parameters.
    back_color = {
                  True: "#aeaeb8",
                  False: "#e6e6e6",
                  }

    # Switch to bar chart if dataframe shrinks below 5 parameters
    if len(df) <= 5:
        if stacked is False:
            # Grouped (side-by-side) bars for ST and S1.
            data = {
                    'Sensitivity': pd.Series.append(df.ST, df.S1),
                    'Parameter': pd.Series.append(df.Parameter, df.Parameter),
                    'Order': np.append(np.array(['ST']*len(df)),
                                       np.array(['S1']*len(df))),
                    'Confidence': pd.Series.append(df.ST_conf,
                                                   df.S1_conf)
                    }
            p = Bar(data, values='Sensitivity', label='Parameter',
                    group='Order', legend='top_right',
                    color=["#31a354", "#a1d99b"], ylabel='Sensitivity Indices')
        else:
            # Stacked bars: S1 on the bottom, (ST - S1) stacked on top so
            # the total bar height equals ST.
            data = {
                    'Sensitivity': pd.Series.append(df.S1, (df.ST-df.S1)),
                    'Parameter': pd.Series.append(df.Parameter, df.Parameter),
                    'Order': np.append(np.array(['S1']*len(df)),
                                       np.array(['ST']*len(df))),
                    'Confidence': pd.Series.append(df.S1_conf,
                                                   df.ST_conf)
                    }
            p = Bar(data, values='Sensitivity', label='Parameter',
                    color='Order', legend='top_right',
                    stack='Order', palette=["#31a354", "#a1d99b"],
                    ylabel='Sensitivity Indices')
        return p

    # Create Dictionary of colors
    stat_color = OrderedDict()
    error_color = OrderedDict()
    for i in range(0, 2):
        stat_color[i] = colors[i]
    # Reset index of dataframe.
    for i in range(2, 4):
        error_color[i] = colors[i]

    # Sizing parameters
    width = 800
    height = 800
    inner_radius = 90
    outer_radius = 300 - 10

    # Determine wedge size based off number of parameters
    big_angle = 2.0 * np.pi / (len(df)+1)
    # Determine division of wedges for plotting bars based on # stats plotted
    # for stacked or unstacked bars
    if stacked is False:
        small_angle = big_angle / 5
    else:
        small_angle = big_angle / 3

    # tools enabled for bokeh figure
    plottools = "hover, wheel_zoom, save, reset, resize"  # , tap"
    # Initialize figure with tools, coloring, etc.
    p = figure(plot_width=width, plot_height=height, title="",
               x_axis_type=None, y_axis_type=None,
               x_range=(-350, 350), y_range=(-350, 350),
               min_border=0, outline_line_color="#e6e6e6",
               background_fill_color="#e6e6e6", border_fill_color="#e6e6e6",
               tools=plottools)

    # Specify labels for hover tool
    hover = p.select(dict(type=HoverTool))
    hover.tooltips = [("Order", "@Order"), ("Parameter", "@Param"),
                      ("Sensitivity", "@Sens"), ("Confidence", "@Conf")]
    hover.point_policy = "follow_mouse"
    p.xgrid.grid_line_color = None
    p.ygrid.grid_line_color = None

    # annular wedges divided into smaller sections for bars
    # Angles for axial line placement
    num_lines = np.arange(0, len(df)+1, 1)
    line_angles = np.pi/2 - big_angle/2 - num_lines*big_angle

    # Angles for data placement
    angles = np.pi/2 - big_angle/2 - df.index.to_series()*big_angle

    # circular axes and labels
    # NOTE(review): np.log10 assumes all remaining S1/ST values are
    # positive; zero or negative indices would yield -inf/NaN labels —
    # confirm upstream filtering guarantees this.
    minlabel = min(round(np.log10(min(df.ST))), round(np.log10(min(df.S1))))
    labels = np.power(10.0, np.arange(0, minlabel-1, -1))

    # Set max radial line to correspond to 1.1 * maximum value + error
    maxvalST = max(df.ST+df.ST_conf)
    maxvalS1 = max(df.S1+df.S1_conf)
    maxval = max(maxvalST, maxvalS1)
    labels = np.append(labels, 0.0)
    labels[0] = round(1.1*maxval, 1)

    # Determine if radial axis are log or linearly scaled
    if lgaxis is True:
        radii = (((np.log10(labels / labels[0])) +
                  labels.size) * (outer_radius - inner_radius) /
                 labels.size + inner_radius)
        radii[-1] = inner_radius
    else:
        labels = np.delete(labels, -2)
        radii = (outer_radius - inner_radius)*labels/labels[0] + inner_radius

    # Convert sensitivity values to the plotted values
    # Same conversion as for the labels above
    # Also calculate the angle to which the bars are placed
    # Add values to the dataframe for future reference
    cols = np.array(['S1', 'ST'])
    for statistic in range(0, 2):
        if lgaxis is True:
            radius_of_stat = (((np.log10(df[cols[statistic]] / labels[0])) +
                               labels.size) * (outer_radius - inner_radius) /
                              labels.size + inner_radius)
            lower_of_stat = (((np.log10((df[cols[statistic]] -
                              df[cols[statistic]+'_conf']) / labels[0])) +
                              labels.size) * (outer_radius - inner_radius) /
                             labels.size + inner_radius)
            higher_of_stat = (((np.log10((df[cols[statistic]] +
                               df[cols[statistic]+'_conf']) / labels[0])) +
                               labels.size) * (outer_radius - inner_radius) /
                              labels.size + inner_radius)
        else:
            radius_of_stat = ((outer_radius - inner_radius) *
                              df[cols[statistic]]/labels[0] + inner_radius)
            lower_of_stat = ((outer_radius - inner_radius) *
                             (df[cols[statistic]] -
                              df[cols[statistic]+'_conf'])/labels[0] +
                             inner_radius)
            higher_of_stat = ((outer_radius - inner_radius) *
                              ((df[cols[statistic]] +
                                df[cols[statistic]+'_conf'])/labels[0]) +
                              inner_radius)
        # Start/stop angles of each bar within its parameter wedge.
        if stacked is False:
            startA = -big_angle + angles + (2*statistic + 1)*small_angle
            stopA = -big_angle + angles + (2*statistic + 2)*small_angle
            df[cols[statistic]+'_err_angle'] = pd.Series((startA+stopA)/2,
                                                         index=df.index)
        else:
            startA = -big_angle + angles + (1)*small_angle
            stopA = -big_angle + angles + (2)*small_angle
            if statistic == 0:
                df[cols[statistic]+'_err_angle'] = pd.Series((startA*2 +
                                                              stopA)/3,
                                                             index=df.index)
            if statistic == 1:
                df[cols[statistic]+'_err_angle'] = pd.Series((startA +
                                                              stopA*2)/3,
                                                             index=df.index)
        # Persist the computed radii/angles as new columns (e.g. 'S1radial',
        # 'STupper', ...) for use when building pdata below.
        df[cols[statistic]+'radial'] = pd.Series(radius_of_stat,
                                                 index=df.index)
        df[cols[statistic]+'upper'] = pd.Series(higher_of_stat,
                                                index=df.index)
        df[cols[statistic]+'lower'] = pd.Series(lower_of_stat,
                                                index=df.index)
        df[cols[statistic]+'_start_angle'] = pd.Series(startA,
                                                       index=df.index)
        df[cols[statistic]+'_stop_angle'] = pd.Series(stopA,
                                                      index=df.index)
        # df[cols[statistic]+'_err_angle'] = pd.Series((startA+stopA)/2,
        #                                              index=df.index)
        inner_rad = np.ones_like(angles)*inner_radius
        # NaN lower bounds (log of a non-positive value) are clamped to the
        # inner radius (90) so the error bar stays drawable.
        df[cols[statistic]+'lower'] = df[cols[statistic]+'lower'].fillna(90)

    # Store plotted values into dictionary to be add glyphs
    # (ST rows first, then S1 rows, so head()/tail() below can slice them.)
    pdata = pd.DataFrame({
                          'x': np.append(np.zeros_like(inner_rad),
                                         np.zeros_like(inner_rad)),
                          'y': np.append(np.zeros_like(inner_rad),
                                         np.zeros_like(inner_rad)),
                          'ymin': np.append(inner_rad, inner_rad),
                          'ymax': pd.Series.append(df[cols[1]+'radial'],
                                                   df[cols[0]+'radial']
                                                   ).reset_index(drop=True),
                          'starts': pd.Series.append(df[cols[1] +
                                                        '_start_angle'],
                                                     df[cols[0] +
                                                        '_start_angle']
                                                     ).reset_index(drop=True),
                          'stops': pd.Series.append(df[cols[1] +
                                                       '_stop_angle'],
                                                    df[cols[0] +
                                                       '_stop_angle']
                                                    ).reset_index(drop=True),
                          'Param': pd.Series.append(df.Parameter,
                                                    df.Parameter
                                                    ).reset_index(drop=True),
                          'Colors': np.append(sTcolor, s1color),
                          'Error Colors': np.append(errsTcolor, errs1color),
                          'Conf': pd.Series.append(df.ST_conf,
                                                   df.S1_conf
                                                   ).reset_index(drop=True),
                          'Order': np.append(totalorder, firstorder),
                          'Sens': pd.Series.append(df.ST, df.S1
                                                   ).reset_index(drop=True),
                          'Lower': pd.Series.append(df.STlower,
                                                    df.S1lower
                                                    ).reset_index(drop=True),
                          'Upper': pd.Series.append(df.STupper,
                                                    df.S1upper,
                                                    ).reset_index(drop=True),
                          'Err_Angle': pd.Series.append(df.ST_err_angle,
                                                        df.S1_err_angle,
                                                        ).reset_index(drop=True)
                          })

    # removed S1 or ST values if indicated by input
    if showS1 is False:
        pdata = pdata.head(len(df))
    if showST is False:
        pdata = pdata.tail(len(df))

    # convert dataframe to ColumnDataSource for glyphs
    pdata_s = ColumnDataSource(pdata)

    # Background wedges; note this rebinds (shadows) the 'colors' palette
    # list defined earlier.
    colors = [back_color[highl] for highl in df.highlighted]
    p.annular_wedge(
        0, 0, inner_radius, outer_radius, -big_angle+angles,
        angles, color=colors,
    )

    # Adding axis lines and labels
    p.circle(0, 0, radius=radii, fill_color=None, line_color="white")
    p.text(0, radii[:], [str(r) for r in labels[:]],
           text_font_size="8pt", text_align="center", text_baseline="middle")

    # Specify that the plotted bars are the only thing to activate hovertool
    hoverable = p.annular_wedge(x='x', y='y', inner_radius='ymin',
                                outer_radius='ymax',
                                start_angle='starts',
                                end_angle='stops',
                                color='Colors',
                                source=pdata_s
                                )
    hover.renderers = [hoverable]

    # Add error bars
    if errorbar is True:
        # Radial stem of the error bar...
        p.annular_wedge(0, 0, pdata['Lower'], pdata['Upper'],
                        pdata['Err_Angle'],
                        pdata['Err_Angle'],
                        color=pdata['Error Colors'], line_width=1.0)
        # ...and the two caps at the lower and upper bounds.
        p.annular_wedge(0, 0, pdata['Lower'], pdata['Lower'],
                        pdata['starts'],
                        pdata['stops'],
                        color=pdata['Error Colors'], line_width=2.0)
        p.annular_wedge(0, 0, pdata['Upper'], pdata['Upper'],
                        pdata['starts'],
                        pdata['stops'],
                        color=pdata['Error Colors'], line_width=2.0)

    # Placement of parameter labels
    xr = (radii[0]*1.1)*np.cos(np.array(-big_angle/2 + angles))
    yr = (radii[0]*1.1)*np.sin(np.array(-big_angle/2 + angles))
    label_angle = np.array(-big_angle/2+angles)
    # Flip labels on the left half of the circle so they stay readable.
    label_angle[label_angle < -np.pi/2] += np.pi

    # Placing Labels and Legend
    legend_text = ['ST', 'ST Conf', 'S1', 'S1 Conf']
    p.text(xr, yr, df.Parameter, angle=label_angle,
           text_font_size="9pt", text_align="center", text_baseline="middle")
    p.rect([-40, -40], [30, -10], width=30, height=13,
           color=list(stat_color.values()))
    p.rect([-40, -40], [10, -30], width=30, height=1,
           color=list(error_color.values()))
    p.text([-15, -15, -15, -15], [30, 10, -10, -30], text=legend_text,
           text_font_size="9pt", text_align="left", text_baseline="middle")
    p.annular_wedge(0, 0, inner_radius-10, outer_radius+10,
                    -big_angle+line_angles, -big_angle+line_angles,
                    color="#999999")
    return p
def make_second_order_heatmap(df, top=10, name='', mirror=True, include=()):
    """
    Plot a heat map of the second order sensitivity indices from a given
    dataframe.  If you are choosing a high value of `top` then making
    this plot gets expensive and it is recommended to set mirror to False.

    Parameters
    -----------
    df : pandas dataframe
        dataframe with second order sensitivity indices.  This
        dataframe should be formatted in the standard output format
        from a Sobol sensitivity analysis in SALib.
    top : int, optional
        integer specifying the number of parameter interactions to
        plot (those with the 'top' greatest values are displayed).
    name : str, optional
        string indicating the name of the output measure
        you are plotting.
    mirror : bool, optional
        boolean indicating whether you would like to plot the mirror
        image (reflection across the diagonal).  This mirror image
        contains the same information as plotted already, but will
        increase the computation time for large dataframes.
    include : sequence of str, optional
        parameters that you would like to make sure are shown on the
        heat map (even if they are not in the `top` subset).  The
        default is an empty tuple instead of a mutable list so that
        repeated calls cannot accidentally share one default object.

    Returns
    --------
    p : bokeh figure
        A Bokeh figure to be plotted
    """
    # Confirm that df contains second order sensitivity indices
    if (list(df.columns.values) !=
            ['Parameter_1', 'Parameter_2', 'S2', 'S2_conf']):
        raise TypeError('df must contain second order sensitivity data')
    # Make sure `top` != 0 (it must be at least 1, even if a list is
    # specified for `include`).
    if top <= 0:
        top = 1
        # Parenthesized so this line is valid in both Python 2 and 3.
        print('`top` cannot be <= 0; it has been set to 1')
    # Colormap to use for plot (light -> dark blues)
    colors = ["#f7fbff", "#deebf7", "#c6dbef", "#9ecae1", "#6baed6",
              "#4292c6", "#2171b5", "#08519c", "#08306b"]
    # Slice the dataframe to include only the top parameters
    df_top = df.sort_values('S2', ascending=False).head(top)
    # Make a list of all the parameters that interact with each other.
    # NOTE(review): building via set() makes the axis label order arbitrary.
    labels = list(set(
        [x for x in pd.concat([df_top.Parameter_1, df_top.Parameter_2])]))
    for item in include:
        if item not in labels:
            labels.append(item)
    xlabels = labels
    ylabels = labels
    # Use this to scale the heat map so the max sensitivity index is darkest
    maxval = np.max(df.S2)
    xlabel = []
    ylabel = []
    color = []
    s2 = []
    s2_conf = []
    for px in xlabels:
        for py in ylabels:
            xlabel.append(px)
            ylabel.append(py)
            # sens is a dataframe with S2 and S2_conf that is stored for
            # each box of the heat map (.loc replaces the deprecated .ix;
            # the selection here is purely label based).
            sens = (df[df.Parameter_1.isin([px]) & df.Parameter_2.isin([py])]
                    .loc[:, ['S2', 'S2_conf']])
            # sens can be empty if there are no corresponding pairs in the
            # source dataframe (for example a parameter interacting with
            # itself).
            if sens.empty and not mirror:
                s2.append(float('NaN'))
                s2_conf.append(float('NaN'))
                color.append("#b3b3b3")  # grey box == no data available
            # This heat map is symmetric across the diagonal, so this elif
            # statement populates the mirror image if you've chosen to
            elif sens.empty and mirror:
                sens_mirror = (df[df.Parameter_1.isin([py]) &
                                  df.Parameter_2.isin([px])]
                               .loc[:, ['S2', 'S2_conf']])
                if sens_mirror.empty:
                    s2.append(float('NaN'))
                    s2_conf.append(float('NaN'))
                    color.append("#b3b3b3")
                else:
                    s2.append(sens_mirror.S2.values[0])
                    s2_conf.append(sens_mirror.S2_conf.values[0])
                    color.append(colors[int(round((sens_mirror.S2.values[0] /
                                                   maxval) * 7) + 1)])
            # This else handles the standard (un-mirrored) boxes of the plot
            else:
                s2.append(sens.S2.values[0])
                s2_conf.append(sens.S2_conf.values[0])
                color.append(colors[int(round((sens.S2.values[0] /
                                               maxval) * 7) + 1)])
    source = ColumnDataSource(data=dict(xlabel=xlabel, ylabel=ylabel, s2=s2,
                                        s2_conf=s2_conf, color=color))
    # Initialize the plot
    plottools = "resize, hover, save, pan, box_zoom, wheel_zoom, reset"
    p = figure(title="%s second order sensitivities" % name,
               x_range=list(reversed(labels)), y_range=labels,
               x_axis_location="above", plot_width=700, plot_height=700,
               toolbar_location="right", tools=plottools)
    p.grid.grid_line_color = None
    p.axis.axis_line_color = None
    p.axis.major_tick_line_color = None
    p.axis.major_label_text_font_size = "8pt"
    p.axis.major_label_standoff = 0
    p.xaxis.major_label_orientation = 1.1
    # Plot the second order data: one unit square per parameter pair
    p.rect("xlabel", "ylabel", 1, 1, source=source,
           color="color", line_color=None)
    p.select_one(HoverTool).tooltips = [
        ('Interaction', '@xlabel-@ylabel'),
        ('S2', '@s2'),
        ('S2_conf', '@s2_conf'),
    ]
    return p
|
bsd-2-clause
|
leesavide/pythonista-docs
|
Documentation/matplotlib/mpl_examples/pylab_examples/legend_auto.py
|
3
|
2281
|
"""
This file was written to test matplotlib's autolegend placement
algorithm, but shows lots of different ways to create legends, so it is
useful as a general set of examples.
Thanks to John Gill and Phil ?? for help at the matplotlib sprint at
pycon 2005 where the auto-legend support was written.
"""
from pylab import *
import sys
# Let every legend() call below use matplotlib's automatic "best" placement.
rcParams['legend.loc'] = 'best'
N = 100  # number of sample points shared by all figures below
x = arange(N)  # common abscissa: 0, 1, ..., N-1
def fig_1():
    """Figure 1: a dense, tall sine curve with a semi-transparent legend."""
    figure(1)
    theta = arange(0, 40.0 * pi, 0.1)
    curve = plot(theta, 100 * sin(theta), 'r', label='sine')[0]
    legend(framealpha=0.5)
def fig_2():
    """Figure 2: the sequence plotted against its own index (y = x)."""
    figure(2)
    plot(x, 'o', label='x=y')
    legend()
def fig_3():
    """Figure 3: the negated identity line drawn with circle markers."""
    figure(3)
    plot(x, -x, 'o', label='x= -y')
    legend()
def fig_4():
    """Figure 4: two horizontal marker rows at y = 1 and y = -1."""
    figure(4)
    for level, tag in ((1.0, 'y=1'), (-1.0, 'y=-1')):
        plot(x, level * ones(len(x)), 'o', label=tag)
    legend()
def fig_5():
    """Figure 5: histogram of Gaussian samples plus the fitted normal pdf."""
    figure(5)
    counts, edges, bars = hist(randn(1000), 40, normed=1)
    fit, = plot(edges, normpdf(edges, 0.0, 1.0), 'r--', label='fit',
                linewidth=3)
    legend([fit, bars[0]], ['fit', 'hist'])
def fig_6():
    """Figure 6: two crossing diagonal marker rows."""
    figure(6)
    plot(x, 50 - x, 'o', label='y=1')
    plot(x, x - 50, 'o', label='y=-1')
    legend()
def fig_7():
    """Figure 7: a centered parabola bracketed by two steep lines."""
    figure(7)
    centered = x - (N / 2.0)
    plot(centered, (centered * centered) - 1225, 'bo', label='$y=x^2$')
    plot(centered, 25 * centered, 'go', label='$y=25x$')
    plot(centered, -25 * centered, 'mo', label='$y=-25x$')
    legend()
def fig_8():
    """Figure 8: ascending and descending bar series on shared axes."""
    figure(8)
    up_bars = bar(x, x, color='m')
    down_bars = bar(x, x[::-1], color='g')
    legend([up_bars[0], down_bars[0]], ['up', 'down'])
def fig_9():
    """Figure 9: the mirror of figure 8, with bars hanging below zero."""
    figure(9)
    down_bars = bar(x, -x)
    up_bars = bar(x, -x[::-1], color='r')
    legend([down_bars[0], up_bars[0]], ['down', 'up'])
def fig_10():
    """Figure 10: four bar series crowding the legend from every corner."""
    figure(10)
    series = [bar(x, x, bottom=-100, color='m'),
              bar(x, x[::-1], bottom=-100, color='g'),
              bar(x, -x, bottom=100),
              bar(x, -x[::-1], bottom=100, color='r')]
    legend([s[0] for s in series],
           ['bottom right', 'bottom left', 'top left', 'top right'])
if __name__ == '__main__':
    nfigs = 10
    # Figure numbers may be given on the command line; non-integers are
    # silently ignored, and no arguments means "draw them all".
    requested = []
    for token in sys.argv[1:]:
        try:
            requested.append(int(token))
        except ValueError:
            pass
    if not requested:
        requested = range(1, nfigs + 1)
    for num in requested:
        # Dispatch to fig_1 ... fig_10 by name.
        globals()["fig_%d" % num]()
    show()
|
apache-2.0
|
pgdr/ert
|
share/workflows/jobs/internal-gui/scripts/gen_data_rft_export.py
|
1
|
8462
|
import os
import re
import numpy
import pandas
from PyQt4.QtGui import QCheckBox
from ecl.ecl.rft import WellTrajectory
from res.enkf import ErtPlugin, CancelPluginException
from res.enkf import RealizationStateEnum
from res.enkf.enums import EnkfObservationImplementationType
from res.enkf.export import GenDataCollector, ArgLoader
from ert_gui.ertwidgets.customdialog import CustomDialog
from ert_gui.ertwidgets.listeditbox import ListEditBox
from ert_gui.ertwidgets.models.path_model import PathModel
from ert_gui.ertwidgets.pathchooser import PathChooser
class GenDataRFTCSVExportJob(ErtPlugin):
    """Export of GEN_DATA based RFTs to a CSV file.

    The CSV file will in addition contain the depth as a duplicated
    separate row.

    The script expects four arguments:
       output_file: this is the path to the file to output the CSV data to
       key: this is the ert GEN_DATA key used for this particular RFT.
       report_step: This is the report step configured in the ert
         configuration file for this RFT.
       trajectory_file: the file containing the well trajectory.

    Optional arguments:
       case_list: a comma separated list of cases to export (no spaces allowed)
          if no list is provided the current case is exported
       infer_iteration: If True the script will try to infer the iteration
          number by looking at the suffix of the case name
          (i.e. default_2 = iteration 2).
          If False the script will use the ordering of the case list: the
          first item will be iteration 0, the second item will be iteration 1...
    """
    # Rich-text tooltip shown next to the "Infer iteration number" checkbox
    # in the dialog built by getArguments().
    INFER_HELP = ("<html>"
                  "If this is checked the iteration number will be inferred from the name i.e.:"
                  "<ul>"
                  "<li>case_name -> iteration: 0</li>"
                  "<li>case_name_0 -> iteration: 0</li>"
                  "<li>case_name_2 -> iteration: 2</li>"
                  "<li>case_0, case_2, case_5 -> iterations: 0, 2, 5</li>"
                  "</ul>"
                  "Leave this unchecked to set iteration number to the order of the listed cases:"
                  "<ul><li>case_0, case_2, case_5 -> iterations: 0, 1, 2</li></ul>"
                  "<br/>"
                  "</html>")
    def getName(self):
        """Return the display name of this plugin."""
        return "GEN_DATA RFT CSV Export"
    def getDescription(self):
        """Return a one-line description of this plugin."""
        return "Export gen_data RFT results into a single CSV file."
    def inferIterationNumber(self, case_name):
        """Return the trailing "_<digits>" of *case_name* as an int, or 0."""
        pattern = re.compile("_([0-9]+$)")
        match = pattern.search(case_name)
        if match is not None:
            return int(match.group(1))
        return 0
    def run(self, output_file, trajectory_path, case_list=None, infer_iteration=True):
        """The run method will export the RFT's for all wells and all cases.

        The successful operation of this method hinges on two naming
        conventions:
          1. All the GEN_DATA RFT observations have key RFT_$WELL
          2. The trajectory files are in $trajectory_path/$WELL.txt or
             $trajectory_path/$WELL_R.txt
        """
        wells = set()
        obs_pattern = "RFT_*"
        enkf_obs = self.ert().getObservations()
        obs_keys = enkf_obs.getMatchingKeys(obs_pattern, obs_type=EnkfObservationImplementationType.GEN_OBS)
        cases = []
        if case_list is not None:
            cases = case_list.split(",")
        # Fall back to the currently selected case when none were given.
        if case_list is None or len(cases) == 0:
            cases = [self.ert().getEnkfFsManager().getCurrentFileSystem().getCaseName()]
        data_frame = pandas.DataFrame()
        for index, case in enumerate(cases):
            case = case.strip()
            case_frame = pandas.DataFrame()
            if not self.ert().getEnkfFsManager().caseExists(case):
                raise UserWarning("The case '%s' does not exist!" % case)
            if not self.ert().getEnkfFsManager().caseHasData(case):
                raise UserWarning("The case '%s' does not have any data!" % case)
            if infer_iteration:
                iteration_number = self.inferIterationNumber(case)
            else:
                iteration_number = index
            for obs_key in obs_keys:
                # Naming convention 1: observation key is RFT_$WELL.
                well = obs_key.replace("RFT_", "")
                wells.add(well)
                obs_vector = enkf_obs[obs_key]
                data_key = obs_vector.getDataKey()
                report_step = obs_vector.activeStep()
                obs_node = obs_vector.getNode(report_step)
                rft_data = GenDataCollector.loadGenData(self.ert(), case, data_key, report_step)
                fs = self.ert().getEnkfFsManager().getFileSystem(case)
                realizations = fs.realizationList(RealizationStateEnum.STATE_HAS_DATA)
                # Trajectory (naming convention 2: $WELL.txt, else $WELL_R.txt)
                trajectory_file = os.path.join(trajectory_path, "%s.txt" % well)
                if not os.path.isfile(trajectory_file):
                    trajectory_file = os.path.join(trajectory_path, "%s_R.txt" % well)
                # NOTE(review): `trajectory` is constructed but never used
                # below; presumably kept to validate the file — confirm.
                trajectory = WellTrajectory(trajectory_file)
                arg = ArgLoader.load(trajectory_file, column_names=["utm_x", "utm_y", "md", "tvd"])
                tvd_arg = arg["tvd"]
                data_size = len(tvd_arg)
                # Observations: column 0 = value, column 1 = std; NaN where
                # no observation exists at that depth index.
                obs = numpy.empty(shape=(data_size, 2), dtype=numpy.float64)
                obs.fill(numpy.nan)
                for obs_index in range(len(obs_node)):
                    data_index = obs_node.getDataIndex(obs_index)
                    value = obs_node.getValue(obs_index)
                    std = obs_node.getStandardDeviation(obs_index)
                    obs[data_index, 0] = value
                    obs[data_index, 1] = std
                # One frame per realization, stacked into the case frame.
                for iens in realizations:
                    realization_frame = pandas.DataFrame(data={"TVD": tvd_arg,
                                                               "Pressure": rft_data[iens],
                                                               "ObsValue": obs[:, 0],
                                                               "ObsStd": obs[:, 1]},
                                                         columns=["TVD", "Pressure", "ObsValue", "ObsStd"])
                    realization_frame["Realization"] = iens
                    realization_frame["Well"] = well
                    realization_frame["Case"] = case
                    realization_frame["Iteration"] = iteration_number
                    case_frame = case_frame.append(realization_frame)
            data_frame = data_frame.append(case_frame)
        data_frame.set_index(["Realization", "Well", "Case", "Iteration"], inplace=True)
        data_frame.to_csv(output_file)
        export_info = "Exported RFT information for wells: %s to: %s " % (", ".join(list(wells)), output_file)
        return export_info
    def getArguments(self, parent=None):
        """Show a dialog collecting the run() arguments; raise on cancel."""
        description = "The GEN_DATA RFT CSV export requires some information before it starts:"
        dialog = CustomDialog("Robust CSV Export", description, parent)
        output_path_model = PathModel("output.csv")
        output_path_chooser = PathChooser(output_path_model)
        trajectory_model = PathModel("wellpath", must_be_a_directory=True, must_be_a_file=False, must_exist=True)
        trajectory_chooser = PathChooser(trajectory_model)
        fs_manager = self.ert().getEnkfFsManager()
        all_case_list = fs_manager.getCaseList()
        # Only offer cases that actually contain data.
        all_case_list = [case for case in all_case_list if fs_manager.caseHasData(case)]
        list_edit = ListEditBox(all_case_list)
        infer_iteration_check = QCheckBox()
        infer_iteration_check.setChecked(True)
        infer_iteration_check.setToolTip(GenDataRFTCSVExportJob.INFER_HELP)
        dialog.addLabeledOption("Output file path", output_path_chooser)
        dialog.addLabeledOption("Trajectory file", trajectory_chooser)
        dialog.addLabeledOption("List of cases to export", list_edit)
        dialog.addLabeledOption("Infer iteration number", infer_iteration_check)
        dialog.addButtons()
        success = dialog.showAndTell()
        if success:
            case_list = ",".join(list_edit.getItems())
            try:
                return [output_path_model.getPath(), trajectory_model.getPath(), case_list, infer_iteration_check.isChecked()]
            except ValueError:
                pass
        raise CancelPluginException("User cancelled!")
|
gpl-3.0
|
nmartensen/pandas
|
pandas/tests/io/msgpack/test_unpack.py
|
22
|
1948
|
from io import BytesIO
import sys
from pandas.io.msgpack import Unpacker, packb, OutOfData, ExtType
import pytest
class TestUnpack(object):
    """Tests for the streaming Unpacker in pandas.io.msgpack."""
    def test_unpack_array_header_from_file(self):
        """read_array_header gives the length; elements unpack one by one."""
        f = BytesIO(packb([1, 2, 3, 4]))
        unpacker = Unpacker(f)
        assert unpacker.read_array_header() == 4
        assert unpacker.unpack() == 1
        assert unpacker.unpack() == 2
        assert unpacker.unpack() == 3
        assert unpacker.unpack() == 4
        # The stream is exhausted after the four elements.
        pytest.raises(OutOfData, unpacker.unpack)
    def test_unpacker_hook_refcnt(self):
        """Unpacker holds references to its hooks and releases them on del.

        Uses CPython reference counts, so it is skipped on interpreters
        without sys.getrefcount.
        """
        if not hasattr(sys, 'getrefcount'):
            pytest.skip('no sys.getrefcount()')
        result = []
        def hook(x):
            result.append(x)
            return x
        basecnt = sys.getrefcount(hook)
        # The same callable is installed as both object_hook and list_hook,
        # so the Unpacker must hold at least two extra references.
        up = Unpacker(object_hook=hook, list_hook=hook)
        assert sys.getrefcount(hook) >= basecnt + 2
        up.feed(packb([{}]))
        up.feed(packb([{}]))
        assert up.unpack() == [{}]
        assert up.unpack() == [{}]
        # Hook call order: dict hook fires before the enclosing list hook.
        assert result == [{}, [{}], {}, [{}]]
        del up
        # Dropping the Unpacker must release its references to the hook.
        assert sys.getrefcount(hook) == basecnt
    def test_unpacker_ext_hook(self):
        """A custom ext_hook can translate ExtType payloads during unpack."""
        class MyUnpacker(Unpacker):
            def __init__(self):
                super(MyUnpacker, self).__init__(ext_hook=self._hook,
                                                 encoding='utf-8')
            def _hook(self, code, data):
                # Code 1 decodes to an int; everything else stays opaque.
                if code == 1:
                    return int(data)
                else:
                    return ExtType(code, data)
        unpacker = MyUnpacker()
        unpacker.feed(packb({'a': 1}, encoding='utf-8'))
        assert unpacker.unpack() == {'a': 1}
        unpacker.feed(packb({'a': ExtType(1, b'123')}, encoding='utf-8'))
        assert unpacker.unpack() == {'a': 123}
        unpacker.feed(packb({'a': ExtType(2, b'321')}, encoding='utf-8'))
        assert unpacker.unpack() == {'a': ExtType(2, b'321')}
|
bsd-3-clause
|
aruneral01/auto-sklearn
|
autosklearn/submit_process.py
|
5
|
5215
|
import shlex
import os
import subprocess
import lockfile
import autosklearn.cli.SMAC_cli_holdout
from autosklearn.constants import *
def submit_call(call, seed, log_dir=None):
    """Launch *call* as a subprocess and return the Popen handle.

    Parameters
    ----------
    call : str
        Full command line; tokenized with shlex.split before launch.
    seed : int
        Used only to name the per-seed log files.
    log_dir : str, optional
        If given, stdout/stderr go to ``ensemble_out_<seed>.log`` /
        ``ensemble_err_<seed>.log`` in that directory; otherwise stdout
        is discarded and stderr is inherited.

    Returns
    -------
    subprocess.Popen
    """
    # Parenthesized print works identically in Python 2 and 3.
    print("Calling: " + call)
    args = shlex.split(call)
    if log_dir is None:
        # Discard output entirely when no log directory was requested.
        # The file objects are deliberately left open for the child's
        # lifetime; they are reclaimed when the Popen object is.
        proc = subprocess.Popen(args, stdout=open(os.devnull, 'w'))
    else:
        proc = subprocess.Popen(
            args,
            stdout=open(os.path.join(log_dir,
                                     "ensemble_out_%d.log" % seed), 'w'),
            stderr=open(os.path.join(log_dir,
                                     "ensemble_err_%d.log" % seed), 'w'))
    return proc
def get_algo_exec(runsolver_limit, runsolver_delay, memory_limit, *args):
    """Build the quoted target-algorithm command line handed to SMAC.

    The command wraps the SMAC_cli_holdout.py wrapper in a runsolver
    invocation enforcing wall-time and memory limits; *args are appended
    verbatim to the wrapped call.
    """
    wrapper_dir = os.path.dirname(os.path.abspath(autosklearn.cli.__file__))
    target_call = 'python %s' % os.path.join(wrapper_dir,
                                             "SMAC_cli_holdout.py")
    # Runsolver does strange things if the time limit is negative. Set it
    # to be at least one (0 means infinity).
    limit = runsolver_limit if runsolver_limit > 1 else 1
    prefix = ("runsolver --watcher-data /dev/null -W %d -d %d -M %d "
              % (limit, runsolver_delay, memory_limit))
    return '"%s %s %s"' % (prefix, target_call, " ".join(args))
def run_smac(dataset_name, dataset, tmp_dir, searchspace, instance_file, limit,
             cutoff_time, seed, memory_limit, initial_challengers=None, ):
    """Write a SMAC scenario file for *dataset_name* and launch SMAC.

    Returns (proc, call) — the Popen handle and the command line — or
    None when *limit* is non-positive.
    """
    if limit <= 0:
        # It makes no sense to start building ensembles_statistics
        return
    limit = int(limit)
    wallclock_limit = int(limit)
    # It makes no sense to use less than 5sec
    # We try to do at least one run within the whole runtime
    runsolver_softlimit = max(5, cutoff_time - 35)
    runsolver_hardlimit_delay = 30
    algo_exec = get_algo_exec(runsolver_softlimit,
                              runsolver_hardlimit_delay,
                              memory_limit,
                              dataset)
    # SMAC scenario options, written verbatim into the scenario file below.
    scenario = {'cli-log-all-calls': 'false',
                'console-log-level': 'DEBUG',
                'log-level': 'DEBUG',
                'cutoffTime': str(runsolver_softlimit),
                'wallclock-limit': str(wallclock_limit),
                'intraInstanceObj': 'MEAN',
                'runObj': 'QUALITY',
                'algoExec': algo_exec,
                'numIterations': '2147483647',
                'totalNumRunsLimit': '2147483647',
                'outputDirectory': tmp_dir,
                'numConcurrentAlgoExecs': '1',
                'maxIncumbentRuns': '2147483647',
                'retryTargetAlgorithmRunCount': '0',
                'intensification-percentage': '0.5',
                'num-ei-random': '1000',
                # Number of challengers for local search
                'num-challengers': 100,
                'initial-incumbent': 'DEFAULT',
                'rf-split-min': '10',
                'validation': 'false',
                'deterministic': 'true',
                'abort-on-first-run-crash': 'false',
                'pcs-file': os.path.abspath(searchspace),
                'execDir': tmp_dir,
                'transform-crashed-quality-value': '2',
                'instances': instance_file}
    scenario_file = os.path.join(tmp_dir, "%s.scenario" % dataset_name)
    scenario_file_lock = scenario_file + ".lock"
    # Serialize scenario-file creation across concurrent processes; the
    # file is only written once.
    with lockfile.LockFile(scenario_file_lock):
        if not os.path.exists(scenario_file):
            with open(scenario_file, 'w') as fh:
                for option, value in scenario.items():
                    fh.write("%s = %s\n" %(option, value))
    if initial_challengers is None:
        initial_challengers = []
    call = " ".join(["smac", '--numRun', str(seed), "--scenario", scenario_file]
                    + initial_challengers)
    proc = submit_call(call, seed)
    return proc, call
def run_ensemble_builder(tmp_dir, dataset_name, task_type, metric, limit,
                         output_dir, ensemble_size, ensemble_nbest, seed,
                         ensemble_indices_output_dir):
    """Launch the ensemble-selection script under runsolver.

    Returns the Popen handle from submit_call, or None when *limit* is
    non-positive.  (`ensemble_nbest` is accepted for interface
    compatibility but not used here.)
    """
    if limit <= 0:
        # It makes no sense to start building ensembles_statistics
        return
    script = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          "ensemble_selection_script.py")
    builder_args = ["python", script, tmp_dir, dataset_name,
                    TASK_TYPES_TO_STRING[task_type], metric, str(limit - 5),
                    output_dir, str(ensemble_size), str(seed),
                    ensemble_indices_output_dir]
    # Runsolver does strange things if the time limit is negative. Set it
    # to be at least one (0 means infinity).  Delay between soft and hard
    # kill is 5 seconds.
    watcher = "runsolver --watcher-data /dev/null -W %d -d %d" % (
        max(1, limit), 5)
    full_call = watcher + " " + " ".join(builder_args)
    return submit_call(full_call, seed, log_dir=tmp_dir)
|
bsd-3-clause
|
PyCQA/pydocstyle
|
src/tests/test_cases/canonical_numpy_examples.py
|
3
|
5315
|
"""This is the docstring for the example.py module. Modules names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
# Example source file from the official "numpydoc docstring guide"
# documentation (with the modification of commenting out all the original
# ``import`` lines, plus adding this note and ``Expectation`` code):
# * As HTML: https://numpydoc.readthedocs.io/en/latest/example.html
# * Source Python:
# https://github.com/numpy/numpydoc/blob/master/doc/example.py
# from __future__ import division, absolute_import, print_function
#
# import os # standard library imports first
#
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead or import individual functions as needed, e.g
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
#
# import numpy as np
# import matplotlib as mpl
# import matplotlib.pyplot as plt
#
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
import os
from .expected import Expectation
# Register the pydocstyle violations this fixture file is expected to
# trigger; the string messages below are fixture data and must match the
# checker's output exactly.
expectation = Expectation()
expect = expectation.expect
# module docstring expected violations:
expectation.expected.add((
    os.path.normcase(__file__),
    "D205: 1 blank line required between summary line and description "
    "(found 0)"))
expectation.expected.add((
    os.path.normcase(__file__),
    "D213: Multi-line docstring summary should start at the second line"))
expectation.expected.add((
    os.path.normcase(__file__),
    "D400: First line should end with a period (not 'd')"))
expectation.expected.add((
    os.path.normcase(__file__),
    "D404: First word of the docstring should not be `This`"))
expectation.expected.add((
    os.path.normcase(__file__),
    "D415: First line should end with a period, question mark, or exclamation "
    "point (not 'd')"))
# Fixture function: the docstring below is the object under test and is
# intentionally left exactly as written (including its style violations);
# the @expect decorators record the violations pydocstyle must report.
@expect("D213: Multi-line docstring summary should start at the second line",
        arg_count=3)
@expect("D401: First line should be in imperative mood; try rephrasing "
        "(found 'A')", arg_count=3)
@expect("D413: Missing blank line after last section ('Examples')",
        arg_count=3)
def foo(var1, var2, long_var_name='hi'):
    r"""A one-line summary that does not use variable names.
    Several sentences providing an extended description. Refer to
    variables using back-ticks, e.g. `var`.
    Parameters
    ----------
    var1 : array_like
        Array_like means all those objects -- lists, nested lists, etc. --
        that can be converted to an array.  We can also refer to
        variables like `var1`.
    var2 : int
        The type above can either refer to an actual Python type
        (e.g. ``int``), or describe the type of the variable in more
        detail, e.g. ``(N,) ndarray`` or ``array_like``.
    long_var_name : {'hi', 'ho'}, optional
        Choices in brackets, default first when optional.
    Returns
    -------
    type
        Explanation of anonymous return value of type ``type``.
    describe : type
        Explanation of return value named `describe`.
    out : type
        Explanation of `out`.
    type_without_description
    Other Parameters
    ----------------
    only_seldom_used_keywords : type
        Explanation
    common_parameters_listed_above : type
        Explanation
    Raises
    ------
    BadException
        Because you shouldn't have done that.
    See Also
    --------
    numpy.array : Relationship (optional).
    numpy.ndarray : Relationship (optional), which could be fairly long, in
                    which case the line wraps here.
    numpy.dot, numpy.linalg.norm, numpy.eye
    Notes
    -----
    Notes about the implementation algorithm (if needed).
    This can have multiple paragraphs.
    You may include some math:
    .. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
    And even use a Greek symbol like :math:`\omega` inline.
    References
    ----------
    Cite the relevant literature, e.g. [1]_.  You may also cite these
    references in the notes section above.
    .. [1] O. McNoleg, "The integration of GIS, remote sensing,
       expert systems and adaptive co-kriging for environmental habitat
       modelling of the Highland Haggis using object-oriented, fuzzy-logic
       and neural-network techniques," Computers & Geosciences, vol. 22,
       pp. 585-588, 1996.
    Examples
    --------
    These are written in doctest format, and should illustrate how to
    use the function.
    >>> a = [1, 2, 3]
    >>> print([x + 3 for x in a])
    [4, 5, 6]
    >>> print("a\nb")
    a
    b
    """
    # After closing class docstring, there should be one blank line to
    # separate following codes (according to PEP257).
    # But for function, method and module, there should be no blank lines
    # after closing the docstring.
    pass
|
mit
|
wschenck/nest-simulator
|
examples/nest/Potjans_2014/spike_analysis.py
|
20
|
6437
|
# -*- coding: utf-8 -*-
#
# spike_analysis.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Merges spike files, produces raster plots, calculates and plots firing rates
import numpy as np
import glob
import matplotlib.pyplot as plt
import os
import re
datapath = '.'
# get simulation time and numbers of neurons recorded from sim_params.sli
with open(os.path.join(datapath, 'sim_params.sli'), 'r') as f:
    sim_params_contents = f.read()
# The SLI file stores parameters as "/name value def"; pull them out by regex.
T = float(re.search(r'/t_sim (.+) def', sim_params_contents).group(1))
record_frac = re.search(r'/record_fraction_neurons_spikes (.+) def', sim_params_contents).group(1) == 'true'
if record_frac:
    frac_rec = float(re.search(r'/frac_rec_spikes (.+) def', sim_params_contents).group(1))
else:
    n_rec = int(re.search(r'/n_rec_spikes (.+) def', sim_params_contents).group(1))
T_start = 200.  # starting point of analysis (to avoid transients)
# load node IDs; one (first, last) row per population
node_ids = np.loadtxt(os.path.join(datapath, 'population_nodeIDs.dat'), dtype=int)
print('Global IDs:')
print(node_ids)
print()
# number of populations
num_pops = len(node_ids)
print('Number of populations:')
print(num_pops)
print()
# first node ID in each population
raw_first_node_ids = [node_ids[i][0] for i in np.arange(len(node_ids))]
# population sizes
pop_sizes = [node_ids[i][1] - node_ids[i][0] + 1 for i in np.arange(len(node_ids))]
# numbers of neurons for which spikes were recorded
if record_frac:
    rec_sizes = [int(pop_sizes[i] * frac_rec) for i in range(len(pop_sizes))]
else:
    rec_sizes = [n_rec] * len(pop_sizes)
# first node ID of each population once device node IDs are dropped
first_node_ids = [int(1 + np.sum(pop_sizes[:i]))
                  for i in np.arange(len(pop_sizes))]
# last node ID of each population once device node IDs are dropped
last_node_ids = [int(np.sum(pop_sizes[:i + 1]))
                 for i in np.arange(len(pop_sizes))]
# convert lists to a nicer format, i.e. [[2/3e, 2/3i], []....]
Pop_sizes = [pop_sizes[i:i + 2] for i in range(0, len(pop_sizes), 2)]
print('Population sizes:')
print(Pop_sizes)
print()
Raw_first_node_ids = [raw_first_node_ids[i:i + 2] for i in range(0, len(raw_first_node_ids), 2)]
First_node_ids = [first_node_ids[i:i + 2] for i in range(0, len(first_node_ids), 2)]
Last_node_ids = [last_node_ids[i:i + 2] for i in range(0, len(last_node_ids), 2)]
# total number of neurons in the simulation
num_neurons = last_node_ids[len(last_node_ids) - 1]
print('Total number of neurons:')
print(num_neurons)
print()
# load spikes from gdf files, correct node IDs and merge them in population files,
# and store spike trains
# will contain neuron id resolved spike trains
neuron_spikes = [[] for i in np.arange(num_neurons + 1)]
# container for population-resolved spike data
spike_data = [[[], []], [[], []], [[], []], [[], []], [[], []], [[], []],
              [[], []], [[], []]]
counter = 0
# Iterate the 4 layers x 2 populations recorded by the simulation.
for layer in ['0', '1', '2', '3']:
    for population in ['0', '1']:
        output = os.path.join(datapath,
                              'population_spikes-{}-{}.gdf'.format(layer,
                                                                   population))
        file_pattern = os.path.join(datapath,
                                    'spikes_{}_{}*'.format(layer, population))
        files = glob.glob(file_pattern)
        print('Merge ' + str(
            len(files)) + ' spike files from L' + layer + 'P' + population)
        if files:
            merged_file = open(output, 'w')
            for f in files:
                data = open(f, 'r')
                # Skip the three header lines written by the recorder.
                nest_version = next(data)
                backend_version = next(data)
                column_header = next(data)
                for l in data:
                    # Each line is "<node_id> <spike_time_ms>".
                    a = l.split()
                    a[0] = int(a[0])
                    a[1] = float(a[1])
                    # Remap raw simulator IDs to compact, device-free IDs.
                    raw_first_node_id = Raw_first_node_ids[int(layer)][int(population)]
                    first_node_id = First_node_ids[int(layer)][int(population)]
                    a[0] = a[0] - raw_first_node_id + first_node_id
                    if (a[1] > T_start):  # discard data in the start-up phase
                        spike_data[counter][0].append(num_neurons - a[0])
                        spike_data[counter][1].append(a[1] - T_start)
                        neuron_spikes[a[0]].append(a[1] - T_start)
                        converted_line = str(a[0]) + '\t' + str(a[1]) + '\n'
                        merged_file.write(converted_line)
                data.close()
            merged_file.close()
        counter += 1
# alternate greyscale shades for excitatory/inhibitory populations
clrs = ['0', '0.5', '0', '0.5', '0', '0.5', '0', '0.5']
plt.ion()
# raster plot
plt.figure(1)
counter = 1
for j in np.arange(num_pops):
    for i in np.arange(first_node_ids[j], first_node_ids[j] + rec_sizes[j]):
        plt.plot(neuron_spikes[i],
                 np.ones_like(neuron_spikes[i]) + sum(rec_sizes) - counter,
                 'k o', ms=1, mfc=clrs[j], mec=clrs[j])
        counter += 1
plt.xlim(0, T - T_start)
plt.ylim(0, sum(rec_sizes))
plt.xlabel(r'time (ms)')
plt.ylabel(r'neuron id')
plt.savefig(os.path.join(datapath, 'rasterplot.png'))
# firing rates: mean spikes/s per recorded neuron in each population
rates = []
temp = 0
for i in np.arange(num_pops):
    for j in np.arange(first_node_ids[i], last_node_ids[i]):
        temp += len(neuron_spikes[j])
    # convert count / (neurons * ms) to spikes per second
    rates.append(temp / (rec_sizes[i] * (T - T_start)) * 1e3)
    temp = 0
print()
print('Firing rates:')
print(rates)
plt.figure(2)
ticks = np.arange(num_pops)
plt.bar(ticks, rates, width=0.9, color='k')
xticklabels = ['L2/3e', 'L2/3i', 'L4e', 'L4i', 'L5e', 'L5i', 'L6e', 'L6i']
plt.setp(plt.gca(), xticks=ticks + 0.5, xticklabels=xticklabels)
plt.xlabel(r'subpopulation')
plt.ylabel(r'firing rate (spikes/s)')
plt.savefig(os.path.join(datapath, 'firing_rates.png'))
plt.show()
|
gpl-2.0
|
mohseniaref/PySAR-1
|
pysar/add.py
|
1
|
3771
|
#! /usr/bin/env python
############################################################
# Program is part of PySAR v1.0 #
# Copyright(c) 2013, Heresh Fattahi #
# Author: Heresh Fattahi #
############################################################
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import h5py
def Usage():
    """Print add.py usage instructions (Python 2 print statement)."""
    print '''
***************************************************************
***************************************************************
  Generates the sum of two input files.
  Usage:
       add.py file1.h5 file2.h5
       output is file1_plus_file2.h5 (file1 + file2)
  example:
       add.py velocity_masked.h5 velocity_demCor_masked.h5
       add.py velocity_demCor_masked.h5 velocity_demCor_tropCor_masked.h5
       add.py timeseries_demCor.h5 timeseries_demCor_tropCor.h5
       add.py timeseries.h5 timeseries_demCor.h5
       add.py interferograms.h5 interferograms2.h5
***************************************************************
***************************************************************
'''
def main(argv):
    """Sum two HDF5 files of the same product type into a new file.

    NOTE(review): the *argv* parameter is unused — the two file names are
    read directly from sys.argv.
    """
    try:
        File1=sys.argv[1]
        File2=sys.argv[2]
    except:
        Usage();sys.exit(1)
    h5file1=h5py.File(File1,'r')
    k1=h5file1.keys()
    h5file2=h5py.File(File2,'r')
    k2=h5file2.keys()
    # Both files must hold the same top-level product type.
    if k1[0]!=k2[0]:
        print 'Error'
        print 'Both input files should be the same type to calculate the difference'
        Usage();sys.exit(1)
    outName=File1.split('.')[0]+'_plus_'+File2.split('.')[0]+'.h5'
    # Case 1: single 2-D dataset products.
    if k1[0] in ('velocity','temporal_coherence','rmse','mask'):
        dset1 = h5file1[k1[0]].get(k1[0])
        data1=dset1[0:dset1.shape[0],0:dset1.shape[1]]
        dset2 = h5file2[k2[0]].get(k2[0])
        data2=dset2[0:dset2.shape[0],0:dset2.shape[1]]
        h5file = h5py.File(outName,'w')
        group=h5file.create_group(k1[0])
        dset = group.create_dataset(k1[0], data=data1+data2, compression='gzip')
        # copy the attributes from the first input file
        for key , value in h5file1[k1[0]].attrs.iteritems():
            group.attrs[key]=value
        h5file.close()
    # Case 2: one dataset per acquisition date.
    elif 'timeseries' in k1:
        dateList1 = h5file1['timeseries'].keys()
        dateList2 = h5file2['timeseries'].keys()
        h5timeseries = h5py.File(outName)
        group = h5timeseries.create_group('timeseries')
        for date in dateList1:
            dset1 = h5file1['timeseries'].get(date)
            data1 = dset1[0:dset1.shape[0],0:dset1.shape[1]]
            dset2 = h5file2['timeseries'].get(date)
            data2 = dset2[0:dset2.shape[0],0:dset2.shape[1]]
            dset = group.create_dataset(date, data=data1+data2, compression='gzip')
        for key,value in h5file1['timeseries'].attrs.iteritems():
            group.attrs[key] = value
        h5timeseries.close()
    # Case 3: one group+dataset per interferogram, plus an optional mask.
    elif 'interferograms' in k1:
        ifgramList = h5file1['interferograms'].keys()
        h5igrams = h5py.File(outName)
        gg = h5igrams.create_group('interferograms')
        for igram in ifgramList:
            dset1=h5file1['interferograms'][igram].get(igram)
            data1 = dset1[0:dset1.shape[0],0:dset1.shape[1]]
            dset2=h5file2['interferograms'][igram].get(igram)
            data2 = dset2[0:dset2.shape[0],0:dset2.shape[1]]
            group = gg.create_group(igram)
            dset = group.create_dataset(igram, data=data1+data2, compression='gzip')
            for key, value in h5file1['interferograms'][igram].attrs.iteritems():
                group.attrs[key] = value
        # Carry the mask of the first file over unchanged, when present.
        try:
            gm = h5igrams.create_group('mask')
            mask = h5file1['mask'].get('mask')
            dset = gm.create_dataset('mask', data=mask, compression='gzip')
        except:
            print 'mask not found'
        h5igrams.close()
    h5file1.close()
    h5file2.close()
if __name__ == '__main__':
    # Strip the program name before handing the arguments to main().
    main(sys.argv[1:])
|
mit
|
rc/gensei
|
volume_slicer.py
|
1
|
10647
|
#!/usr/bin/env python
import os, glob, copy, time
from optparse import OptionParser
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import colorConverter
from gensei.base import *
from gensei import Objects, Box
from gensei.utils import get_suffix
def generate_slices(objects, box, options, output_filename_trunk):
    """
    Save images of the specimen slices along the specified axes of the
    block. Each image displays a planar cut plane of the block intersecting the
    ellipsoids.

    Parameters
    ----------
    objects : Objects
        The placed object classes; per-slice section data is accumulated on it.
    box : Box
        The specimen block; provides resolution and the slice point grids.
    options : Object
        Runtime options; only ``output_format`` is used here.
    output_filename_trunk : str
        Path prefix for the per-slice image files.
    """
    resolution = box.resolution
    # Image shape is (rows, cols, RGB); resolution is (nx, ny).
    imshape = resolution[::-1] + (3,)
    aspect = float(resolution[1]) / resolution[0]
    figsize = plt.figaspect(aspect)
    dpi = resolution[0] / figsize[0]
    objects.init_section_based_data()
    objects.points = []
    # Iterate over the slicing axes; am maps (in-plane 1, in-plane 2, normal)
    # to coordinate indices.
    for pb, points, delta, n_slice, axis, am in box.get_points():
        suffix = get_suffix(n_slice)
        # dpi=dpi in plt.figure() messes with figsize... ???
        fig = plt.figure(1, figsize=figsize, dpi=dpi)
        fig.set_figwidth(figsize[0])
        fig.set_figheight(figsize[1])
        ax = fig.add_axes([0, 0, 1, 1])
        objects.init_section_based_data(axis)
        x1b, x2b = pb[am[0]], pb[am[1]]
        # One image per slice position x3b along the axis normal.
        for islice, x3b in enumerate(pb[am[2]]):
            x3b_name = ('%05.2f' % x3b).replace('.', '_')
            filename = '.'.join((output_filename_trunk, axis,
                                 suffix % islice, x3b_name,
                                 options.output_format))
            output(islice, x3b, filename, '...')
            output('computing')
            points[:,am[2]] = x3b
            # mask counts object hits per pixel; cmask holds the RGB colors.
            mask = np.zeros(points.shape[0], dtype=np.int8)
            cmask = np.zeros((points.shape[0], 3), dtype=np.float64)
            for obj in objects.itervalues():
                color = np.array(colorConverter.to_rgb(obj.conf.color))
                # Restrict the containment test to pixels inside the
                # object's axis-aligned bounding box.
                bbox = obj.get_aligned_bounding_box()[am]
                ix1 = np.where((x1b > bbox[0,0]) & (x1b < bbox[0,1]))[0]
                ix2 = np.where((x2b > bbox[1,0]) & (x2b < bbox[1,1]))[0]
                a, b = np.meshgrid(ix1, resolution[0]*ix2)
                ii = (a + b).ravel()
                _mask = obj.contains(points[ii])
                mask[ii] += _mask
                cmask[ii[_mask]] = color
                objects.update_section_based_data(_mask, a.shape, axis, delta,
                                                  islice, x3b, obj)
                obj.store_intersection(_mask, axis, x3b)
            objects.points.append((axis, islice, x3b))
            # Objects must not overlap: each pixel belongs to at most one.
            try:
                assert_(np.alltrue(mask <= 1))
            except:
                import pdb; pdb.set_trace()
            output('drawing')
            ax.cla()
            ax.set_axis_off()
            ax.imshow(cmask.reshape(imshape), origin='upper')
            output('saving')
            plt.savefig(filename, format=options.output_format, dpi=dpi)
            output('...done')
    ## plt.show()
# Command-line usage text shown by optparse (%prog is substituted).
usage = """%prog [options] [filename]
If an input file is given, the object class options have no effect.
Default option values do _not_ override the input file options.
"""

# Fallback values shared by the option parser and the default configs below.
defaults = {
    'fraction' : 0.1,
    'fraction_reduction' : 0.9,
    'length_to_width' : 8.0,
    'n_slice' : 21,
    'dims' : '(10, 10, 10)',
    'units' : 'mm',
    'resolution' : '600x600',
    'n_object' : 10,
    'output_format' : 'png',
    'timeout' : 5.0,
}

# Default object configuration: a single class of randomly placed ellipsoids.
default_objects = {
    'class 1' : {
        'kind' : 'ellipsoid',
        'color' : 'r',
        'fraction' : defaults['fraction'],
        'length_to_width' : defaults['length_to_width'],
        'reduce_to_fit' : {'fraction' : defaults['fraction_reduction']},
        'centre' : 'random',
        'direction' : 'random direction',
    },
}

# Default specimen block configuration.
default_box = {
    'dims' : defaults['dims'],
    'units' : defaults['units'],
    'resolution' : defaults['resolution'],
    'n_object' : defaults['n_object'],
    'n_slice' : defaults['n_slice'],
}

# Default output options.
default_options = {
    'output_format' : defaults['output_format'],
    'timeout' : defaults['timeout'],
}

# Help strings for the command-line options.
# NOTE(review): this dict shadows the builtin help(); kept because main()
# references it by this name.
help = {
    'filename' :
    'basename of output file(s) [default: %default]',
    'output_format' :
    'output file format (supported by the matplotlib backend used) '\
    '[default: %s]' % defaults['output_format'],
    'n_slice' :
    'number of slices to generate [default: %s]' % defaults['n_slice'],
    'dims' :
    'dimensions of specimen in units given by --units [default: %s]' \
    % defaults['dims'],
    'units' :
    'length units to use [default: %s]' % defaults['units'],
    'resolution' :
    'figure resolution [default: %s]' % defaults['resolution'],
    'fraction' :
    'volume fraction of objects [default: %s]' % defaults['fraction'],
    'fraction_reduction' :
    'volume fraction reduction factor [default: %s]' \
    % defaults['fraction_reduction'],
    'length_to_width' :
    'length-to-width ratio of objects [default: %s]' \
    % defaults['length_to_width'],
    'n_object' :
    'number of objects [default: %s]' % defaults['n_object'],
    'timeout' :
    'timeout in seconds for attempts to place more ellipsoids into '\
    'the block [default: %s]' % defaults['timeout'],
    'no_pauses' :
    'do not wait for a key press between fitting, generation and slicing '\
    'phases (= may overwrite previous slices without telling!)',
}
def main():
    """Parse options, place the objects in the block and generate slice
    images plus a text report.

    Command-line option values that were explicitly given may override the
    corresponding values from an input configuration file.
    """
    time_start = time.time()

    parser = OptionParser(usage=usage, version="%prog ")
    parser.add_option("-o", "", metavar='filename',
                      action="store", dest="output_filename_trunk",
                      default='./slices/slice', help=help['filename'])
    parser.add_option("-f", "--format", metavar='format',
                      action="store", dest="output_format",
                      default=None, help=help['output_format'])
    parser.add_option("-n", "--n-slice", type=int, metavar='int',
                      action="store", dest="n_slice",
                      default=None, help=help['n_slice'])
    parser.add_option("-d", "--dims", metavar='dims',
                      action="store", dest="dims",
                      default=None, help=help['dims'])
    parser.add_option("-u", "--units", metavar='units',
                      action="store", dest="units",
                      default=None, help=help['units'])
    parser.add_option("-r", "--resolution", metavar='resolution',
                      action="store", dest="resolution",
                      default=None, help=help['resolution'])
    parser.add_option("", "--fraction", type=float, metavar='float',
                      action="store", dest="fraction",
                      default=None, help=help['fraction'])
    parser.add_option("", "--fraction-reduction", type=float, metavar='float',
                      action="store", dest="fraction_reduction",
                      default=None, help=help['fraction_reduction'])
    parser.add_option("", "--length-to-width", type=float, metavar='float',
                      action="store", dest="length_to_width",
                      default=None, help=help['length_to_width'])
    parser.add_option("", "--n-object", type=int, metavar='int',
                      action="store", dest="n_object",
                      default=None, help=help['n_object'])
    parser.add_option("-t", "--timeout", type=float, metavar='float',
                      action="store", dest="timeout",
                      default=None, help=help['timeout'])
    parser.add_option("", "--no-pauses",
                      action="store_true", dest="no_pauses",
                      default=False, help=help['no_pauses'])
    cmdl_options, args = parser.parse_args()

    # Options the user actually supplied (non-None) may override file values;
    # the rest are filled in from the defaults.
    can_override = set()
    for key, default in defaults.iteritems():
        val = getattr(cmdl_options, key)
        if val is None:
            setattr(cmdl_options, key, default)
        else:
            can_override.add(key)

    # Either read the configuration from a file or build it from defaults.
    if len(args) == 1:
        filename = args[0]
        config = Config.from_file(filename, required=['objects', 'box'],
                                  optional=['options'])
    else:
        conf = {'objects' : default_objects,
                'box' : default_box,
                'options' : default_options}
        config = Config.from_conf(conf, required=['objects', 'box'],
                                  optional=['options'])
    config.override(cmdl_options, can_override)

    # Normalize string-valued box parameters, e.g. '(10, 10, 10)', '600x600'.
    # NOTE(review): eval() on the dims string executes arbitrary expressions
    # from the config/CLI — acceptable for a local tool, not for untrusted input.
    if isinstance(config.box['dims'], str):
        config.box['dims'] = eval(config.box['dims'])
    if isinstance(config.box['resolution'], str):
        aux = tuple([int(r) for r in config.box['resolution'].split('x')])
        config.box['resolution'] = aux
    ## print config

    box = Box(**config.box)
    options = Object(name='options', **config.options)
    output(box)
    output(options)

    object_classes = Objects.from_conf(config.objects, box)
    print object_classes
    output('total volume [(%s)^3]: %.2f' % (box.units, box.volume))
    ## output('total object volume [(%s)^3]: %.2f' % (options.units,
    ##                                                total_object_volume))
    ## output('average object volume [(%s)^3]: %.2f' % (options.units,
    ##                                                  average_object_volume))
    if not cmdl_options.no_pauses:
        spause(""">>> press a key to generate objects
if it takes too long, press <Ctrl-C> and retry with different parameters""")
    objects = object_classes.place_objects(box, options)
    print objects

    # Prepare the output directory, wiping any previous slices.
    output_dir = os.path.dirname(cmdl_options.output_filename_trunk)
    if not cmdl_options.no_pauses:
        spause(""">>> press a key to save slices in '%s'
all files in that directory will be deleted""" % output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    else:
        files = glob.glob(os.path.join(output_dir, '*'))
        for name in files:
            os.remove(name)
    generate_slices(objects, box, options, cmdl_options.output_filename_trunk)
    time_end = time.time()
    objects.format_intersection_statistics(is_output=True)

    # Save the statistics to a text file.
    reportname = cmdl_options.output_filename_trunk + '_info.txt'
    output('saving report to %s' % reportname)
    fd = open(reportname, 'w')
    fd.write('started: %s\n' % time.ctime(time_start))
    fd.write('elapsed: %.1f [s]\n' % (time_end - time_start))
    box.report(fd)
    options.report(fd)
    fd.write('\n'.join(objects.format_statistics())+'\n')
    object_classes.report(fd)
    objects.report(fd)
    fd.close()
    output('all done.')

if __name__ == "__main__":
    main()
|
bsd-3-clause
|
tareqmalas/girih
|
scripts/sisc/paper_energy_analysis.py
|
2
|
3824
|
#!/usr/bin/env python
def main():
    """Entry point: load the CSV named on the command line and write the
    aggregated energy table."""
    import sys
    csv_path = sys.argv[1]
    create_table(load_csv(csv_path))
def get_stencil_num(k):
    """Map one benchmark record to a short stencil-operator name.

    Parameters
    ----------
    k : dict
        CSV row (string values) with at least the keys
        'Stencil Kernel coefficients' and 'Stencil Kernel semi-bandwidth'.

    Returns
    -------
    str
        One of '25pt_const', '7pt_const', '7pt_var', '7pt_var_ax_sym',
        '25pt_var', '7pt_var_all_sym'.
    """
    coeffs = k['Stencil Kernel coefficients']
    # BUG FIX: the original tested `coeffs in 'constant'` ("is the field a
    # substring of 'constant'"), which is also True for '' and fragments like
    # 'const'.  The intended test — consistent with the branches below — is
    # whether the field mentions 'constant'.
    if 'constant' in coeffs:
        if int(k['Stencil Kernel semi-bandwidth']) == 4:
            stencil = '25pt_const'
        else:
            stencil = '7pt_const'
    elif 'no-symmetry' in coeffs:
        stencil = '7pt_var'
    elif 'sym' in coeffs:
        if int(k['Stencil Kernel semi-bandwidth']) == 1:
            stencil = '7pt_var_ax_sym'
        else:
            stencil = '25pt_var'
    else:
        stencil = '7pt_var_all_sym'
    return stencil
def create_table(raw_data):
    """Aggregate raw benchmark rows into energy/performance metrics and
    write them to 'energy_consumption.csv' in the current directory.

    Parameters
    ----------
    raw_data : list of dict
        Rows as returned by load_csv(); all values are strings.
    """
    from operator import itemgetter
    import matplotlib.pyplot as plt
    import pylab
    from csv import DictWriter

    # (field name, type code): 0 = keep string, 1 = int, 2 = float.
    req_fields = [('Time stepper orig name', 0), ('Stencil Kernel semi-bandwidth', 1), ('Stencil Kernel coefficients', 0), ('Precision', 0), ('Number of time steps',1), ('Number of tests',1), ('Global NX',1), ('Global NY',1), ('Global NZ',1), ('Thread group size' ,1), ('Intra-diamond prologue/epilogue MStencils',1), ('Energy', 2), ('Energy DRAM', 2), ('Power',2), ('Power DRAM',2), ('WD main-loop RANK0 MStencil/s MAX', 2),('MStencil/s MAX', 2), ('OpenMP Threads',1)]
    data = []
    for k in raw_data:
        tup = dict()
        # defaults
        if k['Intra-diamond prologue/epilogue MStencils'] == '':
            k['Intra-diamond prologue/epilogue MStencils'] = 0
        # copy the general fields, converting to int/float as requested
        for f in req_fields:
            try:
                v = k[f[0]]
                if f[1]==1: v = int(k[f[0]])
                if f[1]==2: v = float(k[f[0]])
            except:
                # NOTE(review): bare except; on a conversion failure the raw
                # string is kept, but if the key is missing `v` retains the
                # previous field's value (or is undefined on the first
                # iteration, raising NameError below) — confirm intent.
                print f[0]
            tup[f[0]] = v
        # add the stencil operator
        tup['Stencil'] = get_stencil_num(k)
        data.append(tup)
    # for i in data: print i

    data2 = []
    for tup in data:
        # Giga lattice updates of the run (prologue/epilogue subtracted).
        glups = (tup['Number of time steps'] * tup['Global NX']*tup['Global NY']*tup['Global NZ'] - tup['Intra-diamond prologue/epilogue MStencils']*10**6 ) * tup['Number of tests'] / 10**9
        tup['Total pJoul/LUP'] = (tup['Energy'] + tup['Energy DRAM'])/glups
        tup['DRAM pJoul/LUP'] = (tup['Energy DRAM'])/glups
        tup['CPU pJoul/LUP'] = (tup['Energy'])/glups
        if 'Dynamic' in tup['Time stepper orig name']:
            tup['Time stepper orig name'] = 'MWD'
        # NOTE(review): after the rename above this second test can never be
        # True, so MWD rows always take the 'MStencil/s MAX' branch — confirm
        # whether the rename was meant to happen after this selection.
        if 'Dynamic' in tup['Time stepper orig name']:
            tup['Performance'] = tup['WD main-loop RANK0 MStencil/s MAX']
        else:
            tup['Performance'] = tup['MStencil/s MAX']
        tup['Threads'] = tup['OpenMP Threads']
        tup['Method'] = tup['Time stepper orig name']
        data2.append(tup)
    #for i in data2: print i

    from operator import itemgetter
    data2 = sorted(data2, key=itemgetter('Stencil', 'Thread group size', 'Time stepper orig name', 'Global NX', 'Global NY','Global NZ'))

    # Columns written to the output CSV, in order.
    fields = ['Method', 'Stencil', 'Threads', 'Thread group size', 'Global NX', 'Global NY','Global NZ', 'Precision', 'Power', 'Power DRAM', 'CPU pJoul/LUP', 'DRAM pJoul/LUP', 'Total pJoul/LUP', 'Performance']
    with open('energy_consumption.csv', 'w') as output_file:
        r = DictWriter(output_file,fieldnames=fields)
        r.writeheader()
        for k in data2:
            # keep only the declared columns
            k2 = dict()
            for f in k.keys():
                for f2 in fields:
                    if f == f2:
                        k2[f] = k[f]
            r.writerow(k2)
def load_csv(data_file):
    """Read a CSV file and return its rows as a list of dicts.

    Parameters
    ----------
    data_file : str
        Path of the CSV file to read.
    """
    from csv import DictReader
    # NOTE: binary mode is the Python 2 convention for the csv module;
    # under Python 3 this would need mode 'r' with newline=''.
    with open(data_file, 'rb') as output_file:
        data = DictReader(output_file)
        data = [k for k in data]
    return data

# Script entry point.
if __name__ == "__main__":
    main()
|
bsd-3-clause
|
Pinafore/qb
|
protobowl_user.py
|
2
|
3709
|
import os
import itertools
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
from multiprocessing import Pool
from datetime import datetime
from plotnine import (
ggplot,
aes,
theme,
geom_density,
geom_histogram,
geom_point,
scale_color_gradient,
)
from qanta.buzzer.util import load_protobowl
def process_user(uid):
    """Return one user's records in chronological order, keeping only the
    first record per question id.

    Reads the module-level ``df_grouped`` groupby.  Each kept row gets its
    ``user_n_records`` attribute set to the number of kept rows.
    """
    records = df_grouped.get_group(uid)
    # Parse the textual timestamps (the trailing 6 characters hold the
    # zone name) and obtain the chronological ordering of the records.
    parsed_dates = records.date.apply(
        lambda s: datetime.strptime(s[:-6], "%a %b %d %Y %H:%M:%S %Z%z")
    )
    ordering = parsed_dates.sort_values()
    seen_qids = set()
    kept = []
    for _, rec in records.loc[ordering.index].iterrows():
        if rec.qid not in seen_qids:
            seen_qids.add(rec.qid)
            kept.append(rec)
    total = len(kept)
    for rec in kept:
        rec.user_n_records = total
    return kept
"""filter users with less than 20 questions
and take the first entry for each question"""
# Cached result of the filtering step; rebuilt only when the file is missing.
filtered_df_dir = "filter_20_protobowl.h5"
df, questions = load_protobowl()
if os.path.isfile(filtered_df_dir):
    with pd.HDFStore("filter_20_protobowl.h5") as store:
        df = store["data"]
else:
    # Keep active users only, then deduplicate each user's records by
    # question id in parallel (process_user reads the df_grouped global).
    df = df[df.user_n_records > 20]
    df_grouped = df.groupby("uid")
    uids = list(df_grouped.groups.keys())
    pool = Pool(8)
    user_rows = pool.map(process_user, uids)
    df = pd.DataFrame(list(itertools.chain(*user_rows)), columns=df.columns)
    # Persist the filtered frame for subsequent runs.
    with pd.HDFStore("filter_20_protobowl.h5") as store:
        store["data"] = df
"""plotting"""
# Normalize the result column to booleans.
df.result = df.result.apply(lambda x: x is True)
# Buzz position as a fraction of the question length (in words).
ratio = [p / len(questions[x].split()) for p, x in zip(df.position, df.qid)]
df["ratio"] = pd.Series(ratio, index=df.index)
# Per-user mean statistics.
df_user_grouped = df.groupby("uid")
user_stat = df_user_grouped.agg(np.mean)
log_n_records = np.log(user_stat.user_n_records)
log_n_records = log_n_records.sort_values().values
log_n_records = {
    "log_n_records": log_n_records,
    "index": list(range(len(log_n_records))),
}
log_n_records = pd.DataFrame(log_n_records)
user_stat = user_stat.rename(
    index=str, columns={"result": "accuracy", "user_n_records": "n_records"}
)
user_stat = user_stat.loc[user_stat.n_records > 20]
print(len(user_stat))
print(len(df.loc[df.user_n_records > 20]))
print(len(df))
print(len(set(df.qid)))
user_stat["log_n_records"] = pd.Series(
    user_stat.n_records.apply(np.log), index=user_stat.index
)
# Alpha channel scaled by activity so heavy users stand out.
max_color = user_stat.log_n_records.max()
user_stat["alpha"] = pd.Series(
    user_stat.log_n_records.apply(lambda x: x / max_color), index=user_stat.index
)
# Scatter: buzz position vs accuracy, sized/colored by activity.
p0 = (
    ggplot(user_stat)
    + geom_point(
        aes(
            x="ratio",
            y="accuracy",
            size="n_records",
            color="log_n_records",
            alpha="alpha",
        ),
        show_legend={"color": False, "alpha": False, "size": False},
    )
    + scale_color_gradient(high="#e31a1c", low="#ffffcc")
    + theme(aspect_ratio=1)
)
p0.save("protobowl_users.pdf")
# p0.draw()
print("p0 done")
# Histogram + density of (log) record counts per user.
p1 = (
    ggplot(user_stat, aes(x="log_n_records", y="..density.."))
    + geom_histogram(color="#e6550d", fill="#fee6ce")
    + geom_density()
    + theme(aspect_ratio=0.3)
)
p1.save("protobowl_hist.pdf")
# p1.draw()
print("p1 done")
# Histogram + density of per-user accuracy.
p2 = (
    ggplot(user_stat, aes(x="accuracy", y="..density.."))
    + geom_histogram(color="#31a354", fill="#e5f5e0")
    + geom_density(aes(x="accuracy"))
    + theme(aspect_ratio=0.3)
)
p2.save("protobowl_acc.pdf")
# p2.draw()
print("p2 done")
# Histogram + density of buzz positions.
p3 = (
    ggplot(user_stat, aes(x="ratio", y="..density.."))
    + geom_histogram(color="#3182bd", fill="#deebf7")
    + geom_density(aes(x="ratio"))
    + theme(aspect_ratio=0.3)
)
p3.save("protobowl_pos.pdf")
# p3.draw()
print("p3 done")
|
mit
|
MingdaZhou/gnuradio
|
gr-filter/examples/synth_filter.py
|
58
|
2552
|
#!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import filter
from gnuradio import blocks
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
def main():
    """Synthesize several sine sources through a PFB synthesizer and plot
    the resulting time series and power spectral density."""
    N = 1000000            # number of complex samples to collect
    fs = 8000              # per-channel sample rate [Hz]
    freqs = [100, 200, 300, 400, 500]
    nchans = 7             # number of synthesizer channels

    # One complex sine source per frequency.
    sigs = list()
    for fi in freqs:
        s = analog.sig_source_c(fs, analog.GR_SIN_WAVE, fi, 1)
        sigs.append(s)

    # Prototype low-pass filter for the polyphase synthesis bank.
    taps = filter.firdes.low_pass_2(len(freqs), fs,
                                    fs/float(nchans)/2, 100, 100)
    print "Num. Taps = %d (taps per filter = %d)" % (len(taps),
                                                     len(taps)/nchans)
    filtbank = filter.pfb_synthesizer_ccf(nchans, taps)
    head = blocks.head(gr.sizeof_gr_complex, N)
    snk = blocks.vector_sink_c()

    tb = gr.top_block()
    tb.connect(filtbank, head, snk)
    # Feed each source into its own synthesizer input channel.
    for i,si in enumerate(sigs):
        tb.connect(si, (filtbank, i))
    tb.run()

    if 1:
        # Time-domain view (skip the filter transient).
        f1 = pylab.figure(1)
        s1 = f1.add_subplot(1,1,1)
        s1.plot(snk.data()[1000:])

        # PSD of the synthesized wideband signal.
        fftlen = 2048
        f2 = pylab.figure(2)
        s2 = f2.add_subplot(1,1,1)
        winfunc = scipy.blackman
        s2.psd(snk.data()[10000:], NFFT=fftlen,
               Fs = nchans*fs,
               noverlap=fftlen/4,
               window = lambda d: d*winfunc(fftlen))
    pylab.show()

if __name__ == "__main__":
    main()
|
gpl-3.0
|
chvogl/tardis
|
tardis/util.py
|
7
|
13796
|
# Utilities for TARDIS
from astropy import units as u, constants, units
import numpy as np
import os
import yaml
import re
import logging
import atomic
# Frequently used physical constants cached as plain cgs floats.
k_B_cgs = constants.k_B.cgs.value
c_cgs = constants.c.cgs.value
h_cgs = constants.h.cgs.value
m_e_cgs = constants.m_e.cgs.value
e_charge_gauss = constants.e.gauss.value
class MalformedError(Exception):
    """Base class for malformed-input errors raised by this module."""
class MalformedSpeciesError(MalformedError):
    """Raised when a species string (e.g. 'Si 2') cannot be parsed."""

    def __init__(self, malformed_element_symbol):
        # Keep the offending input so __str__ can report it.
        self.malformed_element_symbol = malformed_element_symbol

    def __str__(self):
        template = 'Expecting a species notation (e.g. "Si 2", "Si II", "Fe IV") - supplied %s'
        return template % self.malformed_element_symbol
class MalformedElementSymbolError(MalformedError):
    """Raised when an element symbol cannot be recognised."""

    def __init__(self, malformed_element_symbol):
        # Keep the offending input so __str__ can report it.
        self.malformed_element_symbol = malformed_element_symbol

    def __str__(self):
        template = 'Expecting an atomic symbol (e.g. Fe) - supplied %s'
        return template % self.malformed_element_symbol
class MalformedQuantityError(MalformedError):
    """Raised when a quantity string (e.g. '5 km/s') cannot be parsed."""

    def __init__(self, malformed_quantity_string):
        # Keep the offending input so __str__ can report it.
        self.malformed_quantity_string = malformed_quantity_string

    def __str__(self):
        template = 'Expecting a quantity string(e.g. "5 km/s") for keyword - supplied %s'
        return template % self.malformed_quantity_string
# Module-level logger and path of the bundled synpp default configuration.
logger = logging.getLogger(__name__)
synpp_default_yaml_fname = os.path.join(os.path.dirname(__file__), 'data', 'synpp_default.yaml')
def int_to_roman(input):
    """
    from http://code.activestate.com/recipes/81611-roman-numerals/
    Convert an integer to Roman numerals.

    Examples:
    >>> int_to_roman(0)
    Traceback (most recent call last):
    ValueError: Argument must be between 1 and 3999
    >>> int_to_roman(-1)
    Traceback (most recent call last):
    ValueError: Argument must be between 1 and 3999
    >>> int_to_roman(1.5)
    Traceback (most recent call last):
    TypeError: expected integer, got <type 'float'>
    >>> for i in range(1, 21): print int_to_roman(i)
    ...
    I
    II
    III
    IV
    V
    VI
    VII
    VIII
    IX
    X
    XI
    XII
    XIII
    XIV
    XV
    XVI
    XVII
    XVIII
    XIX
    XX
    >>> print int_to_roman(2000)
    MM
    >>> print int_to_roman(1999)
    MCMXCIX
    """
    # NOTE(review): this pre-conversion makes the TypeError check below
    # unreachable (int(1.5) is already an int), contradicting the 1.5
    # doctest above — confirm whether this line should be removed.
    input = int(input)
    if type(input) != type(1):
        raise TypeError, "expected integer, got %s" % type(input)
    if not 0 < input < 4000:
        raise ValueError, "Argument must be between 1 and 3999"
    # Greedy decomposition: subtract the largest representable value first.
    ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
    nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I')
    result = ""
    for i in range(len(ints)):
        count = int(input / ints[i])
        result += nums[i] * count
        input -= ints[i] * count
    return result
def roman_to_int(input):
    """
    from http://code.activestate.com/recipes/81611-roman-numerals/
    Convert a roman numeral to an integer.

    >>> r = range(1, 4000)
    >>> nums = [int_to_roman(i) for i in r]
    >>> ints = [roman_to_int(n) for n in nums]
    >>> print r == ints
    1
    >>> roman_to_int('VVVIV')
    Traceback (most recent call last):
    ...
    ValueError: input is not a valid roman numeral: VVVIV
    >>> roman_to_int(1)
    Traceback (most recent call last):
    ...
    TypeError: expected string, got <type 'int'>
    >>> roman_to_int('a')
    Traceback (most recent call last):
    ...
    ValueError: input is not a valid roman numeral: A
    >>> roman_to_int('IL')
    Traceback (most recent call last):
    ...
    ValueError: input is not a valid roman numeral: IL
    """
    if type(input) != type(""):
        raise TypeError, "expected string, got %s" % type(input)
    input = input.upper()
    nums = ['M', 'D', 'C', 'L', 'X', 'V', 'I']
    ints = [1000, 500, 100, 50, 10, 5, 1]
    places = []
    # Reject any character that is not a roman digit.
    for c in input:
        if not c in nums:
            raise ValueError, "input is not a valid roman numeral: %s" % input
    for i in range(len(input)):
        c = input[i]
        value = ints[nums.index(c)]
        # If the next place holds a larger number, this value is negative.
        try:
            nextvalue = ints[nums.index(input[i +1])]
            if nextvalue > value:
                value *= -1
        except IndexError:
            # there is no next place.
            pass
        places.append(value)
    sum = 0
    for n in places: sum += n
    # Easiest test for validity...
    # Round-trip through int_to_roman to reject non-canonical numerals.
    if int_to_roman(sum) == input:
        return sum
    else:
        raise ValueError, 'input is not a valid roman numeral: %s' % input
def calculate_luminosity(spec_fname, distance, wavelength_column=0, wavelength_unit=u.angstrom, flux_column=1,
                         flux_unit=u.Unit('erg / (Angstrom cm2 s)')):
    """Integrate a two-column spectrum file and convert the integrated flux
    to a luminosity at the given distance.

    Parameters
    ----------
    spec_fname : str
        Path of a whitespace-separated text file with wavelength and flux columns.
    distance : str
        Distance to the object, understood by astropy units.
    wavelength_column, flux_column : int
        Column indices in the file.
    wavelength_unit, flux_unit : astropy unit
        Units of the respective columns.

    Returns
    -------
    (float, float, float)
        Luminosity in erg/s, minimum and maximum wavelength in the file.
    """
    #BAD STYLE change to parse quantity
    # NOTE(review): u.Unit(distance) builds a Unit, not a Quantity with a
    # numeric value; presumably parse_quantity() was intended — confirm.
    distance = u.Unit(distance)
    wavelength, flux = np.loadtxt(spec_fname, usecols=(wavelength_column, flux_column), unpack=True)
    # Trapezoidal integration of flux over wavelength gives integrated flux.
    flux_density = np.trapz(flux, wavelength) * (flux_unit * wavelength_unit)
    luminosity = (flux_density * 4 * np.pi * distance**2).to('erg/s')
    return luminosity.value, wavelength.min(), wavelength.max()
def create_synpp_yaml(radial1d_mdl, fname, shell_no=0, lines_db=None):
    """Write a synpp configuration YAML file derived from a TARDIS
    radial 1D model.

    Parameters
    ----------
    radial1d_mdl : Radial1DModel
        Model providing atom data, plasma and spectrum information.
    fname : str
        Output YAML file name.
    shell_no : int
        Shell index (currently unused in the body).
    lines_db : str or None
        Optional path of a lines database directory.
    """
    logger.warning('Currently only works with Si and a special setup')
    if not radial1d_mdl.atom_data.has_synpp_refs:
        raise ValueError(
            'The current atom dataset does not contain the necesarry reference files (please contact the authors)')
    # Fill reference log(tau) values from the model's Sobolev optical depths;
    # lines missing from the plasma keep the -99 sentinel.
    radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] = -99.0
    for key, value in radial1d_mdl.atom_data.synpp_refs.iterrows():
        try:
            radial1d_mdl.atom_data.synpp_refs['ref_log_tau'].ix[key] = np.log10(
                radial1d_mdl.plasma_array.tau_sobolevs[0].ix[value['line_id']])
        except KeyError:
            pass
    # Keep only species with non-negligible optical depth.
    relevant_synpp_refs = radial1d_mdl.atom_data.synpp_refs[radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] > -50]
    yaml_reference = yaml.load(file(synpp_default_yaml_fname))
    if lines_db is not None:
        yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'lines')
        # NOTE(review): this second assignment overwrites the first with
        # 'refs.dat'; one of the two was probably meant for another key.
        yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'refs.dat')
    yaml_reference['output']['min_wl'] = float(radial1d_mdl.spectrum.wavelength.to('angstrom').value.min())
    yaml_reference['output']['max_wl'] = float(radial1d_mdl.spectrum.wavelength.to('angstrom').value.max())

    #raise Exception("there's a problem here with units what units does synpp expect?")
    # Velocities are expressed in units of 1000 km/s as expected by synpp.
    yaml_reference['opacity']['v_ref'] = float((radial1d_mdl.tardis_config.structure.v_inner[0].to('km/s') /
                                                (1000. * u.km / u.s)).value)
    yaml_reference['grid']['v_outer_max'] = float((radial1d_mdl.tardis_config.structure.v_outer[-1].to('km/s') /
                                                   (1000. * u.km / u.s)).value)
    #pdb.set_trace()

    # One entry per relevant species in the first setup.
    yaml_setup = yaml_reference['setups'][0]
    yaml_setup['ions'] = []
    yaml_setup['log_tau'] = []
    yaml_setup['active'] = []
    yaml_setup['temp'] = []
    yaml_setup['v_min'] = []
    yaml_setup['v_max'] = []
    yaml_setup['aux'] = []
    for species, synpp_ref in relevant_synpp_refs.iterrows():
        # synpp encodes a species as 100 * atomic_number + ion_number.
        yaml_setup['ions'].append(100 * species[0] + species[1])
        yaml_setup['log_tau'].append(float(synpp_ref['ref_log_tau']))
        yaml_setup['active'].append(True)
        yaml_setup['temp'].append(yaml_setup['t_phot'])
        yaml_setup['v_min'].append(yaml_reference['opacity']['v_ref'])
        yaml_setup['v_max'].append(yaml_reference['grid']['v_outer_max'])
        yaml_setup['aux'].append(1e200)
    yaml.dump(yaml_reference, stream=file(fname, 'w'), explicit_start=True)
def intensity_black_body(nu, T):
    """Planck's law: specific intensity of a black body, cgs units.

    I(nu, T) = (2 h nu^3 / c^2) / (exp(h nu / (k_B T)) - 1)

    Parameters
    ----------
    nu : float or ndarray
        Frequency.
    T : float or ndarray
        Temperature.
    """
    inv_kT = 1 / (k_B_cgs * T)
    spectral_radiance = (2 * (h_cgs * nu ** 3) / (c_cgs ** 2)) / (
        np.exp(h_cgs * nu * inv_kT) - 1)
    return spectral_radiance
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
    The Savitzky-Golay filter removes high frequency noise from data.
    It has the advantage of preserving the original shape and
    features of the signal better than other types of filtering
    approaches, such as moving averages techniques.

    Parameters
    ----------
    y : array_like, shape (N,)
        the values of the time history of the signal.
    window_size : int
        the length of the window. Must be an odd integer number.
    order : int
        the order of the polynomial used in the filtering.
        Must be less then `window_size` - 1.
    deriv: int
        the order of the derivative to compute (default = 0 means only smoothing)

    Returns
    -------
    ys : ndarray, shape (N)
        the smoothed signal (or it's n-th derivative).

    Notes
    -----
    The Savitzky-Golay is a type of low-pass filter, particularly
    suited for smoothing noisy data. The main idea behind this
    approach is to make for each point a least-square fit with a
    polynomial of high order over a odd-sized window centered at
    the point.

    Examples
    --------
    t = np.linspace(-4, 4, 500)
    y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
    ysg = savitzky_golay(y, window_size=31, order=4)
    import matplotlib.pyplot as plt
    plt.plot(t, y, label='Noisy signal')
    plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
    plt.plot(t, ysg, 'r', label='Filtered signal')
    plt.legend()
    plt.show()

    References
    ----------
    .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
       Data by Simplified Least Squares Procedures. Analytical
       Chemistry, 1964, 36 (8), pp 1627-1639.
    .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
       W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
       Cambridge University Press ISBN-13: 9780521880688
    """
    import numpy as np
    from math import factorial

    # Validate / coerce parameters (Python 2 except syntax).
    try:
        window_size = np.abs(np.int(window_size))
        order = np.abs(np.int(order))
    except ValueError, msg:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order+1)
    half_window = (window_size -1) // 2
    # precompute coefficients: pseudo-inverse of the Vandermonde matrix
    # gives the least-squares polynomial fit as a convolution kernel.
    b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
    m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
    # pad the signal at the extremes with
    # values taken from the signal itself (mirrored about the end points)
    firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
    lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve( m[::-1], y, mode='valid')
def species_tuple_to_string(species_tuple, roman_numerals=True):
    """Format an (atomic_number, ion_number) tuple as e.g. 'Si II' or 'Si 2'.

    ``ion_number`` is zero-based (0 == neutral), matching
    species_string_to_tuple which subtracts 1 when parsing; the printed
    charge state is therefore ``ion_number + 1`` in both notations.

    Parameters
    ----------
    species_tuple : (int, int)
        Atomic number and zero-based ion number.
    roman_numerals : bool
        Use roman numerals ('Si II') instead of decimal ('Si 2').
    """
    atomic_number, ion_number = species_tuple
    element_symbol = atomic.atomic_number2symbol[atomic_number]
    if roman_numerals:
        roman_ion_number = int_to_roman(ion_number+1)
        return '%s %s' % (element_symbol, roman_ion_number)
    else:
        # BUG FIX: previously printed the zero-based ion_number here, so the
        # decimal and roman notations disagreed and 'Fe 2' failed to
        # round-trip through species_string_to_tuple (came back as 'Fe 1').
        return '%s %d' % (element_symbol, ion_number + 1)
def species_string_to_tuple(species_string):
    """Parse strings such as 'Fe 2', 'Fe2' or 'Fe II' into
    (atomic_number, zero-based ion number).

    Raises MalformedSpeciesError when the string cannot be parsed and
    ValueError when the ion number exceeds the atomic number.
    """
    # First try the compact form '<symbol><digits>' (e.g. 'Fe2').
    compact = re.match('^(\w+)\s*(\d+)', species_string)
    if compact is not None:
        symbol, ion_text = compact.groups()
    else:
        # Fall back to the whitespace-separated form (e.g. 'Fe II').
        try:
            symbol, ion_text = species_string.split()
        except ValueError:
            raise MalformedSpeciesError('Species string "{0}" is not of format <element_symbol><number> '
                                        '(e.g. Fe 2, Fe2, ..)'.format(species_string))
    z = element_symbol2atomic_number(symbol)
    # The ion part may be roman ('II') or decimal ('2').
    try:
        charge_state = roman_to_int(ion_text)
    except ValueError:
        try:
            charge_state = int(ion_text)
        except ValueError:
            raise MalformedSpeciesError("Given ion number ('{}') could not be parsed ".format(ion_text))
    if charge_state > z:
        raise ValueError('Species given does not exist: ion number > atomic number')
    # Internally ion numbers are zero-based.
    return z, charge_state - 1
def parse_quantity(quantity_string):
    """Turn a string like '5 km/s' into an astropy Quantity.

    Raises MalformedQuantityError when the input is not a string, does not
    consist of exactly two whitespace-separated fields, or when either
    field fails to parse.
    """
    if not isinstance(quantity_string, basestring):
        raise MalformedQuantityError(quantity_string)
    pieces = quantity_string.split()
    if len(pieces) != 2:
        raise MalformedQuantityError(quantity_string)
    value_string, unit_string = pieces
    try:
        numeric_value = float(value_string)
    except ValueError:
        raise MalformedQuantityError(quantity_string)
    try:
        return u.Quantity(numeric_value, unit_string)
    except ValueError:
        raise MalformedQuantityError(quantity_string)
def element_symbol2atomic_number(element_string):
    """Return the atomic number for an element symbol, case-insensitively.

    Raises MalformedElementSymbolError for unknown symbols.
    """
    symbol = reformat_element_symbol(element_string)
    if symbol in atomic.symbol2atomic_number:
        return atomic.symbol2atomic_number[symbol]
    raise MalformedElementSymbolError(element_string)
def atomic_number2element_symbol(atomic_number):
    """Return the element symbol string for the given atomic number."""
    symbol_lookup = atomic.atomic_number2symbol
    return symbol_lookup[atomic_number]
def reformat_element_symbol(element_string):
    """
    Reformat the string so the first letter is uppercase and all subsequent
    letters lowercase ('si' -> 'Si', 'FE' -> 'Fe').

    Parameters
    ----------
    element_string: str

    Returns
    -------
    reformatted element symbol (the empty string maps to itself)
    """
    # Slicing instead of indexing makes the empty string return ''
    # rather than raising IndexError.
    return element_string[:1].upper() + element_string[1:].lower()
|
bsd-3-clause
|
hposborn/Namaste
|
namaste/run.py
|
1
|
66119
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
:py:mod:`Namaste.py` - Single transit fitting code
-------------------------------------
'''
import numpy as np
import pylab as plt
import scipy.optimize as optimize
from os import sys, path
import datetime
import logging
import pandas as pd
import click
import emcee
import celerite
from .planetlib import *
from .Crossfield_transit import *
from scipy import stats
import scipy.optimize as opt
import pickle
#How best to do this...
#
# make lightcurve a class
# make candidate a class
# make star a class
# make planet a class
# Give candidate a star and planet and lightcurve
# Use the planet to generate the lightcurve model
# Use the star to generate priors
# Give candidate data in the form of a lightcurve - can switch out for CoRoT, Kepler, etc.
class Settings():
    '''
    Holds the tunable configuration of a Namaste model fit.
    '''
    def __init__(self, **kwargs):
        # Defaults; call update(**kwargs) to override any of them.
        self.GP = True                   # GP on/off
        self.nopt = 25                   # optimisation steps for each GP/mono before mcmc
        self.nsteps = 12500              # number of MCMC steps
        self.npdf = 6000                 # samples per distribution used in calcs
        self.nwalkers = 24               # number of emcee walkers
        self.nthreads = 8                # number of emcee threads
        self.timecut = 6                 # Tdurs either side of the transit to fit
        self.anomcut = 3.5               # sigma threshold for outliers
        self.fitsloc = './FitsFiles/'    # storage location to load stuff
        self.outfilesloc = './Outputs/'  # storage location to save stuff
        self.cadence = 0.0204318         # cadence; defaults to K2
        self.kernel = 'quasi'            # kernel for use in GPs
        self.verbose = True              # print statements or not
        self.mission = 'K2'              # mission

    def update(self, **kwargs):
        '''
        Override any subset of the settings via keyword arguments;
        unspecified settings keep their current values.
        '''
        for name in ('GP', 'nopt', 'nsteps', 'npdf', 'nwalkers', 'nthreads',
                     'timecut', 'anomcut', 'fitsloc', 'outfilesloc',
                     'cadence', 'kernel', 'verbose', 'mission'):
            setattr(self, name, kwargs.pop(name, getattr(self, name)))

    def printall(self):
        '''Print every current setting.'''
        print(vars(self))
class Star():
    '''
    A star hosting one or more transit candidates.

    Holds the stellar parameters (radius, mass, Teff, logg, [Fe/H], density),
    their sampled PDFs, the lightcurve, the GP noise model and the list of
    mean (transit) models to be jointly fitted with emcee.
    '''
    def __init__(self, name, settings):
        self.objname = name
        self.settings = settings
        #Initialising lists of monotransits and multi-planet models
        self.meanmodels=[] #list of mean models to fit.
        self.fitdict={} #dictionary of model parameter PDFs to fit

    def exofop_dat(self):
        '''Fill the stellar parameters from an ExoFOP query; raises ValueError
        if radius, teff or mass are missing.'''
        sdic=ExoFop(int(self.objname))
        if 'radius' in sdic.columns:
            self.addRad(sdic['radius'],sdic['radius_err'],np.max([sdic['radius']*0.8,sdic['radius_err']]))
        else:
            raise ValueError("No radius")
        if 'teff' in sdic.columns:
            self.addTeff(sdic['teff'],sdic['teff_err'],sdic['teff_err'])
        else:
            raise ValueError("No teff")
        if 'mass' in sdic.columns:
            self.addMass(sdic['mass'],sdic['mass_err'],sdic['mass_err'])
        else:
            raise ValueError("No mass")
        if 'logg' in sdic.columns:
            self.addlogg(sdic['logg'],sdic['logg_err'],sdic['logg_err'])
        if 'feh' in sdic.columns:
            self.addfeh(sdic['feh'],sdic['feh_err'],sdic['feh_err'])
        if 'density' in sdic.columns:
            self.addDens(sdic['density'],sdic['density_err'],sdic['density_err'])
        else:
            self.addDens()

    def csvfile_dat(self,file):
        '''Fill the stellar parameters from a CSV catalogue row matched on
        the EPIC number (e.g. Best_Stellar_Params_nogriz).'''
        # pd.DataFrame.from_csv was removed in pandas 1.0; read_csv with
        # index_col=0 is the documented replacement.
        df=pd.read_csv(file,index_col=0)
        row=df.loc[df.epic==int(self.objname)]
        csvname=row.index.values[0]
        row=row.T.to_dict()[csvname]
        self.addRad(row['rad'],row['radep'],row['radem'])
        self.addTeff(row['teff'],row['teffep'],row['teffem'])
        self.addMass(row['mass'],row['massep'],row['massem'])
        self.addlogg(row['logg'],row['loggep'],row['loggem'])
        self.addfeh(row['feh'],row['fehep'],row['fehem'])
        if not pd.isnull(row['rho']):
            self.addDens(row['rho'],row['rhoep'],row['rhoem'])
        else:
            self.addDens()
    #avs avsem avsep dis disem disep epic feh fehem fehep input 2MASS input BV
    #input SDSS input_spec logg loggem loggep lum lumem lumep mass massem massep
    #n_mod prob rad radem radep rank realspec rho rho_err rhoem rhoep teff teffem teffep

    def addTeff(self,val,uerr,derr=None):
        '''Set effective temperature and its upper/lower errors (derr
        defaults to uerr).'''
        self.steff = val
        self.steffuerr = uerr
        self.steffderr = uerr if derr is None else derr

    def addRad(self,val,uerr,derr=None):
        '''Set stellar radius and its upper/lower errors.'''
        self.srad = val
        self.sraduerr = uerr
        self.sradderr = uerr if derr is None else derr

    def addMass(self,val,uerr,derr=None):
        '''Set stellar mass and its upper/lower errors.'''
        self.smass = val
        self.smassuerr = uerr
        self.smassderr = uerr if derr is None else derr

    def addlogg(self,val,uerr,derr=None):
        '''Set stellar log(g) and its upper/lower errors.'''
        self.slogg = val
        self.slogguerr = uerr
        self.sloggderr = uerr if derr is None else derr

    def addfeh(self,val,uerr,derr=None):
        '''Set stellar [Fe/H] and its upper/lower errors.'''
        self.sfeh = val
        self.sfehuerr = uerr
        self.sfehderr = uerr if derr is None else derr

    def addDens(self,val=None,uerr=None,derr=None):
        '''Set stellar density (in solar units). With no value, derive it
        from the other stellar parameters via CalcDens; values >200 are
        assumed to be in kg/m^3 and are normalised by the solar density.'''
        if val is None:
            val,uerr,derr=CalcDens(self)
        else:
            #Density defined by others
            if val>200:
                #Normalising to Solar density (1410 kg/m^3):
                val/=1410.0;uerr/=1410.0;derr/=1410.0
        self.sdens = val
        self.sdensuerr = uerr
        self.sdensderr = uerr if derr is None else derr

    def addLightcurve(self,file):
        '''Attach a Lightcurve and estimate the white-noise level from the
        magnitude (Kepler/K2) or from the point-to-point scatter.'''
        self.Lcurve=Lightcurve(file,self.objname)
        self.mag=self.Lcurve.mag
        # Case-insensitive check: the default settings.mission is 'K2',
        # which the original lowercase comparison silently missed.
        if self.settings.mission.lower() in ("kepler","k2"):
            self.wn=2.42e-4/np.sqrt(10**((14-self.mag)/2.514)) #2.42e-4 is the White Noise at 14th magnitude for Kepler.
        else:
            self.wn=np.percentile(abs(np.diff(self.Lcurve.lc[:,1])),40) #Using 40th percentile of the absolute differences.
        return self.Lcurve

    def EstLimbDark(self):
        '''Estimate quadratic limb-darkening coefficient PDFs and their
        median/1-sigma summaries from the stellar parameter PDFs.'''
        LDs=getKeplerLDs(self.steffs,logg=self.sloggs,FeH=self.sfeh)
        # NOTE(review): assumes getKeplerLDs returns shape (2, npdf) —
        # LDs[0]/LDs[1] are the LD1/LD2 sample arrays. The original mixed
        # LDs[0] with LDs[:,0], which cannot both be right; made consistent
        # here — TODO confirm against getKeplerLDs.
        pctns=np.percentile(LDs[0],[15.865525393145707, 50.0, 84.13447460685429])
        self.LD1s=LDs[0]
        self.LD1=pctns[1]
        self.LD1uerr=pctns[2]-pctns[1]
        self.LD1derr=pctns[1]-pctns[0]
        pctns=np.percentile(LDs[1],[15.865525393145707, 50.0, 84.13447460685429])
        self.LD2s=LDs[1]
        self.LD2=pctns[1]
        self.LD2uerr=pctns[2]-pctns[1]
        self.LD2derr=pctns[1]-pctns[0]
        self.initLD()

    def PDFs(self):
        '''Draw (possibly asymmetric) sample distributions for each stellar
        parameter, then estimate limb darkening from them.'''
        self.steffs=noneg_GetAssymDist(self.steff,self.steffuerr,self.steffderr,nd=self.settings.npdf)
        self.srads =noneg_GetAssymDist(self.srad,self.sraduerr,self.sradderr,nd=self.settings.npdf)
        self.smasss=noneg_GetAssymDist(self.smass,self.smassuerr,self.smassderr,nd=self.settings.npdf)
        self.sloggs=GetAssymDist(self.slogg,self.slogguerr,self.sloggderr,nd=self.settings.npdf,returndist=True)
        self.sdenss=noneg_GetAssymDist(self.sdens,self.sdensuerr,self.sdensderr,nd=self.settings.npdf)
        self.EstLimbDark()

    def pdflist(self):
        '''Return a dict of all stellar-parameter sample arrays, generating
        them first if needed.'''
        if not hasattr(self,'steffs'):
            self.PDFs()
        if not hasattr(self,'LD1s'):
            self.EstLimbDark()
        return {'steffs':self.steffs,'srads':self.srads,'smasss':self.smasss,'sloggs':self.sloggs,'sdenss':self.sdenss,\
                'LD1s':self.LD1s,'LD2s':self.LD2s}

    def addGP(self,vector=None):
        '''Build the celerite kernel ('Real' or 'quasi' + jitter), optionally
        seeding it from a parameter vector, and freeze the non-fitted terms.'''
        if not hasattr(self, 'kern'):
            if self.settings.kernel=='Real':
                self.kern=celerite.terms.RealTerm(log_a=np.log(np.var(self.Lcurve.lc[:,1])), log_c=-np.log(3.0))+\
                          celerite.terms.JitterTerm(np.log(self.wn),bounds=dict(log_sigma=(np.log(self.wn)-0.1,np.log(self.wn)+0.1)))
                if vector is not None:
                    self.kern.set_parameter_vector(vector)
                self.kern.freeze_parameter('terms[1]:log_sigma') # and freezing white noise
            elif self.settings.kernel=='quasi':
                self.kern= RotationTerm(np.log(np.var(self.Lcurve.lc[:,1])), np.log(0.25*self.Lcurve.range), np.log(2.5), 0.0,
                                        bounds=dict(
                                            log_amp=(-20.0, -2.0),
                                            log_timescale=(np.log(1.5), np.log(5*self.Lcurve.range)),
                                            log_period=(np.log(1.2), np.log(2*self.Lcurve.range)),
                                            log_factor=(-5.0, 5.0),
                                            )
                                        )+\
                           celerite.terms.JitterTerm(np.log(self.wn),
                                        bounds=dict(
                                            log_sigma=(np.log(self.wn)-0.1,np.log(self.wn)+0.1)
                                            )
                                        )
                if vector is not None:
                    self.kern.set_parameter_vector(vector)
                self.kern.freeze_parameter('terms[0]:log_factor') # freezing log factor
                self.kern.freeze_parameter('terms[1]:log_sigma') # and freezing white noise
            self.initgp={itrm:self.kern.get_parameter(itrm) for itrm in self.kern.get_parameter_names()}
        else:
            self.initgp={itrm:self.kern.get_parameter(itrm) for itrm in self.kern.get_parameter_names()}

    def Optimize_GP(self):
        '''
        Optimize the GP hyperparameters on out-of-transit data only.

        Runs L-BFGS-B from ``settings.nopt`` randomised starting points,
        keeps the best successful fit, copies it back into ``self.kern``
        (re-freezing the non-fitted terms) and seeds ``fitdict`` with
        walker start positions around the optimum.
        '''
        import scipy.optimize as op
        #Cutting transits from lc
        self.Lcurve.calc_mask(self.meanmodels)
        lc_trn=self.Lcurve.lc[self.Lcurve.lcmask]
        lc_trn[:,1]/=np.nanmedian(lc_trn[:,1])#This half may be different in median from the full lc, so adjusting for this...
        if not hasattr(self,'gp'):
            self.addGP()
        self.kern.thaw_all_parameters()
        gp_notr=celerite.GP(kernel=self.kern,mean=1.0,fit_mean=True)
        gp_notr.compute(lc_trn[:,0],lc_trn[:,2])
        #Initial results:
        init_res=op.minimize(neg_log_like, list(gp_notr.get_parameter_vector()), args=(lc_trn[:,1],gp_notr), method="L-BFGS-B")#jac=grad_neg_log_like
        fails=0
        if self.settings.kernel=='quasi':
            # 6 fitted params (4 rotation + jitter + mean) + neg-log-prob = 7 columns
            suc_res=np.zeros(7)
            # Run the optimization routine for a grid of size self.settings.nopt
            #log_amp, log_timescale, log_period, log_factor, log_sigma, mean = params
            # NOTE(review): the timescale draw uses uniform(1.2, ...) in linear
            # space while the bound is log-space — possibly should be
            # np.log(1.2); kept as original pending confirmation.
            iterparams= np.column_stack((np.random.normal(gp_notr.kernel.get_parameter_vector()[0],3.0,self.settings.nopt),
                                         np.random.uniform(1.2,np.log(0.75*self.Lcurve.range),self.settings.nopt),
                                         np.random.uniform(np.log(6*self.settings.cadence),np.log(0.75*self.Lcurve.range),self.settings.nopt),
                                         np.tile(0.0,self.settings.nopt)))
        elif self.settings.kernel=='Real':
            # 4 fitted params (2 kernel + jitter + mean) + neg-log-prob = 5
            # columns; the original np.zeros(4) would have broken the vstack.
            suc_res=np.zeros(5)
            #log_a, log_c = params
            iterparams=np.column_stack((np.random.normal(gp_notr.kernel.get_parameter_vector()[0],np.sqrt(abs(gp_notr.kernel.get_parameter_vector()[0])),self.settings.nopt),
                                        np.random.normal(gp_notr.kernel.get_parameter_vector()[1],np.sqrt(abs(gp_notr.kernel.get_parameter_vector()[1])),self.settings.nopt)))
        for n_p in np.arange(self.settings.nopt):
            vect=np.hstack((iterparams[n_p],np.log(self.wn),1.0))
            #gp_notr.kernel.set_parameter_vector(vect)
            try:
                result = op.minimize(neg_log_like, vect, args=(lc_trn[:,1], gp_notr), method="L-BFGS-B")# jac=grad_nll,")
                if result.success:
                    #print("success,",result.fun)
                    suc_res=np.vstack((suc_res,np.hstack((result.x,result.fun))))
                else:
                    fails+=1
            except Exception:
                #print("fail,",vect)
                fails+=1
        print(suc_res) if self.settings.verbose else 0
        print(str(fails)+" failed attempts out of "+str(self.settings.nopt)) if self.settings.verbose else 0
        if len(np.shape(suc_res))==1:
            raise ValueError("No successful GP minimizations")
        else:
            suc_res=suc_res[1:,:]           #dropping the zero seed row
            bestres=suc_res[np.argmin(suc_res[:,-1])]
            gp_notr.set_parameter_vector(bestres[:-1])
            self.optimised_gp=gp_notr
            # NOTE(review): index 4 is log_sigma only for the quasi kernel;
            # for 'Real' this picks the wrong column — kept as original.
            wn_factor = bestres[4]-np.log(self.wn)
            self.optgp={itrm:gp_notr.get_parameter(itrm) for itrm in gp_notr.get_parameter_names()}
            # Update the kernel and print the final log-likelihood.
            for itrm in gp_notr.kernel.get_parameter_names()[:-1]:
                self.kern.set_parameter(itrm,gp_notr.kernel.get_parameter(itrm))
            if self.settings.kernel=='quasi':
                self.kern.freeze_parameter('terms[0]:log_factor') # re-freezing log factor
            self.kern.freeze_parameter('terms[1]:log_sigma') # and re-freezing white noise
            #Seeding emcee walker start positions around the optimum:
            self.fitdict.update({'kernel:'+nm:np.random.normal(gp_notr.get_parameter('kernel:'+nm),1.5,self.settings.nwalkers) for nm in self.kern.get_parameter_names()})
            print("white noise changed by a factor of "+str(np.exp(wn_factor))[:4]) if self.settings.verbose else 0
            print("GP improved from ",init_res.fun," to ",bestres[-1]) if self.settings.verbose else 0
        '''return bestres[:-2] #mean_shift... is this indicative of the whole lightcurve or just this half of it?'''

    def AddMonotransit(self, tcen, tdur, depth, b=0.41,replace=True):
        '''Create (or replace) the monotransit candidate, optimize it on the
        flattened lightcurve and add it to the mean-model list.'''
        if not hasattr(self,'steffs'):
            self.PDFs()
        #Adding monotransit classes to the star class... Up to four possible.
        if not hasattr(self,'mono1') or replace:
            #self.LD1s, self.LD2s, self.sdenss,self.Lcurve.lc,
            self.mono1 = Monotransit(tcen, tdur, depth, self.settings, name=self.objname+'.1')
            self.mono1.calcmaxvel(self.Lcurve.lc,self.sdenss)
            self.mono1.Optimize_mono(self.Lcurve.flatten(),self.LDprior.copy())
            self.mono1.SaveInput(self.pdflist())
            self.meanmodels+=[self.mono1]
        '''
        while a<=5 and len(self.meanmodels)==initlenmonos:
            if not hasattr(self,'mono'+str(a)) or replace:
                setattr(self,'mono'+str(a)) = Monotransit(tcen, tdur, depth, self.settings, self.LD1s, self.LD2s, self.denss,self.lc, name=self.objname+'.'+str(a), b=b)
                exec("self.mono"+str(a)+".Optimize_mono(Lcurve.flaten())")
                exec("self.mono"+str(a)+".calcmaxvel(self.Lcurve.lc,self.sdenss)")
                exec("self.meanmodels+=[self.mono"+str(a)+"]")
            a+=1
        '''

    def AddNormalPlanet(self, tcen, tdur, depth, Period, b=0.41,replace=False):
        '''Create (or replace) a normal (periodic) transiting-planet model
        and add it to the mean-model list.'''
        if not hasattr(self,'steffs'):
            self.PDFs()
        #Adding transiting planet classes to the star class using dfm's "transit"... Up to four possible.
        if not hasattr(self,'norm1') or replace:
            self.norm1 = Multtransit(tcen, tdur, depth, self.settings, name=self.objname+'.5', b=b)
            self.meanmodels+=[self.norm1]
        '''
        while a<=5 and len(self.meanmodels)==initlenmonos:
            if not hasattr(self,'mono'+str(a)) or replace:
                setattr(self,'mono'+str(a)) = Monotransit(tcen, tdur, depth, self.settings, self.LD1s, self.LD2s, self.denss,self.lc, name=self.objname+'.'+str(a), b=b)
                exec("self.mono"+str(a)+".Optimize_mono(Lcurve.flaten())")
                exec("self.mono"+str(a)+".calcmaxvel(self.Lcurve.lc,self.sdenss)")
                exec("self.meanmodels+=[self.mono"+str(a)+"]")
            a+=1
        '''

    def initLD(self):
        '''Build gaussian priors on the two limb-darkening coefficients,
        bounded to [0,1].'''
        if not hasattr(self,'steffs'):
            self.PDFs()
        #Getting LD parameters for transit modelling:
        self.LDprior={'LD1':[0,1.0,'gaussian',np.median(self.LD1s),np.std(self.LD1s)],
                      'LD2':[0,1.0,'gaussian',np.median(self.LD2s),np.std(self.LD2s)]}

    def BuildMeanModel(self):
        '''Combine the candidate parameters into the celerite mean model.'''
        #for model in self.meanmodels: #<<<TBD
        self.meanmodel_comb=MonotransitModel(tcen=self.mono1.tcen,
                                             b=self.mono1.b,
                                             vel=self.mono1.vel,
                                             RpRs=self.mono1.RpRs,
                                             LD1=np.median(self.LD1s),
                                             LD2=np.median(self.LD2s))

    def BuildMeanPriors(self):
        '''Combine the monotransit priors with the LD priors under the
        celerite 'mean:' namespace.'''
        if not hasattr(self, 'LDprior'):
            self.initLD()
        self.meanmodel_priors=self.mono1.priors.copy()
        self.meanmodel_priors.update({'mean:'+ldp:self.LDprior[ldp] for ldp in self.LDprior})

    def BuildAllPriors(self,keylist=None):
        '''Merge mean-model and kernel priors, optionally re-ordering them to
        match the gp parameter-vector key order.'''
        if not hasattr(self, 'meanmodel_priors'):
            self.BuildMeanPriors()
        self.priors=self.meanmodel_priors.copy()
        self.priors.update({'kernel:'+self.kern.get_parameter_names()[keyn]:[self.kern.get_parameter_bounds()[keyn][0],
                                                                             self.kern.get_parameter_bounds()[keyn][1]]
                            for keyn in range(len(self.kern.get_parameter_names()))
                            })
        self.priors['kernel:terms[0]:log_amp']=self.priors['kernel:terms[0]:log_amp']+['evans',0.25*len(self.Lcurve.lc[:,0])]
        print(self.priors) if self.settings.verbose else 0
        if keylist is not None:
            #Sorting to match parameter vector:
            newprior={key:self.priors[key] for key in keylist}
            self.priors=newprior
            print(self.priors) if self.settings.verbose else 0

    def RunModel(self):
        '''
        Run the full emcee fit of GP + transit mean model.

        Builds the priors and celerite GP, seeds the walkers from the
        parameter PDFs, runs the sampler, trims burn-in and failed walkers,
        and saves the resulting sample array.
        '''
        self.BuildMeanPriors()
        self.BuildMeanModel()
        self.gp=celerite.GP(kernel=self.kern,mean=self.meanmodel_comb,fit_mean=True)
        self.BuildAllPriors(self.gp.get_parameter_names())
        #Returning monotransit model from information.
        chx=np.random.choice(self.settings.npdf,self.settings.nwalkers,replace=False)
        self.fitdict.update({'mean:'+nm:getattr(self.mono1,nm+'s')[chx] for nm in ['tcen','b','vel','RpRs']})
        self.fitdict.update({'mean:'+nm:getattr(self,nm+'s')[chx] for nm in ['LD1','LD2']})
        #Replacing NaNs in the walker seeds with the distribution median
        #(the original filled them with the median of the NaN *mask*):
        for row in self.fitdict:
            self.fitdict[row][np.isnan(self.fitdict[row])]=np.nanmedian(self.fitdict[row])
        dists=[self.fitdict[cname] for cname in self.gp.get_parameter_names()]
        self.init_mcmc_params=np.column_stack(dists)
        print(np.shape(self.init_mcmc_params)) if self.settings.verbose else 0
        mask=abs(self.Lcurve.lc[:,0]-self.gp.get_parameter('mean:tcen'))<2.75
        PlotModel(self.Lcurve.lc[mask,:], self.gp, np.median(self.init_mcmc_params,axis=0), fname=self.settings.outfilesloc+self.objname+'_initfit.png')
        print("EMCEE HAPPENING. INIT DISTS:") if self.settings.verbose else 0
        print(self.init_mcmc_params[0,:]) if self.settings.verbose else 0
        print(self.gp.get_parameter_names()) if self.settings.verbose else 0
        print(self.priors.keys()) if self.settings.verbose else 0
        print(' \n '.join([str(list(self.priors.keys())[nk])+' - '+str(self.priors[list(self.priors.keys())[nk]][0])+" > "+str(np.median(self.init_mcmc_params[nk]))+\
                           " < "+str(self.priors[list(self.priors.keys())[nk]][1]) for nk in range(len(self.priors.keys()))]\
                           )) if self.settings.verbose else 0
        self.sampler = emcee.EnsembleSampler(self.settings.nwalkers, len(self.gp.get_parameter_vector()), MonoLogProb, args=(self.Lcurve.lc,self.priors,self.gp), threads=self.settings.nthreads)
        self.sampler.run_mcmc(self.init_mcmc_params, 1, rstate0=np.random.get_state())
        self.sampler.run_mcmc(self.init_mcmc_params, self.settings.nsteps, rstate0=np.random.get_state())
        #Trimming samples (burn-in):
        ncut=np.min([int(self.settings.nsteps*0.25),3000])
        lnprobs=self.sampler.lnprobability[:,ncut:]
        prcnt=np.percentile(lnprobs,[50,95],axis=1)
        #"Failed" walkers are where the 95th percentile is below the median of the rest
        good_wlkrs=(prcnt[1]>np.median(prcnt[0]))
        # celerite returns a tuple of names; the original tuple+list concat
        # would raise TypeError:
        self.sampleheaders=list(self.gp.get_parameter_names())+['logprob']
        self.samples = self.sampler.chain[good_wlkrs, ncut:, :].reshape((-1, len(self.gp.get_parameter_vector())))
        self.samples = np.column_stack((self.samples,self.sampler.lnprobability[good_wlkrs,ncut:].reshape(-1)))
        #Making impact parameter always positive (original had 'self.amples'):
        self.samples[:,1]=abs(self.samples[:,1])
        self.SaveMCMC()

    def SaveMCMC(self):
        '''Save the trimmed MCMC sample array to the output directory.'''
        np.save(self.settings.outfilesloc+self.objname+'_MCMCsamples',self.samples)

    def MonoFinalPars(self,model=None):
        '''
        Derive physical planet parameters (Rp, sma, P, Mp, K_rv) from the
        MCMC samples and attach them, with +/-1-sigma errors, to *model*
        (defaults to self.mono1).
        '''
        if model is None and hasattr(self,'mono1'):
            model=self.mono1
        heads=np.array(self.sampleheaders)
        #Columns of the sample array belonging to the mean (transit) model
        #(the original iterated a boolean list and called .split on bools):
        meancols=[nc for nc in range(len(heads)) if 'mean:' in heads[nc]]
        model.gen_PDFs({heads[nc].split(":")[-1]+'s':self.samples[:,nc] for nc in meancols})
        #Taking random Nsamples from samples to put through calculations
        rn=np.random.choice(len(self.samples[:,0]),self.settings.npdf,replace=False)
        #Planet radius in Earth radii (6.955e8 m per Rsun, 6.371e6 m per Rearth).
        # NOTE(review): original used undefined self.Rss/self.Mss; assuming the
        # stellar radius/mass PDFs self.srads/self.smasss — TODO confirm.
        setattr(model,'Rps',(self.samples[rn,heads=='mean:RpRs']*695500000*self.srads)/6.371e6)#inearths
        setattr(model,'Prob_pl',len(model.Rps[model.Rps<(1.5*11.2)])/len(model.Rps))
        aest,Pest=VelToOrbit(self.samples[rn,heads=='mean:vel'], self.sdenss, self.smasss)
        setattr(model,'smas',aest)
        setattr(model,'Ps',Pest)
        setattr(model,'Mps',PlanetRtoM(model.Rps))
        setattr(model,'Krvs',((2.*np.pi*6.67e-11)/(model.Ps*86400))**(1./3.)*(model.Mps*5.96e24/((1.96e30*self.smasss)**(2./3.))))
        sigs=[15.865525393145707, 50.0, 84.13447460685429]
        for val in ['Rps','smas','Ps','Mps','Krvs']:
            percnts=np.percentile(np.array(getattr(model,val)), sigs)
            setattr(model,val[:-1],percnts[1])
            setattr(model,val[:-1]+'uerr',(percnts[2]-percnts[1]))
            setattr(model,val[:-1]+'derr',(percnts[1]-percnts[0]))

    def PlotMCMC(self,usecols=None):
        '''
        Corner plot of the MCMC samples, with the best-fit model,
        GP-subtracted lightcurve and residuals inset, annotated with the
        derived stellar and planetary parameters. Saves pdf + png.
        '''
        import corner
        import matplotlib
        import os
        # Raw strings: the original non-raw labels turned \t and \r into
        # tab/carriage-return escape characters.
        newnames={'kernel:terms[0]:log_amp':r'$\log{a}$',
                  'kernel:terms[0]:log_timescale':r'$\log{\tau}$',
                  'kernel:terms[0]:log_period':r'$\log{P}$',
                  'mean:tcen':r'$t_{\rm cen}$',
                  'mean:b':r'$b$',
                  'mean:vel':r"$v'$",
                  'mean:RpRs':r'$R_p/R_s$',
                  'mean:LD1':r'LD$_1$',
                  'mean:LD2':r'LD$_2$'}
        if usecols is None:
            #Plotting corner with all parameter names
            usecols=self.gp.get_parameter_names()
        plt.figure(1)
        Npars=len(self.samples[0])-1  #last sample column is logprob
        tobeplotted=np.in1d(self.gp.get_parameter_names(),usecols)
        #Clipping extreme values (top/bottom 0.1 percentiles):
        toclip=np.array([(np.percentile(self.samples[:,t],99.9)>self.samples[:,t]) & (self.samples[:,t]>np.percentile(self.samples[:,t],0.1)) for t in np.arange(Npars)[tobeplotted]]).all(axis=0)
        clipsamples=self.samples[toclip]
        labs = [newnames[key] for key in self.gp.get_parameter_names() if key in usecols]
        #This plots the corner:
        fig = corner.corner(clipsamples[:,tobeplotted], labels=labs, quantiles=[0.16, 0.5, 0.84], plot_datapoints=False,range=np.tile(0.985,Npars))
        #Making sure the lightcurve plot doesnt overstep the corner
        ndim=int(np.sum(tobeplotted))
        rows=(ndim-1)//2
        cols=(ndim-1)//2
        #Printing object name on plot
        plt.subplot(ndim,ndim,ndim+3).axis('off')
        plt.title(str(self.objname), fontsize=22)
        #This plots the model on the same plot as the corner
        ax = plt.subplot2grid((ndim,ndim), (0, ndim-cols), rowspan=rows-1-int(self.settings.GP), colspan=cols)
        modelfits=PlotModel(self.Lcurve.lc, self.gp, np.nanmedian(self.samples,axis=0))
        #If we do a Gaussian Process fit, plotting the transit-subtracted GP model
        ax=plt.subplot2grid((ndim,ndim), (rows-2, ndim-cols), rowspan=1, colspan=cols)
        _=PlotModel(self.Lcurve.lc, self.gp, np.nanmedian(self.samples,axis=0), prevmodels=modelfits, subGP=True)
        #plotting residuals beneath:
        ax = plt.subplot2grid((ndim,ndim), (rows-1, ndim-cols), rowspan=1, colspan=cols)
        _=PlotModel(self.Lcurve.lc, self.gp, np.nanmedian(self.samples,axis=0), prevmodels=modelfits, residuals=True)
        #Adding text values to MCMC pdf, positioned wrt the residuals plot...
        xlims=ax.get_xlim()
        x0=(xlims[0]+0.5*(xlims[1]-xlims[0])) #Left of box in x
        xwid=0.5*(xlims[1]-xlims[0]) #Total width of ybox
        ylims=ax.get_ylim()
        y1=(ylims[0]-0.5*(ylims[1]-ylims[0])) #Top of y box
        yheight=-2.5*(ylims[1]-ylims[0]) #Total height of ybox
        from matplotlib import rc;rc('text', usetex=True)
        matplotlib.rcParams['text.latex.preamble'] = [r'\boldmath']#Needed for latex commands here:
        plt.text(x0+0.14*xwid,y1,"EPIC"+str(self.objname),fontsize=20)
        n_textpos=0
        txt=["EPIC"+str(self.objname)]
        sigs=[2.2750131948178987, 15.865525393145707, 50.0, 84.13447460685429, 97.7249868051821]
        for lab in labs:
            xloc=x0+0.05*xwid
            yloc=y1+0.02*yheight+(0.05*yheight*(n_textpos+1))
            if lab==r'$t_{\rm cen}$':#Need larger float size for Tcen...
                txt+=[r"\textbf{"+lab+":} "+('%s' % float('%.8g' % (np.median(self.samples[:,n_textpos]))))+" +"+('%s' % float('%.2g' % (np.percentile(self.samples[:,n_textpos],sigs[3])-np.median(self.samples[:,n_textpos]))))+" -"+\
                      ('%s' % float('%.2g' % (np.median(self.samples[:,n_textpos])-np.percentile(self.samples[:,n_textpos],sigs[1]))))]
                plt.text(xloc,yloc,txt[-1])
            else:
                txt+=[r"\textbf{"+lab+":} "+('%s' % float('%.3g' % (np.median(self.samples[:,n_textpos]))))+" +"+('%s' % float('%.2g' % (np.percentile(self.samples[:,n_textpos],sigs[3])-np.median(self.samples[:,n_textpos]))))+" -"+\
                      ('%s' % float('%.2g' % (np.median(self.samples[:,n_textpos])-np.percentile(self.samples[:,n_textpos],sigs[1]))))]
                plt.text(xloc,yloc,txt[-1])
            n_textpos+=1
        info={'Rps':r'$R_p (R_{\oplus})$','Ps':'Per (d)','smas':'A (au)','Mps':r'$M_p (M_{\oplus})$','Krvs':r'K$_{\rm rv}$(ms$^{-1}$)','Prob_pl':r'ProbPl ($\%$)',\
              'steffs':'Teff (K)','srads':r'Rs ($R_{\odot}$)','smasss':r'Ms ($M_{\odot}$)','sloggs':'logg','sdenss':r'$\rho_s (\rho_{\odot})$'}
        pdfs=self.pdflist()
        for ival in pdfs:
            if ival[:2]!='LD':
                xloc=x0+0.05*xwid
                yloc=y1+0.02*yheight+(0.05*yheight*(n_textpos+2))
                vals=np.percentile(pdfs[ival],sigs)
                txt+=[(r"\textbf{%s:} " % info[ival])+('%s' % float('%.3g' % vals[2]))+" +"+\
                      ('%s' % float('%.2g' % (vals[3]-vals[2])))+" -"+('%s' % float('%.2g' % (vals[2]-vals[1])))]
                plt.text(xloc,yloc,txt[-1])
                n_textpos+=1
        self.MonoFinalPars()
        for ival in ['Rps','smas','Ps','Mps','Krvs']:
            xloc=x0+0.05*xwid
            yloc=y1+0.02*yheight+(0.05*yheight*(n_textpos+2))
            vals=np.percentile(getattr(self.mono1,ival),sigs)
            txt+=[(r"\textbf{%s:} " % info[ival])+('%s' % float('%.3g' % vals[2]))+" +"+\
                  ('%s' % float('%.2g' % (vals[3]-vals[2])))+" -"+('%s' % float('%.2g' % (vals[2]-vals[1])))]
            plt.text(xloc,yloc,txt[-1])
            n_textpos+=1
        xloc=x0+0.05*xwid
        yloc=y1+0.02*yheight+(0.05*yheight*(2+n_textpos+1))
        txt+=[(r"\textbf{%s:} " % info['Prob_pl'])+('%s' % float('%.3g' % getattr(self.mono1,'Prob_pl')))]
        plt.text(xloc,yloc,txt[-1])
        #Appending one row per run to the latex table ('ra' is not a valid
        #open mode; '/n' was not a newline):
        with open(self.settings.outfilesloc+'latextable.tex','a') as latextable:
            latextable.write(' & '.join(txt)+'\n')
        #Saving as pdf. Finds the first unused numbered filename.
        fname='';n_file=0
        while fname=='':
            if os.path.exists(self.settings.outfilesloc+'Corner_'+str(self.objname)+'_'+str(int(n_file))+'.pdf'):
                n_file+=1
            else:
                fname=self.settings.outfilesloc+'/Corner_'+str(self.objname)+'_'+str(int(n_file))+'.pdf'
        plt.savefig(fname,transparent=True,dpi=300)
        plt.savefig(fname.replace('pdf','png'),transparent=True,dpi=300)
class Monotransit():
#Monotransit detection to analyse
def __init__(self, tcen, tdur, depth, settings, name, b=0.4, RpRs=None, vel=None,\
tcenuerr=None,tduruerr=None,depthuerr=None,buerr=None,RpRsuerr=None, veluerr=None,\
tcenderr=None,tdurderr=None,depthderr=None,bderr=None,RpRsderr=None,velderr=None):
self.settings=settings
self.mononame = name
self.starname = name.split('.')[0]
self.pdfs = {}
self.update_pars(tcen, tdur, depth, b=b, RpRs=RpRs, vel=vel,
tcenuerr=tcenuerr,tduruerr=tduruerr,depthuerr=depthuerr,buerr=buerr, RpRsuerr=RpRsuerr,veluerr=veluerr,
tcenderr=tcenderr,tdurderr=tdurderr,depthderr=depthderr,bderr=bderr,RpRsderr=RpRsderr, velderr=velderr)
self.gen_PDFs()
'''
def addStarDat(self,pdflist):
self.LD1s=pdflist['LD1s']
self.LD2s=pdflist['LD2s']
self.srads=pdflist['srads']
self.sdenss=pdflist['sdenss']
self.smasss=pdflist['smasss']
self.steffs=pdflist['steffs']
self.pdfs.update({'LD1s':self.LD1s,'LD2s':self.LD2s,'srads':self.srads,'sdenss':self.sdenss,'smasss':self.smasss,'steffs':self.steffs})
'''
def update_pars(self, tcen=None, tdur=None, depth=None, b=0.4, RpRs=None, vel=None, tcenuerr=None,tduruerr=None,depthuerr=None,buerr=None,RpRsuerr=None, tcenderr=None,tdurderr=None,depthderr=None,bderr=None,RpRsderr=None,veluerr=None,velderr=None):
if tcen is not None:
self.tcen = float(tcen) # detected transit centre
self.tcenuerr = 0.15*tdur if type(tcenuerr)==type(None) else tcenuerr # estimated transit centre errors (default = 0.1*dur)
self.tcenderr = 0.15*tdur if type(tcenderr)==type(None) else tcenderr # estimated transit centre errors (default = 0.1*dur)
if tdur is not None:
self.tdur = float(tdur) # detected transit duration
self.tduruerr = 0.33*tdur if type(tcenuerr)==type(None) else tcenuerr # estimated transit duration errors (default = 0.2*dur)
self.tdurderr = 0.33*tdur if type(tcenuerr)==type(None) else tcenuerr # estimated transit duration errors (default = 0.2*dur)
if depth is not None:
self.depth = float(depth) # detected transit depth
self.depthuerr = 0.33*depth if type(tcenuerr)==type(None) else tcenuerr # estimated transit depth errors (default = 0.1*depth)
self.depthderr = 0.33*depth if type(tcenuerr)==type(None) else tcenuerr # estimated transit depth errors (default = 0.1*depth)
self.b = 0.5 if type(b)==type(None) else b # estimated impact parameter (default = 0.5)
self.buerr = 0.5 if type(buerr)==type(None) else buerr # estimated impact parameter errors (default = 0.5)
self.bderr = 0.5 if type(bderr)==type(None) else bderr # estimated impact parameter errors (default = 0.5)
self.RpRs = self.depth**0.5 if type(RpRs)==type(None) else RpRs # Ratio of planet to star radius
self.RpRsuerr = 0.5*self.RpRs if type(RpRsuerr)==type(None) else RpRsuerr # Ratio of planet to star radius errors (default = 25%)
self.RpRsderr = 0.5*self.RpRs if type(RpRsderr)==type(None) else RpRsderr # Ratio of planet to star radius errors (default = 25%)
#self.vel = CalcVel() # Velocity of planet relative to stellar radius
if vel is not None:
self.vel = vel # Velocity scaled to stellar radius
elif not hasattr(self,'vel'):
self.vel = None # Velocity scaled to stellar radius
def gen_PDFs(self,paramdict=None):
#Turns params into PDFs
if paramdict is None:
self.tcens=GetAssymDist(self.tcen,self.tcenuerr,self.tcenderr,nd=self.settings.npdf,returndist=True)
#self.depths=GetAssymDist(self.depth,self.depthuerr,self.depthderr,nd=self.settings.npdf,returndist=True)
self.bs=abs(GetAssymDist(self.b,self.buerr,self.bderr,nd=self.settings.npdf,returndist=True))
self.RpRss=GetAssymDist(self.RpRs,self.RpRsuerr,self.RpRsderr,nd=self.settings.npdf,returndist=True)
#Velocity tends to get "Nan"-y, so looping to avoid that:
nanvels=np.tile(True,self.settings.npdf)
v=np.zeros(self.settings.npdf)
while np.sum(nanvels)>self.settings.npdf*0.002:
v[nanvels]=CalcVel(np.random.normal(self.tdur,self.tdur*0.15,nanvels.sum()), self.bs[np.random.choice(self.settings.npdf,np.sum(nanvels))], self.RpRss[np.random.choice(self.settings.npdf,np.sum(nanvels))])
nanvels=(np.isnan(v))*(v<0.0)*(v>100.0)
self.vels=v
prcnts=np.diff(np.percentile(self.vels[~np.isnan(self.vels)],[15.865525393145707, 50.0, 84.13447460685429]))
self.veluerr=prcnts[1]
self.velderr=prcnts[0]
if self.vel is not None and ~np.isnan(self.vel):
#Velocity pre-defined. Distribution is not, however, so we'll use the scaled distribution of the "derived" velocity dist to give the vel errors
velrat=self.vel/np.nanmedian(self.vels)
self.vels*=velrat
self.veluerr*=velrat
self.velderr*=velrat
else:
self.vel=np.nanmedian(self.vels)
else:
#included dictionary of new "samples"
sigs=[15.865525393145707, 50.0, 84.13447460685429]
for colname in ['tcens','bs','vels','RpRss']:
setattr(self,colname,paramdict[colname])
percnts=np.percentile(np.array(getattr(model,colname)), sigs)
setattr(self,colname[:-1],percnts[1])
setattr(self,colname[:-1]+'uerr',percnts[2]-percnts[1])
setattr(self,colname[:-1]+'derr',percnts[1]-percnts[0])
self.pdfs.update({'tcens':self.tcens,'bs':self.bs,'RpRss':self.RpRss,'vels':self.vels})
#if StarPDFs is not None:
# self.pdflist.update(StarPDFs)
def calcmaxvel(self,lc,sdenss):
#Estimate maximum velocity given lightcurve duration without transit.
self.calcminp(lc)
maxvels=np.array([((18226*rho)/self.minp)**(1/3.0) for rho in abs(sdenss)])
prcnts=np.percentile(maxvels,[15.865525393145707, 50.0, 84.13447460685429])
self.maxvelderr=prcnts[1]-prcnts[0]
self.maxvel=prcnts[1]
self.maxveluerr=prcnts[2]-prcnts[1]
def calcminp(self,lc):
#finding tdur-wide jumps in folded LC
dur_jumps=np.where(np.diff(abs(lc[:,0]-self.tcen))>self.tdur)[0]
if len(dur_jumps)==0:
#No tdur-wide jumps until end of lc - using the maximum difference to a point in the lc
self.minp=np.max(abs(lc[:,0]-self.tcen))+self.tdur*0.33
else:
#Taking the first Tdur-wide jump in the folded lightcurve where a transit could be hiding.
self.minp=abs(lc[:,0]-self.tcen)[dur_jumps[0]]+self.tdur*0.33
'''
def CalcOrbit(self,denss):
#Calculating orbital information
#VelToOrbit(Vel, Rs, Ms, ecc=0, omega=0):
SMA,P=Vel2Per(denss,self.vels)
self.SMAs=SMA
self.PS=P
def update(self, **kwargs):
#Modify detection parameters...
self.tcen = kwargs.pop('tcen', self.tcen) # detected transit centre
self.tdur = kwargs.pop('tdur', self.tdur) # detected transit duration
self.depth = kwargs.pop('dep', self.depth) # detected transit depth
self.b = kwargs.pop('b', self.b) # estimated impact parameter (default = 0.4)
self.RpRs = kwargs.pop('RpRs', self.RpRs) # Ratio of planet to star radius
self.vel = kwargs.pop('vel', self.vel) # Velocity of planet relative to stellar radius
def FitParams(self,star,info):
#Returns params array needed for fitting
if info.GP:
return np.array([self.tcen,self.b,self.vel,self.RpRs])
else:
return np.array([self.tcen,self.b,self.vel,self.RpRs])
def InitialiseGP(self,settings,star,Lcurve):
import george
self.gp, res, self.lnlikfit = TrainGP(Lcurve.lc,self.tcen,star.wn)
self.newmean,self.newwn,self.a,self.tau=res
def FitPriors(self,star,settings):
#Returns priros array needed for fitting
if settings.GP:
if not self.hasattr('tau'):
self.InitialiseGP()
return np.array([[self.tcen,self.tcen,self.tcen,self.tcen],
[-1.2,1.2,0,0],
[0.0,100.0,self.vmax,self.vmaxerr],
[0.0,0.3,0,0],
[0.0,1.0,star.LD1,np.average(star.LD1uerr,star.LD1derr)],
[0.0,1.0,star.LD2,np.average(star.LD2uerr,star.LD2derr)],
[np.log(star.wn)-1.5,np.log(star.wn)+1.5,np.log(star.wn),0.3],
[self.tau-10,self.tau+10,self.tau,np.sqrt(np.abs(self.tau))],
[self.a-10,self.a+10,self.a,np.sqrt(np.abs(self.a))]])
else:
return np.array([[self.tcen,self.tcen,self.tcen,self.tcen],
[-1.2,1.2,0,0],
[0.0,100.0,self.vmax,self.vmaxerr],
[0.0,0.3,0,0],
[0.0,1.0,star.LD1,np.average(star.LD1uerr,star.LD1derr)],
[0.0,1.0,star.LD2,np.average(star.LD2uerr,star.LD2derr)]]
'''
def Optimize_mono(self,flatlc,LDprior,nopt=20):
    """Locally optimize the monotransit parameters against a flattened lightcurve.

    Runs `nopt` L-BFGS-B minimizations of MonoOnlyNegLogProb from random
    starting points drawn from the stored per-parameter sample arrays
    (self.tcens, self.bs, ...), keeps the best result, updates the stored
    transit parameters and saves a diagnostic plot.

    Parameters
    ----------
    flatlc : ndarray
        Flattened lightcurve with columns (time, flux, flux_err).
    LDprior : dict
        Limb-darkening priors; LDprior['LD1']/['LD2'] are prior lists whose
        index 3 is the central value and index 4 the width.
    nopt : int
        Number of random restarts (must be <= self.settings.npdf).

    Raises
    ------
    ValueError
        If no minimization converges successfully.
    """
    if not hasattr(self,'priors'):
        self.monoPriors()
    #Cutting to short area around transit:
    flatlc=flatlc[abs(flatlc[:,0]-self.tcen)<5*self.tdur]
    #Optimizing initial transit parameters
    opt_monomodel=MonotransitModel(tcen=self.tcen,
                                   b=self.b,
                                   vel=self.vel,
                                   RpRs=self.RpRs,
                                   LD1=LDprior['LD1'][3],
                                   LD2=LDprior['LD2'][3])
    print(self.priors)
    temp_priors=self.priors.copy()
    temp_priors['mean:LD1'] = LDprior['LD1']
    temp_priors['mean:LD2'] = LDprior['LD2']
    print("monopriors:",temp_priors) if self.settings.verbose else 0
    init_neglogprob=MonoOnlyNegLogProb(opt_monomodel.get_parameter_vector(),flatlc,temp_priors,opt_monomodel)
    print("nll init",init_neglogprob) if self.settings.verbose else 0
    # Row of zeros is a placeholder; successful fits are vstacked below it
    # and it is stripped off again before ranking.
    suc_res=np.zeros(7)
    LD1s=np.random.normal(LDprior['LD1'][3],LDprior['LD1'][4],self.settings.npdf)
    LD2s=np.random.normal(LDprior['LD2'][3],LDprior['LD2'][4],self.settings.npdf)
    #Running multiple optimizations using rough grid of important model paramsself.
    for n_par in np.random.choice(self.settings.npdf,nopt,replace=False):
        initpars=np.array([self.tcens[n_par],self.bs[n_par],self.vels[n_par],self.RpRss[n_par],LD1s[n_par],LD2s[n_par]])
        result = opt.minimize(MonoOnlyNegLogProb, initpars, args=(flatlc, temp_priors, opt_monomodel), method="L-BFGS-B")
        if result.success:
            # Store the 6 fitted parameters plus the final neg-log-prob.
            suc_res=np.vstack((suc_res,np.hstack((result.x,result.fun))))
    # Still 1-D means no vstack happened, i.e. every minimization failed.
    if len(np.shape(suc_res))==1:
        raise ValueError("No successful Monotransit minimizations")
    else:
        #Ordering successful optimizations by neglogprob...
        suc_res=suc_res[1:,:]
        print("All_Results:",suc_res) if self.settings.verbose else 0
        suc_res=suc_res[~np.isnan(suc_res[:,-1]),:]
        bestres=suc_res[np.argmin(suc_res[:,-1])]
        self.bestres=bestres
        print("Best_Result:",bestres) if self.settings.verbose else 0
        #tcen, tdur, depth, b=0.4, RpRs=None, vel=None
        # bestres layout: [tcen, b, vel, RpRs, LD1, LD2, neglogprob]
        self.update_pars(bestres[0], CalcTdur(bestres[2], bestres[1], bestres[3]), bestres[3]**2, b=bestres[1], RpRs=bestres[3],vel=bestres[2])
        print("initial fit nll: ",init_neglogprob," to new fit nll: ",bestres[-1]) if self.settings.verbose else 0
        #for nn,name in enumerate(['mean:tcen', 'mean:b', 'mean:vel', 'mean:RpRs', 'mean:LD1', 'mean:LD2']):
        #    self.gp.set_parameter(name,bestres[nn])
        PlotBestMono(flatlc, opt_monomodel, bestres[:-1], fname=self.settings.outfilesloc+self.mononame+'_init_monoonly_fit.png')
def monoPriors(self,name='mean'):
    """Build, store and return the default prior dict for a monotransit fit.

    Each entry maps '<name>:<param>' to [lower, upper], optionally followed
    by a prior-type tag ('normlim') and its hyperparameters.
    """
    prefix = lambda par: name + ':' + par
    halfwindow = self.tdur * 0.3
    self.priors = {
        prefix('tcen'): [self.tcen - halfwindow, self.tcen + halfwindow],
        prefix('b'): [0.0, 1.25],
        prefix('vel'): [0, self.maxvel + 5 * self.maxveluerr, 'normlim',
                        self.maxvel, self.maxveluerr],
        prefix('RpRs'): [0.02, 0.25],
    }
    return self.priors
'''
def RunModel(self,lc,gp):
self.modelPriors()
#Returning monotransit model from information.
sampler = emcee.EnsembleSampler(self.settings.nwalkers, len(gp.get_parameter_vector()), MonoLogProb, args=(lc,self.priors,gp), threads=self.settings.nthreads)
chx=np.random.choice(np.sum(~np.isnan(self.vels)),self.settings.nwalkers,replace=False)
dists=[np.random.normal(gp.get_parameter(nm),abs(gp.get_parameter(nm))**0.25,len(chx)) for nm in ['kernel:terms[0]:log_amp', 'kernel:terms[0]:log_timescale', 'kernel:terms[0]:log_period']]+\
[self.tcens[chx],self.bs[chx],self.vels[~np.isnan(self.vels)][chx],self.RpRss[chx],self.LD1s[chx],self.LD2s[chx]]
#'kernel:terms[0]:log_factor', 'kernel:terms[1]:log_sigma' <- frozen and not used
col=['kernel:terms[0]:log_amp', 'kernel:terms[0]:log_timescale', 'kernel:terms[0]:log_period',\
'mean:tcen','mean:b','mean:vel','mean:RpRs','mean:LD1','mean:LD2']
pos=np.column_stack(dists)
self.init_mcmc_params=pos
#print(len(pos[0,:]))
#[np.array(list(initparams.values())) *(1+ 1.5e-4*np.random.normal()) for i in range(nwalkers)]
Nsteps = 30000
sampler.run_mcmc(pos, 1, rstate0=np.random.get_state())
sampler.run_mcmc(pos, self.settings.nsteps, rstate0=np.random.get_state())
self.samples = sampler.chain[:, 3000:, :].reshape((-1, ndim))
return self.samples
#.light_curve(np.arange(0,40,0.024),texp=0.024))
'''
def SaveInput(self,stellarpdfs):
    # Persist the input posterior samples (this object's pdfs followed by the
    # stellar pdfs) as one column-stacked .npy array next to the fit products.
    # assumes self.pdfs and stellarpdfs are dicts of equal-length 1-D sample
    # arrays — TODO confirm against the callers.
    np.save(self.settings.fitsloc+self.mononame+'_inputsamples',np.column_stack(([self.pdfs[ipdf] for ipdf in self.pdfs.keys()]+[stellarpdfs[ipdf] for ipdf in stellarpdfs.keys()])))
class Lightcurve():
    # Lightcurve class - contains all lightcurve information
    def __init__(self, file, epic):
        # Load lightcurve `file` for EPIC target `epic`, then clean and
        # median-normalise it.
        self.fileloc=file
        self.lc,self.mag=OpenLC(self.fileloc)
        try:
            # Prefer the catalogue Kepler magnitude when the EPIC lookup works.
            self.mag=k2_quickdat(epic)['k2_kepmag']
        except:
            # Fall back to the magnitude returned by OpenLC.
            self.mag=self.mag
        # Drop cadences containing a NaN in any column.
        self.lc=self.lc[~np.isnan(np.sum(self.lc,axis=1))]
        # Normalise flux (and flux error) by the median flux.
        self.fluxmed=np.nanmedian(self.lc[:,1])
        self.lc[:,1:]/=self.fluxmed
        # Remove single-point outliers.
        self.lc=self.lc[AnomCutDiff(self.lc[:,1])]
        # Total time span from the first nonzero-time cadence to the last one.
        self.range=self.lc[-1,0]-self.lc[self.lc[:,0]!=0.0,0][0]
        self.cadence=np.nanmedian(np.diff(self.lc[:,0]))
        # Boolean keep-mask over cadences (True = use); updated by calc_mask.
        self.lcmask=np.tile(True,len(self.lc[:,0]))
    def BinLC(self, binsize,gap=0.4):
        #Bins lightcurve to some time interval. Finds gaps in the lightcurve using the threshold "gap"
        # Split the time axis at gaps larger than `gap` days so that bins
        # never straddle a data gap.
        spl_ts=np.array_split(self.lc[:,0],np.where(np.diff(self.lc[:,0])>gap)[0]+1)
        bins=np.hstack([np.arange(s[0],s[-1],binsize) for s in spl_ts])
        digitized = np.digitize(self.lc[:,0], bins)
        # Inverse-variance weights from the flux errors; zero weights are
        # replaced by the median nonzero weight to avoid divide-by-zero.
        ws=(self.lc[:,2])**-2.0
        ws=np.where(ws==0.0,np.median(ws[ws!=0.0]),ws)
        # NOTE(review): column 2 is the flux *error* column; confirm whether
        # column 1 (flux) was intended for these weighted bin averages.
        bin_means = np.array([np.ma.average(self.lc[digitized==i,2],weights=ws[digitized==i]) for i in range(np.max(digitized))])
        bin_stds = np.array([np.ma.average((self.lc[digitized==i,2]-bin_means[i])**2, weights=ws[digitized==i]) for i in range(np.max(digitized))])
        whok=(~np.isnan(bin_means))&(bin_means!=0.0)
        self.binlc=np.column_stack((bins,bin_means,bin_stds))[whok,:]
        self.binsize=binsize
        return self.binlc
    '''
    def keys(self):
        return ['NPTS','SKY_TILE','RA_OBJ','DEC_OBJ','BMAG','VMAG','JMAG','KMAG','HMAG','PMRA','PMDEC','PMRAERR','PMDECERR','NFIELDS']
    def keyvals(self,*args):
        #Returns values for the given key list
        arr=[]
        for ke in args[0]:
            exec('arr+=[self.%s]' % ke)
        return arr
    '''
    def savelc(self):
        # Save the binned and unbinned lightcurves as .npy files.
        # NOTE(review): relies on self.settings / self.OBJNAME / get_binlc() /
        # get_lc(), none of which are set in __init__ — confirm they are
        # attached elsewhere before calling.
        np.save(self.settings.fitsloc+self.OBJNAME.replace(' ','')+'_bin.npy',self.get_binlc())
        np.save(self.settings.fitsloc+self.OBJNAME.replace(' ','')+'.npy',self.get_lc())
    def flatten(self,winsize=4.5,stepsize=0.125):
        # Detrend the lightcurve using k2flatten's moving-window fit.
        import k2flatten
        return k2flatten.ReduceNoise(self.lc,winsize=winsize,stepsize=stepsize)
    def calc_mask(self,meanmodels):
        # Set lcmask to False within ~half a transit duration (plus one
        # cadence) of each model's transit(s).
        for model in meanmodels:
            if hasattr(model,'P'):
                # Periodic planet: mask every epoch via phase-folding on P.
                self.lcmask[((abs(self.lc[:,0]-model.tcen)%model.P)<(self.cadence+model.tdur*0.5))+((abs(self.lc[:,0]-model.tcen)%model.P)>(model.P-(model.tdur*0.5+self.cadence)))]=False
            else:
                #Mono
                self.lcmask[abs(self.lc[:,0]-model.tcen)<(self.cadence+model.tdur*0.5)]=False
class MonotransitModel(celerite.modeling.Model):
    """Celerite mean model for a single (mono)transit.

    Parameters: tcen (transit centre time), b (impact parameter), vel
    (transit velocity in stellar radii per day), RpRs (planet-to-star radius
    ratio) and quadratic limb-darkening coefficients LD1/LD2.
    """
    parameter_names = ("tcen", "b","vel","RpRs","LD1","LD2")
    def get_value(self,t):
        """Evaluate the cadence-integrated transit model at times `t`.

        Each cadence is oversampled by `oversamp` sub-exposures which are
        averaged back, mimicking the finite integration time of each point.
        """
        cad=np.median(np.diff(t))
        oversamp=10#Oversampling
        # Build all sub-cadence timestamps in a single pass. The previous
        # implementation grew `finetime` with np.hstack inside the loop,
        # which is quadratic in len(t); one np.concatenate is linear and
        # produces bitwise-identical values.
        halfspan=(1-1/oversamp)*(cad/2.)
        finetime=np.sort(np.concatenate([
            np.linspace(t[i]-halfspan, t[i]+halfspan, oversamp)
            for i in range(len(t))]))
        # Projected star-planet separation in stellar radii.
        z = np.sqrt(self.b**2+(self.vel*(finetime - self.tcen))**2)
        #Removed the flux component below. Can be done by GP
        model = occultquad(z, self.RpRs, np.array((self.LD1,self.LD2)))
        # Fold the oversampled model back onto the original cadences.
        return np.average(np.resize(model, (len(t), oversamp)), axis=1)
'''
#TBD:
class MonotransitModel_x2(celerite.modeling.Model):
nmodels=2
parameter_names = tuple([item for sublist in [["tcen"+str(n), "b"+str(n),"vel"+str(n),"RpRs"+str(n)] for n in range(1,nmodels+1)]+[["LD1","LD2"]] for item in sublist ])
def get_value(self,t):
cad=np.median(np.diff(t))
oversamp=10#Oversampling
finetime=np.empty(0)
for i in range(len(t)):
finetime=np.hstack((finetime, np.linspace(t[i]-(1-1/oversamp)*(cad/2.), t[i]+(1-1/oversamp)*(cad/2.), oversamp) ))
finetime=np.sort(finetime)
model=np.zeros((len(finetime)))
for nmod in range(nmodels):
z = np.sqrt(getattr(self,'b'+str(nmod))**2+(getattr(self,'vel'+str(nmod))*(finetime - getattr(self,'tcen'+str(nmod))))**2)
#Removed the flux component below. Can be done by GP
model *= occultquad(z, getattr(self,'RpRs'+str(nmod)), np.array((self.LD1,self.LD2)))
return np.average(np.resize(model, (len(t), oversamp)), axis=1)
class MonotransitModel_x3(celerite.modeling.Model):
nmodels =3
parameter_names = tuple([item for sublist in [["tcen"+str(n), "b"+str(n),"vel"+str(n),"RpRs"+str(n)] for n in range(1,nmodels+1)]+[["LD1","LD2"]] for item in sublist ])
def get_value(self,t):
cad=np.median(np.diff(t))
oversamp=10#Oversampling
finetime=np.empty(0)
for i in range(len(t)):
finetime=np.hstack((finetime, np.linspace(t[i]-(1-1/oversamp)*(cad/2.), t[i]+(1-1/oversamp)*(cad/2.), oversamp) ))
finetime=np.sort(finetime)
model=np.zeros((len(finetime)))
for nmod in range(nmodels):
z = np.sqrt(getattr(self,'b'+str(nmod))**2+(getattr(self,'vel'+str(nmod))*(finetime - getattr(self,'tcen'+str(nmod))))**2)
#Removed the flux component below. Can be done by GP
model *= occultquad(z, getattr(self,'RpRs'+str(nmod)), np.array((self.LD1,self.LD2)))
return np.average(np.resize(model, (len(t), oversamp)), axis=1)
class MonotransitModel_plus_pl(celerite.modeling.Model):
parameter_names = ("monotcen", "monob","monovel","monoRpRs","multitcen", "multib","multiP","multiRpRs","multia_Rs","LD1","LD2")
def get_value(self,t):
cad=np.median(np.diff(t))
oversamp=10#Oversampling
finetime=np.empty(0)
for i in range(len(t)):
finetime=np.hstack((finetime, np.linspace(t[i]-(1-1/oversamp)*(cad/2.), t[i]+(1-1/oversamp)*(cad/2.), oversamp) ))
finetime=np.sort(finetime)
model=np.zeros((len(finetime)))
for nmod in range(nmodels):
z = np.sqrt(getattr(self,'b'+str(nmod))**2+(getattr(self,'vel'+str(nmod))*(finetime - getattr(self,'tcen'+str(nmod))))**2)
#Removed the flux component below. Can be done by GP
model *= occultquad(z, getattr(self,'RpRs'+str(nmod)), np.array((self.LD1,self.LD2)))
return np.average(np.resize(model, (len(t), oversamp)), axis=1)
class MonotransitModel_plus_plx2(celerite.modeling.Model):
parameter_names = tuple(["monotcen", "monob","monovel","monoRpRs"]+
['multi'+item for sublist in [["tcen"+str(n), "b"+str(n),"P"+str(n),"RpRs"+str(n),"a_Rs"+str(n)] for n in range(1,3)]+
[["LD1","LD2"]] for item in sublist ])
def get_value(self,t):
cad=np.median(np.diff(t))
oversamp=10#Oversampling
finetime=np.empty(0)
for i in range(len(t)):
finetime=np.hstack((finetime, np.linspace(t[i]-(1-1/oversamp)*(cad/2.), t[i]+(1-1/oversamp)*(cad/2.), oversamp) ))
finetime=np.sort(finetime)
model=np.zeros((len(finetime)))
for nmod in range(nmodels):
z = np.sqrt(getattr(self,'b'+str(nmod))**2+(getattr(self,'vel'+str(nmod))*(finetime - getattr(self,'tcen'+str(nmod))))**2)
#Removed the flux component below. Can be done by GP
model *= occultquad(z, getattr(self,'RpRs'+str(nmod)), np.array((self.LD1,self.LD2)))
return np.average(np.resize(model, (len(t), oversamp)), axis=1)
class MonotransitModelx3_plus_plx2(celerite.modeling.Model):
parameter_names = tuple(['mono'+item for sublist in [["tcen"+str(n), "b"+str(n),"vel"+str(n),"RpRs"+str(n)] for n in range(1,4)]]+\
['multi'+item for sublist in [["tcen"+str(n), "b"+str(n),"P"+str(n),"RpRs"+str(n),"a_Rs"+str(n)] for n in range(4,6)]]+\
["LD1","LD2"]])
def get_value(self,t):
cad=np.median(np.diff(t))
oversamp=10#Oversampling
finetime=np.empty(0)
for i in range(len(t)):
finetime=np.hstack((finetime, np.linspace(t[i]-(1-1/oversamp)*(cad/2.), t[i]+(1-1/oversamp)*(cad/2.), oversamp) ))
finetime=np.sort(finetime)
model=np.zeros((len(finetime)))
for nmod in range(nmodels):
z = np.sqrt(getattr(self,'b'+str(nmod))**2+(getattr(self,'vel'+str(nmod))*(finetime - getattr(self,'tcen'+str(nmod))))**2)
#Removed the flux component below. Can be done by GP
model *= occultquad(z, getattr(self,'RpRs'+str(nmod)), np.array((self.LD1,self.LD2)))
return np.average(np.resize(model, (len(t), oversamp)), axis=1)
'''
def k2_quickdat(kic):
    """Fetch the K2 EPIC catalogue row for `kic` from the NASA Exoplanet Archive.

    Performs a network query and returns a pandas Series for the (first)
    matching EPIC entry.
    """
    # pd.DataFrame.from_csv was deprecated and removed in pandas 1.0;
    # read_csv with index_col=0 is the supported equivalent here.
    kicdat=pd.read_csv("https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=k2targets&where=epic_number=%27"+str(int(kic))+"%27",index_col=0)
    if len(kicdat.shape)>1:
        # More than one row came back; keep the first and warn.
        print("Multiple entries - ",str(kicdat.shape))
        kicdat=kicdat.iloc[0]
    return kicdat
'''
def MonoLnPriorDict(params,priors):
lp=0
for key in priors.keys():
#print(params[n],key,priors[key])
if params[key]<priors[key][0] or params[key]>priors[key][1]:
#hidesously low number that still has gradient towards "mean" of the uniform priors.
lp-=1e15 * (params[key]-(0.5*(priors[key][0]+priors[key][1])))**2
#print(key," over prior limit")
if len(priors[key])>2:
if priors[key][2]=='gaussian':
lp+=stats.norm(priors[key][3],priors[key][4]).pdf(params[key])
elif priors[key][2]=='normlim':
#Special velocity prior from min period & density:
lp+=((1.0-stats.norm.cdf(params[key],priors[key][3],priors[key][4]))*params[key])**2
return lp
'''
def MonoLnPrior(params,priors):
    """Log-prior of a parameter vector given a dict of prior specifications.

    Each priors[key] is [lower, upper], optionally followed by a prior-type
    tag ('gaussian', 'normlim' or 'evans') and its hyperparameters.
    Parameters outside [lower, upper] incur an enormous quadratic penalty
    whose gradient still points back towards the interval midpoint.
    """
    lp = 0
    for idx, (key, spec) in enumerate(priors.items()):
        lo, hi = spec[0], spec[1]
        val = params[idx]
        if val < lo or val > hi:
            # Hideously low number, scaled by the prior width, that still has
            # a gradient towards the centre of the allowed interval.
            lp -= 1e20 * ((val - 0.5 * (lo + hi)) / (hi - lo)) ** 2
        if len(spec) > 2:
            kind = spec[2]
            if kind == 'gaussian':
                # NOTE(review): adds the pdf (not logpdf) to a log-prior;
                # kept exactly as written.
                lp += stats.norm(spec[3], spec[4]).pdf(val)
            elif kind == 'normlim':
                # Special velocity prior from min period & density.
                lp += ((1.0 - stats.norm.cdf(val, spec[3], spec[4])) * val) ** 2
            elif kind == 'evans':
                # Evans 2015-style exponential amplitude prior.
                lp -= spec[3] * np.exp(val)
    return lp
def MonoLogProb(params,lc,priors,gp):
    """Log-probability (GP likelihood + prior) of `params` for GP model `gp`.

    lc is a (time, flux, err) array; its times/errors condition the GP and
    its flux column is evaluated under the GP likelihood.
    """
    gp.set_parameter_vector(params)
    gp.compute(lc[:,0], lc[:,2])
    loglik = gp.log_likelihood(lc[:,1])
    logprior = MonoLnPrior(params, priors)
    return loglik + logprior
def MonoNegLogProb(params,lc,priors,gp):
    """Negated MonoLogProb, in the form expected by scipy minimizers."""
    return -MonoLogProb(params, lc, priors, gp)
def MonoOnlyLogProb(params,lc,priors,monomodel):
    """Log-probability of the transit-only model (no GP) for lightcurve `lc`."""
    monomodel.set_parameter_vector(params)
    resid = lc[:,1] - monomodel.get_value(lc[:,0])
    # Chi-squared-style likelihood; the normalisation constant is dropped as
    # it does not affect gradient descent. NOTE(review): the -2 prefactor
    # (rather than -0.5) is kept from the original — confirm the intended
    # weighting before interpreting absolute values.
    loglik = np.sum(-2 * lc[:,2]**-2 * resid**2)
    logprior = MonoLnPrior(params, priors)
    return loglik + logprior
def MonoOnlyNegLogProb(params,lc,priors,monomodel):
    """Negated MonoOnlyLogProb, in the form expected by scipy minimizers."""
    return -MonoOnlyLogProb(params, lc, priors, monomodel)
class RotationTerm(celerite.terms.Term):
    """Celerite kernel term for quasi-periodic stellar rotation signals.

    A mixture of one real (aperiodic) and one complex (periodic) component,
    parameterised in log space; `log_factor` sets the amplitude split.
    """
    parameter_names = ("log_amp", "log_timescale", "log_period", "log_factor")
    def get_real_coefficients(self, params):
        # Aperiodic component.
        log_amp, log_timescale, log_period, log_factor = params
        factor = np.exp(log_factor)
        amp_real = np.exp(log_amp) * (1.0 + factor) / (2.0 + factor)
        decay = np.exp(-log_timescale)
        return (amp_real, decay)
    def get_complex_coefficients(self, params):
        # Periodic component at angular frequency 2*pi/period.
        log_amp, log_timescale, log_period, log_factor = params
        factor = np.exp(log_factor)
        amp_complex = np.exp(log_amp) / (2.0 + factor)
        decay = np.exp(-log_timescale)
        omega = 2*np.pi*np.exp(-log_period)
        return (amp_complex, 0.0, decay, omega)
def neg_log_like(params, y, gp):
    """Negative GP log-likelihood of data `y` at parameter vector `params`."""
    gp.set_parameter_vector(params)
    loglik = gp.log_likelihood(y)
    return -loglik
def grad_neg_log_like(params, y, gp):
    """Gradient of the negative GP log-likelihood.

    celerite's grad_log_likelihood returns (value, gradient); only the
    negated gradient is needed by the optimizer.
    """
    gp.set_parameter_vector(params)
    value_and_grad = gp.grad_log_likelihood(y)
    return -value_and_grad[1]
'''
def grad_nll(p,gp,y,prior=[]):
#Gradient of the objective function for TrainGP
gp.set_parameter_vector(p)
return -gp.grad_log_likelihood(y, quiet=True)
def nll_gp(p,gp,y,prior=[]):
#inverted LnLikelihood function for GP training
gp.set_parameter_vector(p)
if prior!=[]:
prob=MonoLogProb(p,y,prior,gp)
return -prob if np.isfinite(prob) else 1e25
else:
ll = gp.log_likelihood(y, quiet=True)
return -ll if np.isfinite(ll) else 1e25'''
def VelToOrbit(vel, Rs, Ms, ecc=0, omega=0,timebin=86400.0):
    '''Convert a transit velocity into a circular-orbit semi-major axis and period.

    vel : transit velocity in stellar radii per day
    Rs  : stellar radius in solar radii
    Ms  : stellar mass in solar masses
    ecc, omega, timebin : currently unused (a circular orbit is assumed)

    Returns (semi-major axis in AU, orbital period in days).
    '''
    radius_m = Rs*695500000  # solar radii -> metres
    mass_kg = Ms*1.96e30     # solar masses -> kg
    speed = vel*radius_m/86400.  # stellar radii/day -> m/s
    # Circular orbit: v^2 = G*M/a  =>  a = G*M/v^2 ; P = 2*pi*a/v
    sma_m = (6.67e-11*mass_kg)/speed**2
    period_s = (2*np.pi*sma_m)/speed
    return sma_m/1.49e11, period_s/86400
def getKeplerLDs(Ts,logg=4.43812,FeH=0.0,how='2'):
    """Interpolate Kepler limb-darkening coefficients for given stellar params.

    Parameters
    ----------
    Ts : float or array
        Effective temperature(s) in Kelvin; must satisfy 2000 <= T < 50000.
    logg : float or array
        Surface gravity (log g), scalar or one per temperature.
    FeH : float or array
        Metallicity; collapsed to its median and snapped to the nearest
        tabulated grid value.
    how : str
        Limb-darkening law: '1' linear, '2' quadratic, '3' three-parameter,
        '4' four-parameter (selects which coefficient columns to read).

    Returns an (n_temps, n_coeffs) array, or None if Ts is out of range.
    Raises ValueError for an unknown `how`. Reads "KeplerLDlaws.txt" from
    the working directory.
    """
    from scipy.interpolate import CloughTocher2DInterpolator as ct2d
    # Column indices in KeplerLDlaws.txt for each limb-darkening law.
    types={'1':[3],'2':[4, 5],'3':[6, 7, 8],'4':[9, 10, 11, 12]}
    if how in types:
        checkint = types[how]
    else:
        # Previously this only printed "no key..." and then crashed with a
        # NameError on `checkint`; fail loudly with a clear message instead.
        raise ValueError("Unknown limb-darkening law %r; expected one of %s"
                         % (how, sorted(types)))
    arr = np.genfromtxt("KeplerLDlaws.txt",skip_header=2)
    FeHarr=np.unique(arr[:, 2])
    # Just using a single value of FeH, snapped to the tabulated grid.
    if not isinstance(FeH,(float,int)):
        FeH=np.nanmedian(FeH)
    FeH=FeHarr[find_nearest(FeHarr,FeH)]
    feh_ix=arr[:,2]==FeH
    scalar_T = isinstance(Ts,(float,int))
    Tlen=1 if scalar_T else len(Ts)
    outarr=np.zeros((Tlen,len(checkint)))
    for n,i in enumerate(checkint):
        # 2-D interpolator over (Teff, logg) for this coefficient column.
        u_interp=ct2d(np.column_stack((arr[feh_ix,0],arr[feh_ix,1])),arr[feh_ix,i])
        if scalar_T and (2000 <= Ts < 50000.):
            outarr[0,n]=u_interp(Ts,logg)
        elif not scalar_T and ((Ts<50000.)*(Ts>=2000)).all():
            if isinstance(logg,float):
                outarr[:,n]=np.array([u_interp(T,logg) for T in Ts])
            else:
                outarr[:,n]=np.array([u_interp(Ts[t],logg[t]) for t in range(len(Ts))])
        else:
            # Out-of-range temperature(s): keep the original behaviour of
            # reporting and returning None rather than extrapolating.
            print('Temperature outside limits')
            outarr=None
            break
    return outarr
def nonan(lc):
    """Boolean mask of rows in `lc` that contain no NaNs in any column."""
    return ~np.isnan(np.sum(lc, axis=1))
def AnomCutDiff(flux,thresh=4.2):
    """Boolean keep-mask flagging single-point outliers in `flux`.

    An interior point is rejected only when the jumps to BOTH neighbours
    have opposite signs and both exceed `thresh` times the median absolute
    point-to-point difference. End points are always kept. Input must be
    NaN-free. Fast: ~0.05s for a million-point array.
    """
    fwd = np.diff(flux[1:])    # flux[i+1]-flux[i], aligned to interior point i
    bwd = np.diff(flux[:-1])   # flux[i]-flux[i-1], aligned to interior point i
    # Normalise by the median absolute forward difference (~rms scatter).
    scale = np.median(abs(fwd))
    fwd = fwd / scale
    bwd = bwd / scale
    same_sign = (fwd * bwd) > 0
    small = (abs(fwd) < thresh) | (abs(bwd) < thresh)
    keep_interior = same_sign | small
    return np.hstack((True, keep_interior, True))
def CalcTdur(vel, b, p):
    '''Calculates a transit duration Tdur (in days) from the input velocity vel (in stellar radii per day), impact parameter b and planet-to-star ratio p'''
    # Chord length across the stellar disc (in Rs) divided by the velocity.
    return (2*(1+p)*np.sqrt(1-(b/(1+p))**2))/vel
def CalcVel(Tdur, b, p):
    '''Calculates a transit velocity (in stellar radii per day) from the transit duration Tdur (days), impact parameter b and planet-to-star ratio p'''
    # Chord length across the stellar disc (in Rs) divided by the crossing time.
    chord = 2 * (1 + p) * np.sqrt(1 - (b / (1 + p)) ** 2)
    return chord / Tdur
def PlotBestMono(lc, monomodel, vector, fname=None):
    """Plot the lightcurve with the best-fit transit-only model overlaid.

    lc : (time, flux, err) array; monomodel : MonotransitModel instance;
    vector : parameter vector to plot; fname : optional path to save the figure.
    """
    step = np.median(np.diff(lc[:, 0]))
    tgrid = np.arange(lc[0, 0], lc[-1, 0] + 0.01, step)
    monomodel.set_parameter_vector(vector)
    prediction = monomodel.get_value(tgrid)
    # Data with errors underneath, points on top, dashed model through it.
    plt.errorbar(lc[:, 0], lc[:, 1], yerr=lc[:, 2], fmt='.', color='#999999')
    plt.plot(lc[:, 0], lc[:, 1], '.', color='#333399')
    plt.plot(tgrid, prediction, '--', color='#003333', linewidth=2.0,
             label='Median transit model fit')
    if fname is not None:
        plt.savefig(fname)
def PlotModel(lc, model, vector, prevmodels=[], plot=True, residuals=False, scale=1, nx=10000, GP=False, subGP=False, monomodel=0, verbose=True,fname=None):
    # Plot a best-fit GP+transit model over the lightcurve, or its residuals.
    # - lc : lightcurve
    # - model : celerite-style model (eg gp) to apply the vector to
    # - vector : best-fit parameters to use
    # - prevmodels : previously computed (npts, 6) model-fit array; when given,
    #   the GP prediction is skipped and these fits are reused.
    # Returns the (npts, 6) model-fit array with columns
    # [-2sigma, -1sigma, median, +1sigma, +2sigma, mean-model].
    # NOTE(review): mutable default `prevmodels=[]` — not mutated here, but
    # fragile if that ever changes.
    cad=np.median(np.diff(lc[:,0]))
    t=np.arange(lc[0,0],lc[-1,0]+0.01,cad)
    if len(prevmodels)==0:
        # Condition the GP on the data and predict on the regular grid t.
        model.set_parameter_vector(vector)
        model.compute(lc[:,0],lc[:,2])
        ypreds,varpreds=model.predict(lc[:,1], t)
        stds=np.sqrt(np.diag(varpreds))
        model.mean.get_value(t)
        modelfits=np.column_stack((ypreds-stds*2,ypreds-stds,ypreds,ypreds+stds,ypreds+stds*2,model.mean.get_value(t)))
    else:
        modelfits=prevmodels
    if residuals:
        #Subtracting bestfit model from both flux and model to give residuals
        newmodelfits=np.column_stack((modelfits[:,:5]-np.tile(modelfits[:,2], (5, 1)).swapaxes(0, 1),modelfits[:,5])) #subtracting median fit
        # NOTE(review): the hard-coded 0.0204317... cadence maps lc times onto
        # the model grid; confirm it matches this lightcurve's cadence.
        Ploty=lc[:,1]-modelfits[:, 2][(np.round((lc[:,0]-lc[0,0])/0.020431700249901041)).astype(int)]
        #p.xlim([t[np.where(redfits[:,2]==np.min(redfits[:,2]))]-1.6, t[np.where(redfits[:,2]==np.min(redfits[:,2]))]+1.6])
        nomys=None #Not plotting the non-GP model.
    elif subGP:
        # BUG(review): `nomys` is used here before it is ever assigned in this
        # branch (it is only set in the `residuals` branch above), so
        # subGP=True raises NameError. Presumably the mean transit model
        # modelfits[:,5] was intended — confirm before relying on this path.
        newmodelfits=np.column_stack((modelfits[:,:5]-np.tile(nomys, (5, 1)).swapaxes(0, 1),modelfits[:,5])) #subtracting median fit
        Ploty=lc[:,1]-modelfits[:,5]
        #p.xlim([t[np.where(redfits[:,2]==np.min(redfits[:,2]))]-1.6, t[np.where(redfits[:,2]==np.min(redfits[:,2]))]+1.6])
        nomys=None #Not plotting the non-GP model.
    else:
        Ploty=lc[:,1]
        newmodelfits=np.copy(modelfits)
    if plot:
        plt.errorbar(lc[:, 0], lc[:,1], yerr=lc[:, 2], fmt=',',color='#999999',alpha=0.8,zorder=-100)
        plt.plot(lc[:, 0], lc[:,1], '.',color='#333399')
        #Plotting 1-sigma error region and models
        print(np.shape(newmodelfits))
        plt.fill(np.hstack((t,t[::-1])),
                 np.hstack((newmodelfits[:,2]-(newmodelfits[:,2]-newmodelfits[:,1]),(newmodelfits[:,2]+(newmodelfits[:,3]-newmodelfits[:,2]))[::-1])),
                 '#3399CC', linewidth=0,label='$1-\sigma$ region ('+str(scale*100)+'% scaled)',alpha=0.5)
        plt.fill(np.hstack((t,t[::-1])),
                 np.hstack(((1.0+newmodelfits[:,2]-newmodelfits[:,5])-(newmodelfits[:,2]-newmodelfits[:,1]),((1.0+newmodelfits[:,2]-newmodelfits[:,5])+(newmodelfits[:,3]-newmodelfits[:,2]))[::-1])),
                 '#66BBCC', linewidth=0,label='$1-\sigma$ region without transit',alpha=0.5)
        plt.plot(t,modelfits[:,2],'-',color='#003333',linewidth=2.0,label='Median model fit')
        if not residuals and not subGP:
            plt.plot(t,model.mean.get_value(t),'--',color='#003333',linewidth=2.0,label='Median transit model fit')
        if not residuals and not subGP:
            #Putting title on upper (non-residuals) graph
            plt.title('Best fit model')
        plt.legend(loc=3,fontsize=9)
    if fname is not None:
        plt.savefig(fname)
    return modelfits
|
mit
|
rallured/PyXFocus
|
examples/axro/retraceError.py
|
1
|
1996
|
import numpy as np
import matplotlib.pyplot as plt
import pdb
import scipy.optimize as opt
import traces.surfaces as surf
import traces.transformations as tran
import traces.analyses as anal
import traces.sources as sources
import traces.conicsolve as conic
# Load the measured primary-mirror figure-error coefficients and their
# axial/azimuthal frequency vectors.
pcoeff,pax,paz = np.genfromtxt('/home/rallured/Dropbox/AXRO/'
                    'Alignment/CoarseAlignment/150615_OP1S09Coeffs.txt')
# Convert coefficient amplitudes to mm.
pcoeff = pcoeff/1000.
# Coefficient triple handed to the Legendre-Legendre surface routines.
# Fix: originally [pcoeff,pax,pax], which duplicated the axial frequency
# vector and silently dropped `paz` — paz is clearly the intended third entry.
primc = [pcoeff,pax,paz]
def tracePrimary(primCoeffs=None,primalign=np.zeros(6)):
    """
    Trace rays from focus to primary, off retroreflector,
    then back to focus. Return spot centroids.

    primCoeffs : optional Legendre-Legendre figure-error coefficient list
        [coeffs, axial_freq, azimuthal_freq]; a perfect Wolter primary is
        traced when None.
    primalign : 6-element misalignment vector applied to the primary on
        both passes.

    Returns (rays, theta) where theta is each ray's initial azimuthal angle.
    """
    #Set up source
    primfoc = conic.primfocus(220.,8400.)
    r1 = conic.primrad(8500.,220.,8400.)
    # Subannulus of 100000 rays spanning 100/220 rad, pointed from focus.
    rays = sources.subannulus(220.,r1,100./220,100000,zhat=1.)
    tran.pointTo(rays,0.,0.,-primfoc,reverse=1.)
    theta = np.arctan2(rays[2],rays[1])
    #Trace to primary
    tran.transform(rays,*primalign)
    tran.transform(rays,0.,0,-8400.,0,0,0)
    if primCoeffs is None:
        surf.wolterprimary(rays,220.,8400.)
    else:
        surf.primaryLL(rays,220.,8400.,8500.,8400.,100./220.,\
                       *primCoeffs)
    tran.transform(rays,0,0,8400.,0,0,0)
    tran.itransform(rays,*primalign)
    tran.reflect(rays)
    #Reflect and come back
    tran.transform(rays,0,0,400.,0,0,0)
    surf.flat(rays)
    tran.reflect(rays)
    tran.transform(rays,0,0,-400.,0,0,0)
    #Trace to primary
    tran.transform(rays,*primalign)
    tran.transform(rays,0.,0,-8400.,0,0,0)
    if primCoeffs is None:
        surf.wolterprimary(rays,220.,8400.)
    else:
        surf.primaryLL(rays,220.,8400.,8500.,8400.,100./220.,\
                       *primCoeffs)
    # Keep only rays striking the physical mirror span (8400-8500 mm axial).
    ind = np.logical_and(rays[3]>8400.,rays[3]<8500.)
    tran.vignette(rays,ind=ind)
    tran.transform(rays,0,0,8400.,0,0,0)
    tran.itransform(rays,*primalign)
    tran.reflect(rays)
    #Go to primary focus
    tran.transform(rays,0,0,-primfoc,0,0,0)
    surf.flat(rays)
    return rays,theta
|
mit
|
xmnlab/minilab
|
labtrans/daq/prepare_data.py
|
1
|
2713
|
from __future__ import division
from datetime import timedelta, datetime
from collections import defaultdict
from matplotlib.ticker import EngFormatter
import matplotlib.pyplot as plt
import pickle
import sys
from copy import deepcopy
import numpy as np
# mswim module path
sys.path.insert(0, '/var/www/mswim/')
# mswim packages
from mswim import settings
from mswim.libs.db import conn as db
from mswim.apps.acquisition.models import AcquisitionModel
def calc_time(delta_t, N):
    """Return a list of N timestamps starting at `now` and spaced by `delta_t`."""
    start = datetime.now()
    return [start + i * delta_t for i in range(N)]
def test_pickle():
    """Replay a pickled acquisition ('data.pic') through AcquisitionModel and plot it.

    NOTE: this function was originally also named `test`, so it was silently
    shadowed by the second `test` defined below and unreachable; it is
    renamed here so both entry points are callable (no caller could have
    been reaching the old name).
    """
    db.Pool.connect()

    data_pickle = pickle.load(open('data.pic', 'rb'))

    # Static description of the 16-channel ceramic-sensor DAQ device.
    DEVICES = {}
    _channels = [dict([('Dev2/ai%s' % sen, sen) for sen in range(16)])]
    DEVICES['ceramic'] = {
        'trigger': 'Dev2/port0/line0',
        'channels': _channels,
        'sensor_type': 2,
        'temperature_sensor': False,
        'rate':5000,
        'minv':-10,
        'maxv':10
    }
    for chans in DEVICES['ceramic']['channels']:
        header = {
            'acq_datetime': datetime.now(),
            'temperature': {'5cm': None, '17cm': None},
            'channels': len(chans)
        }
        # Fabricate one timestamp per sample at the device sample rate.
        data, acq_time = data_pickle, calc_time(timedelta(seconds=1/5000), 10000)
        sensors = defaultdict(dict)
        for chan_i, chan_data in data.items():
            i_sensor = str(chans[chan_i])
            for t, sensor_voltage in enumerate(chan_data):
                # changed to return use the same baseline
                sensor_time = acq_time[t]
                if not sensors[i_sensor]:
                    sensors[i_sensor] = defaultdict(dict)
                sensors[i_sensor][sensor_time] = sensor_voltage
                print(sensor_voltage)
        # print(count_0) # 4 sensors
        # print(count_1) # 10000 points per sensor
        # print(sensors)
        # exit()
        # save the data
        acq = AcquisitionModel(
            header, sensors, sensor_type=2
        )
        plot(acq.data.dict())
def test():
    # Load a stored acquisition by id from the database and plot its traces.
    db.Pool.connect()
    acq = AcquisitionModel.load('1010')
    plot(acq.data.dict())
def plot(dados):
    """Plot each sensor's voltage-vs-time trace on a shared axis and show it.

    `dados` maps sensor id -> {timestamp: voltage}.
    """
    formatter = EngFormatter(unit='s', places=1)
    for sensor_id, series in dados.items():
        axis = plt.subplot(111)
        axis.xaxis.set_major_formatter(formatter)
        # Sort the samples chronologically before plotting.
        times = sorted(series.keys())
        volts = [series[stamp] for stamp in times]
        axis.plot(times, volts)
    plt.show()
if __name__ == '__main__':
    # Run the database-backed acquisition demo when executed as a script.
    test()
|
gpl-3.0
|
vigilv/scikit-learn
|
examples/svm/plot_svm_margin.py
|
318
|
2328
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Build 40 linearly separable points: two Gaussian blobs of 20 samples each,
# shifted apart along the diagonal.
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20

# figure number
fignum = 1

# Fit one linear SVM per regularisation setting.
for name, penalty in (('unreg', 1), ('reg', 0.05)):

    clf = svm.SVC(kernel='linear', C=penalty)
    clf.fit(X, Y)

    # Separating hyperplane w.x + b = 0, rewritten as y = slope*x + offset.
    weights = clf.coef_[0]
    slope = -weights[0] / weights[1]
    xx = np.linspace(-5, 5)
    yy = slope * xx - (clf.intercept_[0]) / weights[1]

    # Margin boundaries: the parallels through the support vectors.
    margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
    yy_down = yy + slope * margin
    yy_up = yy - slope * margin

    # Draw the decision line, its margins, the data and the support vectors.
    plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    plt.plot(xx, yy, 'k-')
    plt.plot(xx, yy_down, 'k--')
    plt.plot(xx, yy_up, 'k--')

    plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
                facecolors='none', zorder=10)
    plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)

    plt.axis('tight')
    x_min = -4.8
    x_max = 4.2
    y_min = -6
    y_max = 6

    # Evaluate the classifier on a dense grid for the background colouring.
    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
    Z = clf.predict(np.c_[XX.ravel(), YY.ravel()]).reshape(XX.shape)

    plt.figure(fignum, figsize=(4, 3))
    plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)

    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)

    plt.xticks(())
    plt.yticks(())
    fignum = fignum + 1

plt.show()
|
bsd-3-clause
|
fabioticconi/scikit-learn
|
sklearn/datasets/lfw.py
|
31
|
19544
|
"""Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)

# Root URL of the official LFW site hosting the archives and metadata.
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"

# Pair-list metadata files required by the face-verification (pairs) tasks.
TARGET_FILENAMES = [
    'pairsDevTrain.txt',
    'pairsDevTest.txt',
    'pairs.txt',
]
def scale_face(face):
    """Scale back to 0-1 range in case of normalization for plotting.

    Returns a new array; the input is not modified. The division is done
    out-of-place so integer input arrays work too (the previous in-place
    `/=` raised a TypeError on int dtypes).
    """
    scaled = face - face.min()
    scaled = scaled / scaled.max()
    return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
    """Helper function to download any missing LFW data.

    Returns (lfw_home, data_folder_path). Downloads the pairs metadata files
    and (if needed) the image archive, then extracts the archive in place.
    Raises IOError when a required file is missing and download_if_missing
    is False.
    """
    data_home = get_data_home(data_home=data_home)
    lfw_home = join(data_home, "lfw_home")

    if funneled:
        archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw_funneled")
        archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
    else:
        archive_path = join(lfw_home, ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw")
        archive_url = BASE_URL + ARCHIVE_NAME

    if not exists(lfw_home):
        makedirs(lfw_home)

    # Fetch the small pairs-metadata text files first.
    for target_filename in TARGET_FILENAMES:
        target_filepath = join(lfw_home, target_filename)
        if not exists(target_filepath):
            if download_if_missing:
                url = BASE_URL + target_filename
                logger.warning("Downloading LFW metadata: %s", url)
                urllib.urlretrieve(url, target_filepath)
            else:
                raise IOError("%s is missing" % target_filepath)

    if not exists(data_folder_path):

        if not exists(archive_path):
            if download_if_missing:
                logger.warning("Downloading LFW data (~200MB): %s",
                               archive_url)
                urllib.urlretrieve(archive_url, archive_path)
            else:
                # Bug fix: this branch previously reported `target_filepath`
                # (leftover loop variable — the last metadata file) instead
                # of the archive that is actually missing.
                raise IOError("%s is missing" % archive_path)

        import tarfile
        logger.info("Decompressing the data archive to %s", data_folder_path)
        tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
        # The extracted tree replaces the archive; free the disk space.
        remove(archive_path)

    return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
    """Internally used to load images.

    Decodes every jpeg in `file_paths` into one float32 array of shape
    (n_faces, h, w) or (n_faces, h, w, 3), cropped by `slice_` and
    optionally rescaled by `resize`.
    """
    # Try to import imread and imresize from PIL. We do this here to prevent
    # the whole sklearn.datasets module from depending on PIL.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
        from scipy.misc import imresize
    except ImportError:
        raise ImportError("The Python Imaging Library (PIL)"
                          " is required to load data from jpeg files")

    # compute the portion of the images to load to respect the slice_ parameter
    # given by the caller
    default_slice = (slice(0, 250), slice(0, 250))
    if slice_ is None:
        slice_ = default_slice
    else:
        slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))

    h_slice, w_slice = slice_
    h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
    w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)

    if resize is not None:
        resize = float(resize)
        h = int(resize * h)
        w = int(resize * w)

    # allocate some contiguous memory to host the decoded image slices
    n_faces = len(file_paths)
    if not color:
        faces = np.zeros((n_faces, h, w), dtype=np.float32)
    else:
        faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)

    # iterate over the collected file path to load the jpeg files as numpy
    # arrays
    for i, file_path in enumerate(file_paths):
        if i % 1000 == 0:
            logger.info("Loading face #%05d / %05d", i + 1, n_faces)

        # Checks if jpeg reading worked. Refer to issue #3594 for more
        # details.
        img = imread(file_path)
        # Bug fix: `img.ndim is 0` compared identity with an int literal,
        # which only works by accident of CPython's small-int cache; use
        # numeric equality instead.
        if img.ndim == 0:
            raise RuntimeError("Failed to read the image file %s, "
                               "Please make sure that libjpeg is installed"
                               % file_path)

        face = np.asarray(img[slice_], dtype=np.float32)
        face /= 255.0  # scale uint8 coded colors to the [0.0, 1.0] floats
        if resize is not None:
            face = imresize(face, resize)
        if not color:
            # average the color channels to compute a gray levels
            # representation
            face = face.mean(axis=2)

        faces[i, ...] = face

    return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
                      min_faces_per_person=0):
    """Perform the actual data loading for the lfw people dataset

    This operation is meant to be cached by a joblib wrapper.
    """
    # Walk the data folder and keep only the people having at least
    # `min_faces_per_person` face pictures.
    person_names, file_paths = [], []
    for folder_name in sorted(listdir(data_folder_path)):
        folder_path = join(data_folder_path, folder_name)
        if not isdir(folder_path):
            continue
        paths = [join(folder_path, fname) for fname in listdir(folder_path)]
        if len(paths) >= min_faces_per_person:
            display_name = folder_name.replace('_', ' ')
            person_names.extend([display_name] * len(paths))
            file_paths.extend(paths)

    n_faces = len(file_paths)
    if n_faces == 0:
        raise ValueError("min_faces_per_person=%d is too restrictive" %
                         min_faces_per_person)

    # Encode the (sorted, unique) person names as integer class labels.
    target_names = np.unique(person_names)
    target = np.searchsorted(target_names, person_names)

    faces = _load_imgs(file_paths, slice_, color, resize)

    # shuffle the faces with a deterministic RNG scheme to avoid having
    # all faces of the same person in a row, as it would break some
    # cross validation and learning algorithms such as SGD and online
    # k-means that make an IID assumption
    shuffled = np.arange(n_faces)
    np.random.RandomState(42).shuffle(shuffled)
    return faces[shuffled], target[shuffled], target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
                     min_faces_per_person=0, color=False,
                     slice_=(slice(70, 195), slice(78, 172)),
                     download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) people dataset

    This dataset is a collection of JPEG pictures of famous people collected
    on the internet; all details are available on the official website:
    http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Recognition (or Identification): given the
    picture of a face, find the name of the person given a training set
    (gallery). The original images are 250 x 250 pixels, but the default
    slice and resize arguments reduce them to 62 x 47.

    Parameters
    ----------
    data_home : optional, default: None
        Alternate download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize the each face picture.

    min_faces_per_person : int, optional, default 0
        The extracted dataset only retains pictures of people that have at
        least `min_faces_per_person` different pictures.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single gray
        level channel. If color is True the data has one more dimension.

    slice_ : optional
        Custom 2D slice (height, width) extracting the 'interesting' part
        of the jpeg files, avoiding statistical correlation from the
        background.

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    dataset : dict-like object with the following attributes:

    dataset.data : numpy array of shape (13233, 2914)
        Each row corresponds to a ravelled face image of original size
        62 x 47 pixels. Changing the ``slice_`` or ``resize`` parameters
        changes the output shape.

    dataset.images : numpy array of shape (13233, 62, 47)
        Each row is a face image corresponding to one of the 5749 people in
        the dataset. Changing ``slice_`` or ``resize`` changes the shape.

    dataset.target : numpy array of shape (13233,)
        Labels associated to each face image. Those labels range from 0-5748
        and correspond to the person IDs.

    dataset.DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading LFW people faces from %s', lfw_home)

    # Memoize the expensive image decoding step; the joblib wrapper also
    # returns memmapped data arrays for optimal memory usage.
    memory = Memory(cachedir=lfw_home, compress=6, verbose=0)
    cached_loader = memory.cache(_fetch_lfw_people)

    faces, target, target_names = cached_loader(
        data_folder_path, resize=resize,
        min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)

    # Expose both the image stack and a flattened (n_samples, n_features)
    # view over the same memory.
    n_samples = len(faces)
    return Bunch(data=faces.reshape(n_samples, -1), images=faces,
                 target=target, target_names=target_names,
                 DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
                     color=False, resize=None):
    """Perform the actual data loading for the LFW pairs dataset

    This operation is meant to be cached by a joblib wrapper.

    Parameters
    ----------
    index_file_path : str
        Path of the tab-separated metadata file listing the pairs.
    data_folder_path : str
        Root folder holding one sub-folder of jpeg files per person.
    slice_, color, resize
        Forwarded to ``_load_imgs``.

    Returns
    -------
    pairs : ndarray of shape (n_pairs, 2, h, w[, 3])
    target : ndarray of shape (n_pairs,)
        1 for "same person" pairs, 0 for "different persons" pairs.
    target_names : ndarray of str
    """
    # parse the index file to find the number of pairs to be able to allocate
    # the right amount of memory before starting to decode the jpeg files
    with open(index_file_path, 'rb') as index_file:
        split_lines = [ln.strip().split(b('\t')) for ln in index_file]
    pair_specs = [sl for sl in split_lines if len(sl) > 2]
    n_pairs = len(pair_specs)

    # iterating over the metadata lines for each pair to find the filename to
    # decode and load in memory
    # BUG FIX: use the builtin ``int`` dtype -- the ``np.int`` alias is
    # deprecated and removed in recent numpy releases (it was an alias of
    # the builtin, so behavior is unchanged).
    target = np.zeros(n_pairs, dtype=int)
    file_paths = list()
    for i, components in enumerate(pair_specs):
        if len(components) == 3:
            # 3 fields: <name> <index 1> <index 2> -> same person
            target[i] = 1
            pair = (
                (components[0], int(components[1]) - 1),
                (components[0], int(components[2]) - 1),
            )
        elif len(components) == 4:
            # 4 fields: <name 1> <index 1> <name 2> <index 2> -> different
            target[i] = 0
            pair = (
                (components[0], int(components[1]) - 1),
                (components[2], int(components[3]) - 1),
            )
        else:
            raise ValueError("invalid line %d: %r" % (i + 1, components))
        for j, (name, idx) in enumerate(pair):
            try:
                person_folder = join(data_folder_path, name)
            except TypeError:
                # ``name`` is bytes under Python 3: decode before joining
                person_folder = join(data_folder_path, str(name, 'UTF-8'))
            filenames = list(sorted(listdir(person_folder)))
            file_path = join(person_folder, filenames[idx])
            file_paths.append(file_path)

    pairs = _load_imgs(file_paths, slice_, color, resize)
    # Reshape the flat image stack (2 * n_pairs, ...) into explicit pairs
    # (n_pairs, 2, ...).
    shape = list(pairs.shape)
    n_faces = shape.pop(0)
    shape.insert(0, 2)
    shape.insert(0, n_faces // 2)
    pairs.shape = shape

    return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will "
            "be removed in 0.19."
            "Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
    """Alias for fetch_lfw_people(download_if_missing=False)

    Check fetch_lfw_people.__doc__ for the documentation and parameter list.
    """
    # Backward-compatible shim: delegate everything to the canonical loader,
    # keeping the historical load_* default of never downloading.
    return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
                    color=False, slice_=(slice(70, 195), slice(78, 172)),
                    download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) pairs dataset

    This dataset is a collection of JPEG pictures of famous people collected
    on the internet; all details are available on the official website:
    http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Verification: given a pair of two pictures, a
    binary classifier must predict whether the two images are from the same
    person. This corresponds to the "Restricted" task described in the
    official `README.txt`_; the "Unrestricted" variant is not supported.

    .. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt

    The original images are 250 x 250 pixels, but the default slice and
    resize arguments reduce them to 62 x 47.

    Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.

    Parameters
    ----------
    subset : optional, default: 'train'
        Which split to load: 'train' (development training set), 'test'
        (development test set) or '10_folds' (official evaluation set meant
        to be used with a 10-folds cross validation).

    data_home : optional, default: None
        Alternate download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize the each face picture.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single gray
        level channel. If color is True the data has one more dimension.

    slice_ : optional
        Custom 2D slice (height, width) extracting the 'interesting' part
        of the jpeg files, avoiding statistical correlation from the
        background.

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    The data is returned as a Bunch object with the following attributes:

    data : numpy array of shape (2200, 5828). Shape depends on ``subset``.
        Each row corresponds to the 2 ravel'd face images of one pair.
        Changing ``slice_``, ``resize`` or ``subset`` changes the shape.

    pairs : numpy array of shape (2200, 2, 62, 47). Shape depends on
        ``subset``. Each row has the 2 face images of one pair, same or
        different person, from the dataset of 5749 people.

    target : numpy array of shape (2200,). Shape depends on ``subset``.
        Label of each pair: different persons or the same person.

    DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading %s LFW pairs from %s', subset, lfw_home)

    # Memoize the expensive pair decoding step; the joblib wrapper also
    # returns memmapped data arrays for optimal memory usage.
    memory = Memory(cachedir=lfw_home, compress=6, verbose=0)
    cached_loader = memory.cache(_fetch_lfw_pairs)

    # Resolve the metadata index file matching the requested subset.
    label_filenames = {
        'train': 'pairsDevTrain.txt',
        'test': 'pairsDevTest.txt',
        '10_folds': 'pairs.txt',
    }
    if subset not in label_filenames:
        raise ValueError("subset='%s' is invalid: should be one of %r" % (
            subset, list(sorted(label_filenames.keys()))))
    index_file_path = join(lfw_home, label_filenames[subset])

    pairs, target, target_names = cached_loader(
        index_file_path, data_folder_path, resize=resize, color=color,
        slice_=slice_)

    n_pairs = len(pairs)
    return Bunch(data=pairs.reshape(n_pairs, -1), pairs=pairs,
                 target=target, target_names=target_names,
                 DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will "
            "be removed in 0.19."
            "Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
    """Alias for fetch_lfw_pairs(download_if_missing=False)

    Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
    """
    # Backward-compatible shim: delegate everything to the canonical loader,
    # keeping the historical load_* default of never downloading.
    return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
|
bsd-3-clause
|
mjgrav2001/scikit-learn
|
examples/manifold/plot_lle_digits.py
|
181
|
8510
|
"""
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learn a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
# Load the first 6 digit classes; X holds the 8x8 images flattened to 64
# features per sample, y the corresponding class labels.
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30  # neighborhood size shared by the neighbors-based embeddings
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
    """Scatter a 2D embedding as colored digit labels, overlaying image
    thumbnails where the points are not too crowded."""
    # Rescale each coordinate into the [0, 1] range.
    lo, hi = np.min(X, 0), np.max(X, 0)
    X = (X - lo) / (hi - lo)

    plt.figure()
    ax = plt.subplot(111)
    for i in range(X.shape[0]):
        plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
                 color=plt.cm.Set1(y[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})

    if hasattr(offsetbox, 'AnnotationBbox'):
        # only print thumbnails with matplotlib > 1.0
        shown_images = np.array([[1., 1.]])  # just something big
        for i in range(digits.data.shape[0]):
            dist = np.sum((X[i] - shown_images) ** 2, 1)
            if np.min(dist) < 4e-3:
                # don't show points that are too close
                continue
            shown_images = np.r_[shown_images, [X[i]]]
            imagebox = offsetbox.AnnotationBbox(
                offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
                X[i])
            ax.add_artist(imagebox)
    plt.xticks([])
    plt.yticks([])
    if title is not None:
        plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
    # each 8x8 digit occupies a 10x10 cell, leaving a 1-pixel margin
    ix = 10 * i + 1
    for j in range(n_img_per_row):
        iy = 10 * j + 1
        img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
# (this embedding is not timed, unlike the ones below)
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
               "Principal Components projection of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing LDA projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01  # Make X invertible
t0 = time()
X_lda = lda.LDA(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
               "Linear Discriminant projection of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
               "Isomap projection of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                      method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
               "Locally Linear Embedding of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                      method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
               "Modified Locally Linear Embedding of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                      method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
               "Hessian Locally Linear Embedding of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                      method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
               "Local Tangent Space Alignment of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
               "MDS embedding of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
                                       max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
# NOTE: the reported time also includes the SVD reduction step below.
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
               "Random forest embedding of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
                                      eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
               "Spectral embedding of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
# (initialized with PCA for a reproducible, globally stable embedding)
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
               "t-SNE embedding of the digits (time %.2fs)" %
               (time() - t0))
plt.show()
|
bsd-3-clause
|
sebalander/sebaPhD
|
resources/PTZgrid/cornerFinderPtzGrid.py
|
1
|
2649
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 27 13:34:13 2016
@author: sebalander

Refine hand-picked chessboard corner positions in a PTZ camera image and
save the corners plus the corresponding 3D grid pattern to .npy files.
"""
# %%
import cv2
import numpy as np
import matplotlib.pyplot as plt
# %%
# input
# 6x9 chessboard
#imageFile = "./resources/fishChessboard/Screenshot from fishSeba.mp4 - 12.png"
# 8x11 A4 shetts chessboard
imageFile = "ptz_(0.850278, -0.014444, 0.0).jpg"
cornersIniFile = "PTZgridImageInitialConditions.txt"
# output
cornersFile = "ptzCorners.npy"
patternFile = "ptzGridPattern.npy"
imgShapeFile = "ptzImgShape.npy"
# load
# corners set by hand, read as (n,1,2) size
# must format as float32
cornersIni = np.array([[crnr] for crnr in np.loadtxt(cornersIniFile)],
                      dtype='float32')
img = cv2.imread(imageFile, cv2.IMREAD_GRAYSCALE)
imgCol = cv2.imread(imageFile)
# %% BINARIZE IMAGE
# see http://docs.opencv.org/3.0.0/d7/d4d/tutorial_py_thresholding.html
th = cv2.adaptiveThreshold(img,
                           255,
                           cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                           cv2.THRESH_BINARY,
                           501,
                           0)
# apply a morphological close to remove small specks
kernel = np.ones((5,5),np.uint8)
closed = cv2.morphologyEx(th, cv2.MORPH_CLOSE, kernel)
plt.imshow(th)
plt.imshow(closed)
plt.imshow(imgCol)
plt.plot(cornersIni[:,0,0],cornersIni[:,0,1],'ow')
# %% refine corners
# termination criteria for cornerSubPix
subpixCriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, # termination criteria type
                  300, # max number of iterations
                  0.01) # min accuracy
corners = np.copy(cornersIni)
# refine corner positions in-place on the binarized image
cv2.cornerSubPix(closed,
                 corners,
                 (15, 15),
                 (5, 5),
                 subpixCriteria);
plt.imshow(imgCol[:,:,[2,1,0]])
plt.plot(cornersIni[:,0,0],cornersIni[:,0,1],'+r', label="Initial")
plt.plot(corners[:,0,0],corners[:,0,1],'xb', label="Optimized")
plt.legend()
# %% DEFINE FIDUCIAL POINTS IN 3D SCENE, by hand
# shape must be (1,n,3), float32
nx = 8
ny = 12
xx = range(nx)
y0 = 12
yy = range(y0,y0-ny,-1)
grid = np.array([[[[x, y, 0] for x in xx] for y in yy]], dtype='float32')
grid = grid.reshape((1,nx*ny,3))
# drop the grid corner that is not present on the physical pattern
toDelete = np.logical_and(grid[0,:,0] < 2, grid[0,:,1] < 2)
grid = grid[:,np.logical_not(toDelete),:]
# scale to the size of A4 sheet
grid[0,:,0] *= 0.21
grid[0,:,1] *= 0.297
# %% PLOT FIDUCIAL POINTS
fig = plt.figure()
from mpl_toolkits.mplot3d import Axes3D
ax = fig.gca(projection='3d')
ax.scatter(grid[0,:,0], grid[0,:,1], grid[0,:,2])
plt.show()
# %% SAVE DATA POINTS
np.save(cornersFile, corners)
np.save(patternFile, grid)
np.save(imgShapeFile, img.shape)
|
bsd-3-clause
|
LouisPlisso/analysis_tools
|
cdfplot-tools/cdfplot_1.3.py
|
1
|
13501
|
#!/usr/bin/env python
"Module to plot cdf from data or file. Can be called directly."
from __future__ import division, print_function
from optparse import OptionParser
import sys
import pylab
from matplotlib.font_manager import FontProperties
_VERSION = '1.2'
#TODO: possibility to place legend outside graph:
#pylab.subfigure(111)
#pylab.subplots_adjust(right=0.8) or (top=0.8)
#pylab.legend(loc=(1.1, 0.5)
#CCDF
def ccdfplotdataN(list_data_name, _xlabel = 'x',
                  _ylabel = r'1 - P(X$\leq$x)',
                  _title = 'Empirical Distribution',
                  _fs_legend='medium',
                  _fs = 'x-large', _loc=0):
    """Plot the ccdf of a list of (name, data) pairs on a single figure.

    Curves cycle through the line styles in ``_ls``, growing the line width
    each time the cycle wraps; the last curve also sets labels and title,
    then the x axis is switched to log scale.
    """
    if not list_data_name:
        # BUG FIX: route the warning to stderr -- the stream was passed as
        # a positional argument and its repr was printed instead.
        print("no data to plot", file=sys.stderr)
        return
    #corresponding line width with larger width for '-.' and ':'
    _ls = ['-', '-.', '--'] #, ':']
    _lw = [1, 2, 3] #, 4]
    _ls_len = len(_ls)
    #plot all cdfs except last one
    for i in range(len(list_data_name) - 1):
        name, data = list_data_name[i]
        #plot with round robin line style (ls)
        #and increasing line width
        (div, mod) = divmod(i, _ls_len)
        ccdfplotdata(data, _name=name, _lw=_lw[mod]+3*div,
                     _ls=_ls[mod], _fs=_fs, _fs_legend=_fs_legend)
    #for last cdf, we put the legend and names
    (name, data) = list_data_name[-1]
    (div, mod) = divmod(len(list_data_name), _ls_len)
    ccdfplotdata(data, _name=name, _title=_title, _xlabel=_xlabel,
                 _ylabel=_ylabel, _lw=_lw[mod]+2*div, _ls=_ls[mod], _fs=_fs)
    setgraph_logx(_loc=_loc)
def ccdfplotdata(data_in, _xlabel = 'x', _ylabel = r'1 - P(X$\leq$x)',
                 _title = 'Empirical Distribution',
                 _name = 'Data', _lw = 2, _fs = 'x-large', _fs_legend='medium',
                 _ls = '-', _loc=0):
    """Plot the ccdf (1 - empirical cdf) of a data array.

    The input is copied and sorted; the original sequence is not modified.
    """
    data = pylab.array(data_in, copy=True)
    data.sort()
    data_len = len(data)
    if data_len == 0:
        # Guard against empty input, consistently with cdfplotdata.
        print("no data to plot", file=sys.stderr)
        return
    # NOTE(review): with a single sample ``data_len - 1`` is 0 and the
    # division yields nan; callers are expected to pass >= 2 samples.
    ccdf = 1 - pylab.arange(data_len)/(data_len - 1.0)
    pylab.plot(data, ccdf, 'k', lw = _lw, drawstyle = 'steps',
               label = _name, ls = _ls)
    pylab.xlabel(_xlabel, size = _fs)
    pylab.ylabel(_ylabel, size = _fs)
    pylab.title(_title, size = _fs)
    font = FontProperties(size = _fs_legend)
    pylab.legend(loc = _loc, prop = font)
def ccdfplot(_file, col = 0, xlabel = 'X', ylabel = r'1 - P(X$\leq$x)',
             title = 'Empirical Distribution', name = 'Data',
             _lw = 2, _fs = 'x-large', _ls = '-', _loc=0):
    """Load one column from a file (path or file object) and plot its ccdf."""
    column = pylab.loadtxt(_file, usecols = [col])
    ccdfplotdata(column, _xlabel = xlabel, _ylabel = ylabel,
                 _title = title, _name = name,
                 _lw = _lw, _fs = _fs, _ls = _ls, _loc = _loc)
#CDF
def cdfplotdataN(list_data_name, _xlabel = 'x', _ylabel = r'P(X$\leq$x)',
                 _title = 'Empirical Distribution', _fs = 'x-large',
                 _fs_legend='medium', _loc = 0, do_color=True, logx=True, logy=False):
    """Plot the cdf of a list of (name, data) pairs on a single figure.

    Curves cycle through the ``_ls`` line styles and ``_colors`` (pairs of
    consecutive plots share a color); the axes are then set to the scale
    selected by ``logx``/``logy``.
    """
    if not list_data_name:
        # BUG FIX: send the warning to stderr -- the stream was passed as a
        # positional argument and its repr was printed instead.
        print("no data to plot", file=sys.stderr)
        return
    #corresponding line width with larger width for '-.' and ':'
    _ls = ['-', '-.', '-', '--'] * 2 #, ':']
    _lw = [2, 4] + [2, 4, 2, 4, 2, 4]#, 4]
    assert len(_ls) == len(_lw)
    # consequent plots are same color
    _colors = ['k', 'k', 'c', 'c', 'm', 'm', 'y', 'y']
    for i in range(len(list_data_name)):
        name, data = list_data_name[i]
        #plot with round robin line style (ls) and increasing line width
        (div, mod) = divmod(i, len(_ls))
        if not do_color:
            color = 'k'
        else:
            color = _colors[i % len(_colors)]
        line_width = _lw[mod]+2*div
        cdfplotdata(data, _name=name, _title=_title, _xlabel=_xlabel,
                    _ylabel=_ylabel, _lw=line_width, _ls=_ls[mod], _fs=_fs,
                    _color=color)
    # switch the axes to the requested combination of scales
    if logx and logy:
        setgraph_loglog(_loc=_loc, _fs_legend=_fs_legend)
    elif logy:
        setgraph_logy(_loc=_loc, _fs_legend=_fs_legend)
    elif logx:
        setgraph_logx(_loc=_loc, _fs_legend=_fs_legend)
    else:
        setgraph_lin(_loc=_loc, _fs_legend=_fs_legend)
def cdfplotdata(data_in, _color='k', _xlabel='x', _ylabel=r'P(X$\leq$x)',
                _title='Empirical Distribution', _name='Data', _lw=2, _fs='x-large',
                _fs_legend='medium', _ls = '-', _loc=0):
    """Plot the empirical cdf of a data array.

    The input is copied via ``sorted``; the last value is duplicated so the
    step plot reaches P = 1 at the maximum observed value. The legend entry
    appends the sample count to ``_name``.
    """
    data = sorted(data_in)
    data_len = len(data)
    if data_len == 0:
        # BUG FIX: write the warning to stderr instead of printing the
        # stream object itself as a second positional argument.
        print("no data to plot", file=sys.stderr)
        return
    cdf = pylab.arange(data_len+1)/(data_len - 0.0)
    data.append(data[-1])
    pylab.plot(data, cdf, _color, lw = _lw, drawstyle = 'steps',
               label = _name + ': %d' % data_len, ls = _ls)
    pylab.xlabel(_xlabel, size = _fs)
    pylab.ylabel(_ylabel, size = _fs)
    pylab.title(_title, size = _fs)
    font = FontProperties(size = _fs_legend)
    pylab.legend(loc = _loc, prop = font)
def cdfplot(_file, col = 0, xlabel = 'X',
            ylabel = r'P(X$\leq$x)',
            title = 'Empirical Distribution', name = 'Data',
            _lw = 2, _fs = 'x-large', _ls = '-', _loc=0):
    """Load one column from a file (path or file object) and plot its cdf."""
    column = pylab.loadtxt(_file, usecols = [col])
    cdfplotdata(column, _xlabel = xlabel, _ylabel = ylabel,
                _title = title, _name = name,
                _lw = _lw, _fs = _fs, _ls = _ls, _loc = _loc)
def setgraph_lin(_fs = 'x-large', _loc = 2, _fs_legend = 'medium'):
    # Docstring fixed: this variant keeps the default linear scale (the old
    # text said "xlogscale", a copy-paste from setgraph_logx).
    "Set graph in linear scale and adjust x&y tick label and legend sizes"
    pylab.grid(True)
    _ax = pylab.gca()
    for tick in _ax.xaxis.get_major_ticks():
        tick.label1.set_fontsize(_fs)
    for tick in _ax.yaxis.get_major_ticks():
        tick.label1.set_fontsize(_fs)
    font = FontProperties(size = _fs_legend)
    pylab.legend(loc = _loc, prop = font)
def setgraph_logx(_fs = 'x-large', _loc = 2, _fs_legend = 'medium'):
    """Switch the current axes to log-x scale, enable the grid and resize
    the tick labels and legend."""
    pylab.grid(True)
    pylab.semilogx(nonposy='clip', nonposx='clip')
    axes = pylab.gca()
    for axis in (axes.xaxis, axes.yaxis):
        for tick in axis.get_major_ticks():
            tick.label1.set_fontsize(_fs)
    pylab.legend(loc = _loc, prop = FontProperties(size = _fs_legend))
def setgraph_loglog(_fs = 'x-large', _loc = 2, _fs_legend = 'medium'):
    # Docstring fixed: this variant sets BOTH axes to log scale (the old
    # text said "xlogscale", a copy-paste from setgraph_logx).
    "Set graph in loglog scale and adjusts x&y markers"
    pylab.grid(True)
    pylab.loglog(nonposy='clip', nonposx='clip')
    _ax = pylab.gca()
    for tick in _ax.xaxis.get_major_ticks():
        tick.label1.set_fontsize(_fs)
    for tick in _ax.yaxis.get_major_ticks():
        tick.label1.set_fontsize(_fs)
    font = FontProperties(size = _fs_legend)
    pylab.legend(loc = _loc, prop = font)
def setgraph_logy(_fs = 'x-large', _loc = 2, _fs_legend = 'medium'):
    # Docstring fixed: this variant sets the Y axis to log scale (the old
    # text said "xlogscale", a copy-paste from setgraph_logx).
    "Set graph in ylogscale and adjusts x&y markers"
    pylab.grid(True)
    pylab.semilogy(nonposy='clip', nonposx='clip')
    _ax = pylab.gca()
    for tick in _ax.xaxis.get_major_ticks():
        tick.label1.set_fontsize(_fs)
    for tick in _ax.yaxis.get_major_ticks():
        tick.label1.set_fontsize(_fs)
    font = FontProperties(size = _fs_legend)
    pylab.legend(loc = _loc, prop = font)
#repartition plots
def repartplotdataN(list_data_name, _xlabel = 'Rank',
                    _ylabel = 'Cumulative Percentage of Data',
                    _title = 'Repartition of values',
                    _fs = 'x-large', do_color=True, _loc=0, loglog=True):
    """Plot the repartition curves of a list of (name, data) pairs.

    Curves cycle through the ``_ls`` line styles and ``_colors`` (pairs of
    consecutive plots share a color); the axes are set to loglog or linear
    scale depending on ``loglog``.
    """
    if not list_data_name:
        # BUG FIX: send the warning to stderr -- the stream was passed as a
        # positional argument and its repr was printed instead.
        print("no data to plot", file=sys.stderr)
        return
    #corresponding line width with larger width for '-.' and ':'
    _ls = ['-', '-.', '-', '--'] * 2 #, ':']
    _lw = [2, 4] + [2, 4, 2, 4, 2, 4]#, 4]
    assert len(_ls) == len(_lw)
    _len_ls = len(_ls)
    # consequent plots are same color
    _colors = ['k', 'k', 'c', 'c', 'm', 'm', 'y', 'y']
    for i in range(len(list_data_name)):
        name, data = list_data_name[i]
        #plot with round robin line style (ls) and increasing line width
        (div, mod) = divmod(i, _len_ls)
        if not do_color:
            color = 'k'
        else:
            color = _colors[i % len(_colors)]
        line_width = _lw[mod]+2*div
        repartplotdata(data, _name=name, _title=_title, _xlabel=_xlabel,
                       _ylabel=_ylabel, _lw=line_width, _ls=_ls[mod], _fs=_fs,
                       _color=color)
    if loglog:
        setgraph_loglog(_loc=_loc)
    else:
        setgraph_lin(_loc=_loc)
def repartplotdata(data_in, _color='k', _xlabel = 'Rank',
                   _ylabel = 'Cumulative Percentage of Data',
                   _title = 'Repartition of values', _name = 'Data', _lw = 2,
                   _fs = 'x-large', _fs_legend='medium', _ls = '-', _loc=0):
    """Plot, against rank, the cumulative share (in percent) contributed by
    the largest values of a data array. The input is copied, not modified."""
    values_sorted = pylab.array(data_in, copy=True)
    values_sorted.sort()
    n_values = len(values_sorted)
    rank = pylab.arange(1, n_values + 1)
    # Accumulate from the largest value downwards, then normalize to 100%.
    cumulative = pylab.cumsum(values_sorted[::-1])
    pylab.plot(rank, 100 * cumulative / cumulative[-1], _color, lw = _lw,
               drawstyle = 'steps', label = _name + ': %d' % n_values,
               ls = _ls)
    pylab.xlabel(_xlabel, size = _fs)
    pylab.ylabel(_ylabel, size = _fs)
    pylab.title(_title, size = _fs)
    pylab.legend(loc = _loc, prop = FontProperties(size = _fs_legend))
def repartplot(_file, col = 0, xlabel = 'Rank',
               ylabel = 'Cumulative Percentage of Data',
               title = 'Repartition of values', name = 'Data',
               _lw = 2, _fs = 'x-large', _ls = '-', _loc=0):
    # Docstring fixed: this plots the repartition curve, not the cdf (the
    # old text was copy-pasted from cdfplot).
    "Plot the repartition of values of a column in file"
    data = pylab.loadtxt(_file, usecols = [col])
    repartplotdata(data, _xlabel = xlabel, _ylabel = ylabel,
                   _title = title, _name = name,
                   _lw = _lw, _fs = _fs, _ls = _ls, _loc = _loc)
def main():
    """Program wrapper.

    Parses the command line, reads the data file (or stdin), and plots the
    cdf, ccdf or repartition of the selected column.

    Fix over the original: the input file handle was opened but never
    closed; it is now closed in a ``finally`` clause (stdin excepted).
    """
    usage = "%prog -r data_file [-c col -x x_label -y y_label -t title \
-n data_name -lw line_width -fs fontsize [-g|-p]]"
    parser = OptionParser(usage = usage, version="%prog " + _VERSION)
    parser.add_option("-r", dest = "file",
                      help = "input data file or stdin if FILE is -")
    parser.add_option("-c", dest = "col", type = "int", default = 0,
                      help = "column in the file [default value = 0]")
    parser.add_option("-x", dest = "xlabel", default = 'X',
                      help = "x label")
    parser.add_option("-y", dest = "ylabel",
                      default = r'P(X$\leq$x)', help = "y label")
    parser.add_option("-t", dest = "title",
                      default = 'Empirical Distribution',
                      help = "graph title")
    parser.add_option("-n", dest = "name", default = 'Data',
                      help = "data name")
    parser.add_option("-l", "--lw", dest = "lw", type = "int",
                      default = 2, help = "line width")
    parser.add_option("-f", "--fs", dest = "fs", type = "int",
                      default = 18, help = "font size")
    parser.add_option("-g", "--ccdf", dest = "g",
                      action="store_true", default=False,
                      help = "plot ccdf instead of cdf")
    parser.add_option("-p", "--repartition", dest = "p",
                      action="store_true", default=False,
                      help = "plot repartition instead of cdf")
    (options, _) = parser.parse_args()
    if not options.file:
        print("Must provide filename.")
        parser.print_help()
        exit(1)
    if options.file == '-':
        out_file = sys.stdin
    else:
        try:
            out_file = open(options.file, 'r')
        except IOError:
            print("File, %s, does not exist." % options.file)
            parser.print_help()
            exit(1)
    # -g and -p select mutually exclusive plot types.
    if options.g and options.p:
        print("g and p options are exclusive.")
        parser.print_help()
        exit(1)
    try:
        pylab.clf()
        if options.g:
            ccdfplot(out_file, col=options.col, _lw=options.lw, _fs=options.fs)
        elif options.p:
            repartplot(out_file, col=options.col, _lw=options.lw, _fs=options.fs)
        else:
            cdfplot(out_file, col=options.col, xlabel=options.xlabel,
                    ylabel=options.ylabel, title=options.title,
                    name=options.name, _lw=options.lw, _fs=options.fs)
            setgraph_logx(_fs = options.fs)
        pylab.show()
    finally:
        # Close the data file we opened; never close the shared stdin.
        if out_file is not sys.stdin:
            out_file.close()
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
|
gpl-3.0
|
jgomezc1/FEM-Notes
|
scripts/SPRINGS/springs.py
|
1
|
4460
|
# -*- coding: utf-8 -*-
"""
Computes the displacements and internal forces for a mass-springs
system under static loads.
Variables
----------
ne : number of elements
nn : number of nodes
nm : number of material profiles
nl : number of point loads
IDN[]  : Stores the nodal identifier
IBC[] : Stores the nodal boundary condition (0:free; 1: restrained)
COR[] : Stores the nodal coordinates
MIE[] : Stores the equation lists for each element.
MAT[] : Stores the material properties.
IM[] : Stores the material profile for each element.
IP[] : Stores the nodal identifier for each point load.
PL[] : Stores points loads vector.
COR[] : Stores nodal coordinates.
KG[] : Stores the global stiffness matrix.
RHSG[] : Stores the global right hand side vector.
U[] : Stores the global displacements vector.
@author:Juan David Gómez
"""
import numpy as np
import femutil as fem
import matplotlib.pyplot as plt
def read_model(node_file, mater_file, els_file, load_file, verbose=True):
    """Read model data from text files.

    Parameters
    ----------
    node_file, mater_file, els_file, load_file : str or file-like
        Paths of the nodes, materials, elements and loads tables.
    verbose : bool, optional
        When True, echo copies of the input tables are written to the
        current directory (KNODES.txt, KMATES.txt, KELEMS.txt, KLOADS.txt)
        so the user can check what was actually read.

    Returns
    -------
    nodes, mats, elements, loads : ndarray
        Raw file contents; ``elements`` is loaded with integer dtype.
    """
    nodes = np.loadtxt(node_file)
    mats = np.loadtxt(mater_file)
    elements = np.loadtxt(els_file, dtype=int)
    loads = np.loadtxt(load_file)
    # Generate echo files
    if verbose:
        np.savetxt("KNODES.txt", nodes, fmt='%5.2f', delimiter=' ')
        np.savetxt("KMATES.txt", mats, fmt='%5.2f', delimiter=' ')
        # Bug fix: elements are integers (connectivities and material ids);
        # the original echoed them with the float format '%5.2f'.
        np.savetxt("KELEMS.txt", elements, fmt='%d', delimiter=' ')
        np.savetxt("KLOADS.txt", loads, fmt='%5.2f', delimiter=' ')
    return nodes, mats, elements, loads
# Driver script: reads the model, assembles and solves the global system,
# then post-processes element forces.  NOTE: this file is Python 2 code
# (print statements).
nodes, mats, elements, loads = read_model('nodes.txt', 'mater.txt',
                                          'eles.txt', 'loads.txt')
#
# Retrieves problem parameters and initializes
# arrays.
#
ne = len(elements[:,0])
nn = len(nodes[:,0])
nm = len(mats[:])
nl = len(loads[:,0])
#
IDN = np.zeros([nn,1],dtype=int)
IBC = np.zeros([nn,1],dtype=int)
COR = np.zeros([nn,1],dtype=float)
MIE = np.zeros([ne,2],dtype=int)
IM = np.zeros([ne,1],dtype=int)
IP = np.zeros([nl,1],dtype=int)
PL = np.zeros([nl,1],dtype=float)
#
# Pre-processing begins
#
# Counts active equations
#
# Free nodes (input flag 0) get consecutive equation numbers in IBC.
# NOTE(review): restrained nodes keep the input flag value 1 in IBC, but the
# assembly below tests equation numbers against -1 — this only works if the
# input convention actually marks restrained DOFs with -1; confirm against
# the nodes.txt format (the module docstring says 1 = restrained).
icount = 0
for i in range(0,nn):
    IDN[i] = int(nodes[i,0])
    COR[i] = nodes[i,1]
    IBC[i] = int(nodes[i,2])
    if IBC[i] == 0:
        IBC[i] = icount
        icount = icount + 1
#
# Assembles MIE in translated form
#
# MIE[i, :] stores the equation numbers of element i's two nodes.
for i in range(0,ne):
    IM[i] = elements[i,3]
    for j in range(1,3):
        MIE[i,j-1] = IBC[elements[i,j]]
#
# Reads points loads
#
for i in range(0,nl):
    IP[i] = int(loads[i,0])
    PL[i] = loads[i,1]
#
# Starts global system assembly
#
KG = np.zeros([icount,icount], dtype=float)
RHS = np.zeros([icount], dtype=float)
U = np.zeros([icount], dtype=float)
#
# Loops through all the elements
#
for i in range(0,ne):
    imm = IM[i]
    k = mats[imm]
    lmie = np.zeros([2],dtype=int)
    for j in range(0,2):
        lmie[j] = MIE[i,j]
    #
    # Calls UEL
    #
    # fem.uel returns the 2x2 local stiffness matrix for a spring of
    # stiffness k.
    kloc = fem.uel(k)
    print "Local stiffnes matrix for element %i:\n"%i, kloc
    #
    # Global stiffness
    #
    # Scatter the local matrix into KG, skipping restrained DOFs (-1).
    for ii in range(0,2):
        kk=lmie[ii]
        if kk != -1:
            for jj in range(0,2):
                ll = lmie[jj]
                if ll != -1:
                    KG[kk,ll] = KG[kk,ll] + kloc[ii,jj]
#
plt.figure()
plt.spy(KG)
plt.title("Stiff matrix")
plt.ylabel(r"$i$ index", size=14)
plt.xlabel(r"$j$ index", size=14)
#
# Global RHS
#
# NOTE(review): the loop bound is icount (number of free equations) but IP
# and PL only have nl entries — this should presumably be range(0, nl);
# it only works when icount == nl.  Also, IP[i] already holds a node id and
# IBC maps node -> equation, so IBC[IP[i]] looks correct here only if IP
# stores node identifiers; confirm against the loads.txt format.
for i in range(0,icount):
    il = IBC[IP[i]]
    RHS[il] = PL[i]
#%%
#
# Solution begins
#
U = np.linalg.solve(KG, RHS)
print 'Global displacements', U
# Sanity check: K U should reproduce the load vector.
equil = np.allclose(np.dot(KG, U), RHS)
if equil:
    print "Equilibrium satisfied."
else:
    print "Equilibrium not satisfied"
#%%
# Post-processing begins
#
# Recover each element's local displacements and internal forces.
for i in range(0,ne):
    imm = IM[i]
    k = mats[imm]
    ul = np.zeros([2],dtype=float)
    rhsl = np.zeros([2],dtype=float)
    for j in range(0,2):
        kk = MIE[i,j]
        if kk == -1:
            ul[j] = 0.0
        else:
            # NOTE(review): kk is already an equation number (MIE stores IBC
            # values), so indexing IBC again looks like a double translation;
            # U[kk] seems to be what was intended — confirm.
            ul[j] = U[IBC[kk]]
    kloc = fem.uel(k)
    rhsl = np.dot(kloc, ul)
    print 'Internal forces for element %i = ' % i, np.round(rhsl, 6)
#
# Prints element results
#
#
#np.savetxt("UGLOB.txt", U, fmt='%5.2f', delimiter=' ')
plt.figure()
plt.plot(U, 'ro')
plt.ylabel('Global displacements', size=14)
plt.xlabel('Global coordinate', size=14)
print('Program terminated')
plt.show()
|
mit
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/sklearn/neighbors/regression.py
|
8
|
10967
|
"""Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
                          SupervisedFloatMixin,
                          RegressorMixin):
    """Regression based on k-nearest neighbors.
    The target is predicted by local interpolation of the targets
    associated of the nearest neighbors in the training set.
    Read more in the :ref:`User Guide <regression>`.
    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
    weights : str or callable
        weight function used in prediction. Possible values:
        - 'uniform' : uniform weights. All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.
        Uniform weights are used by default.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:
        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.
        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.
    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric : string or callable, default 'minkowski'
        the distance metric to use for the tree.  The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric. See the documentation of the DistanceMetric class for a
        list of available metrics.
    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
        Doesn't affect :meth:`fit` method.
    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import KNeighborsRegressor
    >>> neigh = KNeighborsRegressor(n_neighbors=2)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    KNeighborsRegressor(...)
    >>> print(neigh.predict([[1.5]]))
    [ 0.5]
    See also
    --------
    NearestNeighbors
    RadiusNeighborsRegressor
    KNeighborsClassifier
    RadiusNeighborsClassifier
    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.
    .. warning::
       Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
       training data.
    https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, n_neighbors=5, weights='uniform',
                 algorithm='auto', leaf_size=30,
                 p=2, metric='minkowski', metric_params=None, n_jobs=1,
                 **kwargs):
        # Neighbor-search parameters are handled by the NeighborsBase mixin;
        # only the prediction weighting scheme is stored (and validated) here.
        self._init_params(n_neighbors=n_neighbors,
                          algorithm=algorithm,
                          leaf_size=leaf_size, metric=metric, p=p,
                          metric_params=metric_params, n_jobs=n_jobs, **kwargs)
        self.weights = _check_weights(weights)

    def predict(self, X):
        """Predict the target for the provided data
        Parameters
        ----------
        X : array-like, shape (n_query, n_features), \
                or (n_query, n_indexed) if metric == 'precomputed'
            Test samples.
        Returns
        -------
        y : array of int, shape = [n_samples] or [n_samples, n_outputs]
            Target values
        """
        X = check_array(X, accept_sparse='csr')

        neigh_dist, neigh_ind = self.kneighbors(X)

        weights = _get_weights(neigh_dist, self.weights)

        _y = self._y
        # Work on a 2-D view of the targets so single- and multi-output
        # cases share the same code path.
        if _y.ndim == 1:
            _y = _y.reshape((-1, 1))

        if weights is None:
            # Uniform weights: plain mean of the k neighbors' targets.
            y_pred = np.mean(_y[neigh_ind], axis=1)
        else:
            # Weighted mean, computed one output column at a time.
            y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
            denom = np.sum(weights, axis=1)

            for j in range(_y.shape[1]):
                num = np.sum(_y[neigh_ind, j] * weights, axis=1)
                y_pred[:, j] = num / denom

        # Restore 1-D output shape when the training targets were 1-D.
        if self._y.ndim == 1:
            y_pred = y_pred.ravel()

        return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
                               SupervisedFloatMixin,
                               RegressorMixin):
    """Regression based on neighbors within a fixed radius.
    The target is predicted by local interpolation of the targets
    associated of the nearest neighbors in the training set.
    Read more in the :ref:`User Guide <regression>`.
    Parameters
    ----------
    radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
        queries.
    weights : str or callable
        weight function used in prediction.  Possible values:
        - 'uniform' : uniform weights.  All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.
        Uniform weights are used by default.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:
        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.
        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.
    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric : string or callable, default 'minkowski'
        the distance metric to use for the tree.  The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric. See the documentation of the DistanceMetric class for a
        list of available metrics.
    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.
    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import RadiusNeighborsRegressor
    >>> neigh = RadiusNeighborsRegressor(radius=1.0)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    RadiusNeighborsRegressor(...)
    >>> print(neigh.predict([[1.5]]))
    [ 0.5]
    See also
    --------
    NearestNeighbors
    KNeighborsRegressor
    KNeighborsClassifier
    RadiusNeighborsClassifier
    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.
    https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, radius=1.0, weights='uniform',
                 algorithm='auto', leaf_size=30,
                 p=2, metric='minkowski', metric_params=None, **kwargs):
        # Neighbor-search parameters are handled by the NeighborsBase mixin;
        # only the prediction weighting scheme is stored (and validated) here.
        self._init_params(radius=radius,
                          algorithm=algorithm,
                          leaf_size=leaf_size,
                          p=p, metric=metric, metric_params=metric_params,
                          **kwargs)
        self.weights = _check_weights(weights)

    def predict(self, X):
        """Predict the target for the provided data
        Parameters
        ----------
        X : array-like, shape (n_query, n_features), \
                or (n_query, n_indexed) if metric == 'precomputed'
            Test samples.
        Returns
        -------
        y : array of int, shape = [n_samples] or [n_samples, n_outputs]
            Target values
        """
        X = check_array(X, accept_sparse='csr')

        neigh_dist, neigh_ind = self.radius_neighbors(X)

        weights = _get_weights(neigh_dist, self.weights)

        _y = self._y
        # Work on a 2-D view of the targets so single- and multi-output
        # cases share the same code path.
        if _y.ndim == 1:
            _y = _y.reshape((-1, 1))

        # Each query can have a different number of neighbors within the
        # radius, so averaging is done per query in a Python-level loop.
        if weights is None:
            y_pred = np.array([np.mean(_y[ind, :], axis=0)
                               for ind in neigh_ind])
        else:
            y_pred = np.array([(np.average(_y[ind, :], axis=0,
                                           weights=weights[i]))
                               for (i, ind) in enumerate(neigh_ind)])

        # Restore 1-D output shape when the training targets were 1-D.
        if self._y.ndim == 1:
            y_pred = y_pred.ravel()

        return y_pred
|
mit
|
daodaoliang/neural-network-animation
|
matplotlib/texmanager.py
|
11
|
25173
|
"""
This module supports embedded TeX expressions in matplotlib via dvipng
and dvips for the raster and postscript backends. The tex and
dvipng/dvips information is cached in ~/.matplotlib/tex.cache for reuse between
sessions
Requirements:
* latex
* \*Agg backends: dvipng
* PS backend: latex w/ psfrag, dvips, and Ghostscript 8.51
(older versions do not work properly)
Backends:
* \*Agg
* PS
* PDF
For raster output, you can get RGBA numpy arrays from TeX expressions
as follows::
texmanager = TexManager()
s = ('\\TeX\\ is Number '
'$\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\pi}}{2^n}$!')
Z = self.texmanager.get_rgba(s, size=12, dpi=80, rgb=(1,0,0))
To enable tex rendering of all text in your matplotlib figure, set
text.usetex in your matplotlibrc file (http://matplotlib.sf.net/matplotlibrc)
or include these two lines in your script::
from matplotlib import rc
rc('text', usetex=True)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import copy
import glob
import os
import shutil
import sys
import warnings
from hashlib import md5
import distutils.version
import numpy as np
import matplotlib as mpl
from matplotlib import rcParams
from matplotlib._png import read_png
from matplotlib.cbook import mkdirs
from matplotlib.compat.subprocess import Popen, PIPE, STDOUT
import matplotlib.dviread as dviread
import re
# Set to True for verbose tracing of the tex -> dvi -> png/ps pipeline.
DEBUG = False

# Separator for chaining shell commands ('&' on Windows, ';' elsewhere).
if sys.platform.startswith('win'):
    cmd_split = '&'
else:
    cmd_split = ';'
def dvipng_hack_alpha():
    """Return True when the installed dvipng is older than 1.6 and thus
    needs the alpha-channel workaround used in TexManager.get_grey."""
    try:
        proc = Popen(['dvipng', '-version'], stdin=PIPE, stdout=PIPE,
                     stderr=STDOUT, close_fds=(sys.platform != 'win32'))
        stdout, stderr = proc.communicate()
    except OSError:
        mpl.verbose.report('No dvipng was found', 'helpful')
        return False
    # Scan the version banner for the line reporting the dvipng release.
    for line in stdout.decode('ascii').split('\n'):
        if not line.startswith('dvipng '):
            continue
        version = line.split()[-1]
        mpl.verbose.report('Found dvipng version %s' % version,
                           'helpful')
        return (distutils.version.LooseVersion(version) <
                distutils.version.LooseVersion('1.6'))
    mpl.verbose.report('Unexpected response from dvipng -version', 'helpful')
    return False
class TexManager:
    """
    Convert strings to dvi files using TeX, caching the results to a
    working dir
    """

    # NOTE: everything below runs once at import time (class-body scope):
    # it locates/migrates the on-disk tex cache directory.
    oldpath = mpl.get_home()
    if oldpath is None:
        oldpath = mpl.get_data_path()
    oldcache = os.path.join(oldpath, '.tex.cache')

    cachedir = mpl.get_cachedir()
    if cachedir is not None:
        texcache = os.path.join(cachedir, 'tex.cache')
    else:
        # Should only happen in a restricted environment (such as Google App
        # Engine). Deal with this gracefully by not creating a cache directory.
        texcache = None

    # Migrate a cache left behind by old matplotlib versions, if any.
    if os.path.exists(oldcache):
        if texcache is not None:
            try:
                shutil.move(oldcache, texcache)
            except IOError as e:
                warnings.warn('File could not be renamed: %s' % e)
            else:
                warnings.warn("""\
Found a TeX cache dir in the deprecated location "%s".
Moving it to the new default location "%s".""" % (oldcache, texcache))
        else:
            warnings.warn("""\
Could not rename old TeX cache dir "%s": a suitable configuration
directory could not be found.""" % oldcache)

    if texcache is not None:
        mkdirs(texcache)

    # Lazily computed by get_grey() the first time it is needed.
    _dvipng_hack_alpha = None
    #_dvipng_hack_alpha = dvipng_hack_alpha()

    # In-memory caches of rendered output, shared by all instances.
    rgba_arrayd = {}
    grey_arrayd = {}
    postscriptd = {}
    pscnt = 0

    # (latex font code, latex preamble snippet) per font category.
    serif = ('cmr', '')
    sans_serif = ('cmss', '')
    monospace = ('cmtt', '')
    cursive = ('pzc', '\\usepackage{chancery}')
    font_family = 'serif'
    font_families = ('serif', 'sans-serif', 'cursive', 'monospace')

    font_info = {'new century schoolbook': ('pnc',
                                            r'\renewcommand{\rmdefault}{pnc}'),
                 'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'),
                 'times': ('ptm', '\\usepackage{mathptmx}'),
                 'palatino': ('ppl', '\\usepackage{mathpazo}'),
                 'zapf chancery': ('pzc', '\\usepackage{chancery}'),
                 'cursive': ('pzc', '\\usepackage{chancery}'),
                 'charter': ('pch', '\\usepackage{charter}'),
                 'serif': ('cmr', ''),
                 'sans-serif': ('cmss', ''),
                 'helvetica': ('phv', '\\usepackage{helvet}'),
                 'avant garde': ('pag', '\\usepackage{avant}'),
                 'courier': ('pcr', '\\usepackage{courier}'),
                 'monospace': ('cmtt', ''),
                 'computer modern roman': ('cmr', ''),
                 'computer modern sans serif': ('cmss', ''),
                 'computer modern typewriter': ('cmtt', '')}

    # rcParams keys that, when changed, force re-initialization (see
    # get_font_config).
    _rc_cache = None
    _rc_cache_keys = (('text.latex.preamble', ) +
                      tuple(['font.' + n for n in ('family', ) +
                             font_families]))
    def __init__(self):
        """Resolve the font configuration from rcParams and build the font
        part of the latex preamble.  Raises RuntimeError when no cache
        directory is available."""
        if self.texcache is None:
            raise RuntimeError(
                ('Cannot create TexManager, as there is no cache directory '
                 'available'))

        mkdirs(self.texcache)
        ff = rcParams['font.family']
        if len(ff) == 1 and ff[0].lower() in self.font_families:
            self.font_family = ff[0].lower()
        elif isinstance(ff, six.string_types) and ff.lower() in self.font_families:
            self.font_family = ff.lower()
        else:
            mpl.verbose.report(
                'font.family must be one of (%s) when text.usetex is True. '
                'serif will be used by default.' %
                   ', '.join(self.font_families),
                'helpful')
            self.font_family = 'serif'

        fontconfig = [self.font_family]
        # For each category, pick the first rcParams font that latex knows;
        # fall back to the category default otherwise (for/else below).
        for font_family, font_family_attr in [(ff, ff.replace('-', '_'))
                                              for ff in self.font_families]:
            for font in rcParams['font.' + font_family]:
                if font.lower() in self.font_info:
                    setattr(self, font_family_attr,
                            self.font_info[font.lower()])
                    if DEBUG:
                        print('family: %s, font: %s, info: %s' %
                              (font_family, font,
                               self.font_info[font.lower()]))
                    break
                else:
                    if DEBUG:
                        # NOTE(review): '$s' looks like a typo for '%s'
                        # (and no argument is interpolated here anyway).
                        print('$s font is not compatible with usetex')
            else:
                mpl.verbose.report('No LaTeX-compatible font found for the '
                                   '%s font family in rcParams. Using '
                                   'default.' % ff, 'helpful')
                setattr(self, font_family_attr, self.font_info[font_family])
            fontconfig.append(getattr(self, font_family_attr)[0])
        self._fontconfig = ''.join(fontconfig)

        # The following packages and commands need to be included in the latex
        # file's preamble:
        cmd = [self.serif[1], self.sans_serif[1], self.monospace[1]]
        if self.font_family == 'cursive':
            cmd.append(self.cursive[1])
        while '\\usepackage{type1cm}' in cmd:
            cmd.remove('\\usepackage{type1cm}')
        cmd = '\n'.join(cmd)
        self._font_preamble = '\n'.join(['\\usepackage{type1cm}', cmd,
                                         '\\usepackage{textcomp}'])
def get_basefile(self, tex, fontsize, dpi=None):
"""
returns a filename based on a hash of the string, fontsize, and dpi
"""
s = ''.join([tex, self.get_font_config(), '%f' % fontsize,
self.get_custom_preamble(), str(dpi or '')])
# make sure hash is consistent for all strings, regardless of encoding:
bytes = six.text_type(s).encode('utf-8')
return os.path.join(self.texcache, md5(bytes).hexdigest())
    def get_font_config(self):
        """Reinitialize self if the relevant rcParams have changed; return
        the current font configuration string."""
        if self._rc_cache is None:
            self._rc_cache = dict([(k, None) for k in self._rc_cache_keys])
        changed = [par for par in self._rc_cache_keys
                   if rcParams[par] != self._rc_cache[par]]
        if changed:
            if DEBUG:
                print('DEBUG following keys changed:', changed)
            for k in changed:
                if DEBUG:
                    print('DEBUG %-20s: %-10s -> %-10s' %
                          (k, self._rc_cache[k], rcParams[k]))
                # deepcopy may not be necessary, but feels more future-proof
                self._rc_cache[k] = copy.deepcopy(rcParams[k])
            if DEBUG:
                print('DEBUG RE-INIT\nold fontconfig:', self._fontconfig)
            # Re-run __init__ to rebuild the preamble with the new settings.
            self.__init__()
        if DEBUG:
            print('DEBUG fontconfig:', self._fontconfig)
        return self._fontconfig
def get_font_preamble(self):
"""
returns a string containing font configuration for the tex preamble
"""
return self._font_preamble
def get_custom_preamble(self):
"""returns a string containing user additions to the tex preamble"""
return '\n'.join(rcParams['text.latex.preamble'])
def _get_shell_cmd(self, *args):
"""
On windows, changing directories can be complicated by the presence of
multiple drives. get_shell_cmd deals with this issue.
"""
if sys.platform == 'win32':
command = ['%s' % os.path.splitdrive(self.texcache)[0]]
else:
command = []
command.extend(args)
return ' && '.join(command)
    def make_tex(self, tex, fontsize):
        """
        Generate a tex file to render the tex string at a specific font size.
        Returns the file name.  Raises UnicodeEncodeError when *tex* is not
        ascii and 'text.latex.unicode' is disabled.
        """
        basefile = self.get_basefile(tex, fontsize)
        texfile = '%s.tex' % basefile
        custom_preamble = self.get_custom_preamble()
        # Wrap the string in the latex font-switch command of the active
        # family (roman by default).
        fontcmd = {'sans-serif': r'{\sffamily %s}',
                   'monospace': r'{\ttfamily %s}'}.get(self.font_family,
                                                       r'{\rmfamily %s}')
        tex = fontcmd % tex

        if rcParams['text.latex.unicode']:
            unicode_preamble = """\\usepackage{ucs}
\\usepackage[utf8x]{inputenc}"""
        else:
            unicode_preamble = ''

        s = """\\documentclass{article}
%s
%s
%s
\\usepackage[papersize={72in,72in},body={70in,70in},margin={1in,1in}]{geometry}
\\pagestyle{empty}
\\begin{document}
\\fontsize{%f}{%f}%s
\\end{document}
""" % (self._font_preamble, unicode_preamble, custom_preamble,
       fontsize, fontsize * 1.25, tex)
        with open(texfile, 'wb') as fh:
            if rcParams['text.latex.unicode']:
                fh.write(s.encode('utf8'))
            else:
                try:
                    fh.write(s.encode('ascii'))
                except UnicodeEncodeError as err:
                    mpl.verbose.report("You are using unicode and latex, but "
                                       "have not enabled the matplotlib "
                                       "'text.latex.unicode' rcParam.",
                                       'helpful')
                    raise

        return texfile
    # Parses the "MatplotlibBox:(H+D)xW" line that the preview-mode latex
    # run writes to its log (see make_tex_preview below).
    _re_vbox = re.compile(
        r"MatplotlibBox:\(([\d.]+)pt\+([\d.]+)pt\)x([\d.]+)pt")

    def make_tex_preview(self, tex, fontsize):
        """
        Generate a tex file to render the tex string at a specific
        font size.  It uses the preview.sty to determine the dimension
        (width, height, descent) of the output.
        Returns the file name.
        """
        basefile = self.get_basefile(tex, fontsize)
        texfile = '%s.tex' % basefile
        custom_preamble = self.get_custom_preamble()
        fontcmd = {'sans-serif': r'{\sffamily %s}',
                   'monospace': r'{\ttfamily %s}'}.get(self.font_family,
                                                       r'{\rmfamily %s}')
        tex = fontcmd % tex

        if rcParams['text.latex.unicode']:
            unicode_preamble = """\\usepackage{ucs}
\\usepackage[utf8x]{inputenc}"""
        else:
            unicode_preamble = ''

        # newbox, setbox, immediate, etc. are used to find the box
        # extent of the rendered text.
        s = """\\documentclass{article}
%s
%s
%s
\\usepackage[active,showbox,tightpage]{preview}
\\usepackage[papersize={72in,72in},body={70in,70in},margin={1in,1in}]{geometry}
%% we override the default showbox as it is treated as an error and makes
%% the exit status not zero
\\def\\showbox#1{\\immediate\\write16{MatplotlibBox:(\\the\\ht#1+\\the\\dp#1)x\\the\\wd#1}}
\\begin{document}
\\begin{preview}
{\\fontsize{%f}{%f}%s}
\\end{preview}
\\end{document}
""" % (self._font_preamble, unicode_preamble, custom_preamble,
       fontsize, fontsize * 1.25, tex)
        with open(texfile, 'wb') as fh:
            if rcParams['text.latex.unicode']:
                fh.write(s.encode('utf8'))
            else:
                try:
                    fh.write(s.encode('ascii'))
                except UnicodeEncodeError as err:
                    mpl.verbose.report("You are using unicode and latex, but "
                                       "have not enabled the matplotlib "
                                       "'text.latex.unicode' rcParam.",
                                       'helpful')
                    raise

        return texfile
    def make_dvi(self, tex, fontsize):
        """
        Generate a dvi file containing latex's layout of the tex string.
        Returns the file name.  Raises RuntimeError when latex fails.
        """
        if rcParams['text.latex.preview']:
            return self.make_dvi_preview(tex, fontsize)

        basefile = self.get_basefile(tex, fontsize)
        dvifile = '%s.dvi' % basefile

        # Skip the (slow) latex run if the dvi is already cached on disk.
        if DEBUG or not os.path.exists(dvifile):
            texfile = self.make_tex(tex, fontsize)
            outfile = basefile + '.output'
            command = self._get_shell_cmd(
                'cd "%s"' % self.texcache,
                'latex -interaction=nonstopmode %s > "%s"' %
                (os.path.split(texfile)[-1], outfile))
            mpl.verbose.report(command, 'debug')
            exit_status = os.system(command)
            try:
                with open(outfile) as fh:
                    report = fh.read()
            except IOError:
                report = 'No latex error report available.'
            try:
                os.stat(dvifile)
                exists = True
            except OSError:
                exists = False
            if exit_status or not exists:
                raise RuntimeError(
                    ('LaTeX was not able to process the following '
                     'string:\n%s\nHere is the full report generated by '
                     'LaTeX: \n\n' % repr(tex.encode('unicode_escape')) +
                     report))
            else:
                mpl.verbose.report(report, 'debug')
            # Clean up latex by-products, keeping only the .dvi and .tex.
            for fname in glob.glob(basefile + '*'):
                if fname.endswith('dvi'):
                    pass
                elif fname.endswith('tex'):
                    pass
                else:
                    try:
                        os.remove(fname)
                    except OSError:
                        pass

        return dvifile
    def make_dvi_preview(self, tex, fontsize):
        """
        Generate a dvi file containing latex's layout of the tex
        string. It calls the make_tex_preview() method and stores the size
        information (width, height, descent) in a separate '.baseline' file.
        Returns the file name.  Raises RuntimeError when latex fails.
        """
        basefile = self.get_basefile(tex, fontsize)
        dvifile = '%s.dvi' % basefile
        baselinefile = '%s.baseline' % basefile

        # Re-run latex if either the dvi or the size file is missing.
        if (DEBUG or not os.path.exists(dvifile) or
                not os.path.exists(baselinefile)):
            texfile = self.make_tex_preview(tex, fontsize)
            outfile = basefile + '.output'
            command = self._get_shell_cmd(
                'cd "%s"' % self.texcache,
                'latex -interaction=nonstopmode %s > "%s"' %
                (os.path.split(texfile)[-1], outfile))
            mpl.verbose.report(command, 'debug')
            exit_status = os.system(command)
            try:
                with open(outfile) as fh:
                    report = fh.read()
            except IOError:
                report = 'No latex error report available.'
            if exit_status:
                raise RuntimeError(
                    ('LaTeX was not able to process the following '
                     'string:\n%s\nHere is the full report generated by '
                     'LaTeX: \n\n' % repr(tex)) + report)
            else:
                mpl.verbose.report(report, 'debug')

            # find the box extent information in the latex output
            # file and store them in ".baseline" file
            m = TexManager._re_vbox.search(report)
            with open(basefile + '.baseline', "w") as fh:
                fh.write(" ".join(m.groups()))

            # Clean up by-products, keeping .dvi, .tex and .baseline.
            for fname in glob.glob(basefile + '*'):
                if fname.endswith('dvi'):
                    pass
                elif fname.endswith('tex'):
                    pass
                elif fname.endswith('baseline'):
                    pass
                else:
                    try:
                        os.remove(fname)
                    except OSError:
                        pass

        return dvifile
    def make_png(self, tex, fontsize, dpi):
        """
        Generate a png file containing latex's rendering of the tex string.
        Returns the filename.  Raises RuntimeError when dvipng fails.
        """
        basefile = self.get_basefile(tex, fontsize, dpi)
        pngfile = '%s.png' % basefile

        # see get_grey for a discussion of the transparent background
        if DEBUG or not os.path.exists(pngfile):
            dvifile = self.make_dvi(tex, fontsize)
            outfile = basefile + '.output'
            command = self._get_shell_cmd(
                'cd "%s"' % self.texcache,
                'dvipng -bg Transparent -D %s -T tight -o "%s" "%s" > "%s"' %
                (dpi, os.path.split(pngfile)[-1],
                 os.path.split(dvifile)[-1], outfile))
            mpl.verbose.report(command, 'debug')
            exit_status = os.system(command)
            try:
                with open(outfile) as fh:
                    report = fh.read()
            except IOError:
                report = 'No dvipng error report available.'
            if exit_status:
                raise RuntimeError(
                    'dvipng was not able to process the following '
                    'file:\n%s\nHere is the full report generated by '
                    'dvipng: \n\n' % dvifile + report)
            else:
                mpl.verbose.report(report, 'debug')
            try:
                os.remove(outfile)
            except OSError:
                pass

        return pngfile
def make_ps(self, tex, fontsize):
"""
generates a postscript file containing latex's rendering of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
psfile = '%s.epsf' % basefile
if DEBUG or not os.path.exists(psfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile + '.output'
command = self._get_shell_cmd(
'cd "%s"' % self.texcache,
'dvips -q -E -o "%s" "%s" > "%s"' %
(os.path.split(psfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
with open(outfile) as fh:
if exit_status:
raise RuntimeError(
'dvipng was not able to process the flowing '
'file:\n%s\nHere is the full report generated by '
'dvipng: \n\n' % dvifile + fh.read())
else:
mpl.verbose.report(fh.read(), 'debug')
os.remove(outfile)
return psfile
def get_ps_bbox(self, tex, fontsize):
"""
returns a list containing the postscript bounding box for latex's
rendering of the tex string
"""
psfile = self.make_ps(tex, fontsize)
with open(psfile) as ps:
for line in ps:
if line.startswith('%%BoundingBox:'):
return [int(val) for val in line.split()[1:]]
raise RuntimeError('Could not parse %s' % psfile)
    def get_grey(self, tex, fontsize=None, dpi=None):
        """Return the alpha channel of the rendered tex string as a 2-D
        array, cached in memory per (tex, font config, fontsize, dpi)."""
        key = tex, self.get_font_config(), fontsize, dpi
        alpha = self.grey_arrayd.get(key)
        if alpha is None:
            pngfile = self.make_png(tex, fontsize, dpi)
            X = read_png(os.path.join(self.texcache, pngfile))

            # Explicit rcParams setting wins; otherwise probe dvipng once
            # and cache the result on the class.
            if rcParams['text.dvipnghack'] is not None:
                hack = rcParams['text.dvipnghack']
            else:
                if TexManager._dvipng_hack_alpha is None:
                    TexManager._dvipng_hack_alpha = dvipng_hack_alpha()
                hack = TexManager._dvipng_hack_alpha

            if hack:
                # hack the alpha channel
                # dvipng assumed a constant background, whereas we want to
                # overlay these rasters with antialiasing over arbitrary
                # backgrounds that may have other figure elements under them.
                # When you set dvipng -bg Transparent, it actually makes the
                # alpha channel 1 and does the background compositing and
                # antialiasing itself and puts the blended data in the rgb
                # channels.  So what we do is extract the alpha information
                # from the red channel, which is a blend of the default dvipng
                # background (white) and foreground (black).  So the amount of
                # red (or green or blue for that matter since white and black
                # blend to a grayscale) is the alpha intensity.  Once we
                # extract the correct alpha information, we assign it to the
                # alpha channel properly and let the users pick their rgb.  In
                # this way, we can overlay tex strings on arbitrary
                # backgrounds with antialiasing
                #
                # red = alpha*red_foreground + (1-alpha)*red_background
                #
                # Since the foreground is black (0) and the background is
                # white (1) this reduces to red = 1-alpha or alpha = 1-red
                #alpha = npy.sqrt(1-X[:,:,0]) # should this be sqrt here?
                alpha = 1 - X[:, :, 0]
            else:
                alpha = X[:, :, -1]

            self.grey_arrayd[key] = alpha
        return alpha
def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0, 0, 0)):
"""
Returns latex's rendering of the tex string as an rgba array
"""
if not fontsize:
fontsize = rcParams['font.size']
if not dpi:
dpi = rcParams['savefig.dpi']
r, g, b = rgb
key = tex, self.get_font_config(), fontsize, dpi, tuple(rgb)
Z = self.rgba_arrayd.get(key)
if Z is None:
alpha = self.get_grey(tex, fontsize, dpi)
Z = np.zeros((alpha.shape[0], alpha.shape[1], 4), np.float)
Z[:, :, 0] = r
Z[:, :, 1] = g
Z[:, :, 2] = b
Z[:, :, 3] = alpha
self.rgba_arrayd[key] = Z
return Z
    def get_text_width_height_descent(self, tex, fontsize, renderer=None):
        """
        Return (width, height, descent) of the rendered *tex* string.

        The returned height is the total height including the descent.
        If *renderer* is given, it converts points to display pixels;
        otherwise a 1:1 points-to-pixels mapping is assumed.
        """
        if tex.strip() == '':
            # nothing to render
            return 0, 0, 0
        if renderer:
            dpi_fraction = renderer.points_to_pixels(1.)
        else:
            dpi_fraction = 1.
        if rcParams['text.latex.preview']:
            # use preview.sty
            basefile = self.get_basefile(tex, fontsize)
            baselinefile = '%s.baseline' % basefile
            if DEBUG or not os.path.exists(baselinefile):
                # make_dvi_preview writes the .baseline file as a side
                # effect; the returned dvi path itself is not needed here
                dvifile = self.make_dvi_preview(tex, fontsize)
            # the baseline file holds three whitespace-separated numbers:
            # height, depth (descent) and width, in points
            with open(baselinefile) as fh:
                l = fh.read().split()
            height, depth, width = [float(l1) * dpi_fraction for l1 in l]
            return width, height + depth, depth
        else:
            # use dviread. It sometimes returns a wrong descent.
            dvifile = self.make_dvi(tex, fontsize)
            dvi = dviread.Dvi(dvifile, 72 * dpi_fraction)
            try:
                # the dvi output contains a single page; take it
                page = next(iter(dvi))
            finally:
                dvi.close()
            # A total height (including the descent) needs to be returned.
            return page.width, page.height + page.descent, page.descent
|
mit
|
hmmlearn/hmmlearn
|
examples/plot_hmm_sampling.py
|
1
|
1974
|
"""
Sampling from HMM
-----------------
This script shows how to sample points from a Hidden Markov Model (HMM):
we use a 4-state model with specified mean and covariance.
The plot show the sequence of observations generated with the transitions
between them. We can see that, as specified by our transition matrix,
there are no transition between component 1 and 3.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from hmmlearn import hmm
##############################################################
# Prepare parameters for a 4-components HMM
# Initial population probability
startprob = np.array([0.6, 0.3, 0.1, 0.0])
# The transition matrix, note that there are no transitions possible
# between component 1 and 3
transmat = np.array([[0.7, 0.2, 0.0, 0.1],
[0.3, 0.5, 0.2, 0.0],
[0.0, 0.3, 0.5, 0.2],
[0.2, 0.0, 0.2, 0.6]])
# The means of each component
means = np.array([[0.0, 0.0],
[0.0, 11.0],
[9.0, 10.0],
[11.0, -1.0]])
# The covariance of each component
covars = .5 * np.tile(np.identity(2), (4, 1, 1))
# Build an HMM instance and set parameters
model = hmm.GaussianHMM(n_components=4, covariance_type="full")
# Instead of fitting it from the data, we directly set the estimated
# parameters, the means and covariance of the components
model.startprob_ = startprob
model.transmat_ = transmat
model.means_ = means
model.covars_ = covars
###############################################################
# Generate samples
X, Z = model.sample(500)
# Plot the sampled data
plt.plot(X[:, 0], X[:, 1], ".-", label="observations", ms=6,
mfc="orange", alpha=0.7)
# Indicate the component numbers
for i, m in enumerate(means):
plt.text(m[0], m[1], 'Component %i' % (i + 1),
size=17, horizontalalignment='center',
bbox=dict(alpha=.7, facecolor='w'))
plt.legend(loc='best')
plt.show()
|
bsd-3-clause
|
elijah513/scikit-learn
|
examples/linear_model/plot_logistic.py
|
312
|
1426
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Show in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit-curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
# BUG FIX: np.float was a deprecated alias of the builtin float and was
# removed in numpy 1.24; the builtin is equivalent.  (The unused
# ``xmin, xmax`` constants were also dropped.)
y = (X > 0).astype(float)
# stretch the positive side to separate the classes more clearly
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
    """Logistic sigmoid: maps any real x into the open interval (0, 1)."""
    return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
# ordinary least squares fit, for visual comparison with the logit curve
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
|
bsd-3-clause
|
zctea/biocode
|
sandbox/jorvis/summarize_vcfannotator.py
|
3
|
4385
|
#!/usr/bin/env python3.2
'''
The goal here is to take any VCF file output by VCFannotator and summarize the SNPs
contained there by type.
INPUT
The input expected is from the VCFannotator documentation:
An example output line transposed to a column format would look like so (taken from
the sample data):
0 TY-2482_chromosome
1 5080
2 .
3 T
4 C
5 8101.55
6 PASS
7 AC=2;AF=1.00;AN=2;DP=212;Dels=0.00;FS=0.000;HRun=1;HaplotypeScore=0.0000;MQ=59.52;MQ0=0;QD=38.21;SB=-4130.96
8 GT:AD:DP:GQ:PL
9 1/1:0,212:212:99:8101,605,0
10 CDS,tmpTY2482_00008,tmpTY2482_00008T0,microcin H47 immunity protein mchI,trans_orient:+,loc_in_cds:46,codon_pos:1,codon:Tct-Cct,pep:S->P,Ser-16-Pro,(NSY)
The final field is bundled with the individual feature annotation data, comma-delimited.
This includes the feature type (eg. CDS, intron, UTR), gene and transcript identifiers,
name of the gene, and the transcribed orientation of the gene. If the SNP is localized
to a coding region, then the relative position within that CDS sequence is provided in
addition to the codon change. The types of coding mutations are provided as synonomous
(SYN), non-synonomous (NSY), read-thru (RTH), and nonsence (STP). SNPs that are localized
to intergenic regions are reported as such along with the identifiers of the neighboring
genes and distance to each.
This isn't directly followed by Brian's VCFannotator, but for my own notes these are the SNP types:
- Non-coding region
- Coding region
- Synonymous
- Nonsynonymous
- Missense (results in a different amino acid, but not one of the three below)
- Nonsense (results in a premature stop codon)
- Read-through (releases a stop codon)
- Initiating (translation-initiating fMet is changed)
'''
import argparse
import biocodeutils
import gzip
import matplotlib.pyplot as plt
import os
import re
def main():
    """
    Summarize the SNPs in a VCFannotator-produced VCF file by type.

    Counts total, intergenic, intronic and CDS-localized SNPs; CDS SNPs
    are further broken down into synonymous (SYN), non-synonymous (NSY),
    read-through (RTH) and nonsense (STP) changes.  The summary is
    printed to STDOUT.  Raises on malformed or unrecognized lines.
    """
    parser = argparse.ArgumentParser( description='Summarize the SNPs in a VCFannotator-produced file by type')
    ## input file to be read (optionally gzipped)
    parser.add_argument('-v', '--vcf_file', type=str, required=True, help='Input VCFannotated file' )
    args = parser.parse_args()
    file = args.vcf_file
    # gzip.open in text mode ('rt') decodes transparently, so gzipped and
    # plain files can be iterated identically (no manual .decode() needed)
    if file.endswith(".gz"):
        fh = gzip.open(file, "rt")
    else:
        fh = open(file)
    total_snp_c = 0
    cds_snp_c = 0
    intron_snp_c = 0
    intergenic_snp_c = 0
    ## these are each subcategories of SNPs within a CDS
    syn_c = 0
    nonsyn_c = 0
    readthru_c = 0
    nonsense_c = 0
    other_c = 0
    for line in fh:
        line = line.rstrip()
        if line.startswith("#"):
            continue
        # split once, after the comment check (was split twice before)
        cols = line.split("\t")
        # BUG FIX: the VCFannotator annotation lives in an 11th column
        # (index 10, per the columns 0-10 shown in the module docstring),
        # so a data line needs at least 11 columns; the previous check for
        # 10 columns still allowed an IndexError on cols[10] below.
        if len(cols) < 11:
            raise Exception("ERROR: expected all non-comment lines to have 11 columns")
        annot_col = cols[10]
        total_snp_c += 1
        if annot_col.startswith("intergenic"):
            intergenic_snp_c += 1
        elif annot_col.startswith("intron"):
            intron_snp_c += 1
        elif annot_col.startswith("CDS"):
            cds_snp_c += 1
            # the coding-mutation class is the last comma-delimited token,
            # e.g. (SYN), (NSY), (RTH) or (STP)
            cds_type = annot_col.split(",")[-1]
            if cds_type == "(SYN)":
                syn_c += 1
            elif cds_type == "(NSY)":
                nonsyn_c += 1
            elif cds_type == "(RTH)":
                readthru_c += 1
            elif cds_type == "(STP)":
                nonsense_c += 1
            else:
                other_c += 1
        else:
            raise Exception("ERROR: Unexpected SNP type at beginning of column: {0}".format(annot_col) )
    fh.close()
    print("Total SNPs: {}".format(total_snp_c) )
    print("Intergenic: {}".format(intergenic_snp_c) )
    print("Intronic  : {}".format(intron_snp_c) )
    print("Within CDS: {}".format(cds_snp_c) )
    print("\tSynonymous    : {}".format(syn_c) )
    print("\tNon-synonymous: {}".format(nonsyn_c) )
    print("\tRead-through  : {}".format(readthru_c) )
    print("\tNonsense      : {}".format(nonsense_c) )
if __name__ == '__main__':
    main()
|
gpl-3.0
|
zmlabe/IceVarFigs
|
Scripts/SeaIce/plot_Walsh_ExtendedSeaIceConc_v2.py
|
1
|
5055
|
"""
Plot sea ice concentration from last 100 years using the Walsh reconstruction
for version 2
Website : https://nsidc.org/data/g10010
Author : Zachary M. Labe
Date : 2 June 2020
"""
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import numpy as np
import datetime
import calendar as cal
from matplotlib.colors import ListedColormap, BoundaryNorm
import cmocean
### Define constants
directorydata = './Data/'
directoryfigure = './Figures/'
now = datetime.datetime.now()
month = now.month
### Input years
years = np.arange(1918,2017+1,1)
### Read data from server
# G10010: Walsh et al. gridded sea ice concentration reconstruction, v2
data = Dataset(directorydata + 'G10010_SIBT1850_v2.0.nc')
lats = data.variables['latitude'][:]
lons = data.variables['longitude'][:]
# keep the last 1200 monthly fields = the final 100 years of the record
sic = data.variables['seaice_conc'][-1200:,:,:]
data.close()
### Reshape concentration to (year, month, lat, lon)
sic = np.reshape(sic,(sic.shape[0]//12,12,lats.shape[0],lons.shape[0]),)
### Slice month (index 8 = September, the annual minimum)
sicmo = sic[:,8,:,:].astype(np.float64)
# mask open water (0) and out-of-range fill values, then convert % -> fraction
sicmo[np.where(sicmo == 0)] = np.nan
sicmo[np.where(sicmo > 100)] = np.nan
sicmo = sicmo/100.
###############################################################################
###############################################################################
###############################################################################
### Plot figure
### Define parameters (dark)
def setcolor(x, color):
    """Recolor the text artists of a graticule-style mapping.

    *x* maps each key to a pair whose second slot is a list of artists
    (as returned by Basemap's drawmeridians/drawparallels); every artist
    in that slot gets ``set_color(color)``.
    """
    for entry in x.values():
        for artist in entry[1]:
            artist.set_color(color)
# dark theme: black figure/axes background with white foreground, plus
# LaTeX text rendering for the annotations below
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
plt.rc('savefig',facecolor='black')
plt.rc('axes',edgecolor='white')
plt.rc('xtick',color='white')
plt.rc('ytick',color='white')
plt.rc('axes',labelcolor='white')
plt.rc('axes',facecolor='black')
## Plot global temperature anomalies
style = 'polar'
### Create 2d arrays of latitude and longitude
lon2,lat2 = np.meshgrid(lons,lats)
### Define figure
# the Basemap instance is created once and reused for every frame
if style == 'ortho':
    m = Basemap(projection='ortho',lon_0=-90,
            lat_0=70,resolution='l',round=True)
elif style == 'polar':
    m = Basemap(projection='npstere',boundinglat=50,lon_0=270,
                resolution='l',round =True,area_thresh=10000)
for i in range(sicmo.shape[0]): # 100 years
    # one figure (= one GIF frame) per year
    fig = plt.figure()
    ax = plt.subplot(111)
    ### Hide data for transparent background
    for txt in fig.texts:
        txt.set_visible(False)
    ### Loop through 100 years
    var = sicmo[i,:,:]
    ### Create polar map
    m.drawmapboundary(fill_color='k',zorder=1)
    m.drawlsmask(land_color='k',ocean_color='k')
    # Make the plot continuous
    # NOTE(review): barlim is computed but never used below
    barlim = np.arange(0.1,1.1,1)
    ### Plot filled contours
    cs = m.contourf(lon2,lat2,var,
                    np.arange(0.1,1.1,0.05),extend='both',
                    alpha=1,latlon=True,zorder=2)
    ### Color map
    cmap = cmocean.cm.ice
    cs.set_cmap(cmap)
    ### Mask land
    m.fillcontinents(color='k')
    m.drawcoastlines(color='darkred',linewidth=0.6)
    ### Fill page
    plt.tight_layout()
    ### Data information (year label plus attribution text)
    t = plt.annotate(r'\textbf{%s}' % years[i],textcoords='axes fraction',
                xy=(0,0), xytext=(-0.06,0.88),
                fontsize=50,color='w',ha='center',va='center')
    t = plt.annotate(r'\textbf{GRAPHIC}: Zachary Labe (@ZLabe)',
                     textcoords='axes fraction',
                     xy=(0,0), xytext=(-0.28,-0.01),
                     fontsize=5,color='darkgrey')
    t = plt.annotate(r'\textbf{SOURCE}: https://nsidc.org/data/g10010',
                     textcoords='axes fraction',
                     xy=(0,0), xytext=(-0.28,0.03),
                     fontsize=5,color='darkgrey')
    t = plt.annotate(r'\textbf{REFERENCE}: Walsh et al. [2016], Version 2',
                     textcoords='axes fraction',
                     xy=(0,0), xytext=(-0.28,0.01),
                     fontsize=5,color='darkgrey')
    t = plt.annotate(r'\textbf{DATA}: Sea Ice Concentration \textbf{[SEPTEMBER}',
                     textcoords='axes fraction',
                     xy=(0,0), xytext=(-0.28,0.05),
                     fontsize=5,color='darkgrey')
    ### Save images for creating GIF using ImageMagick
    # zero-pad single-digit years so the frame files sort correctly
    if i < 10:
        plt.savefig(directoryfigure + 'Walsh_v2_0%s.png' % i,dpi=300)
    else:
        plt.savefig(directoryfigure + 'Walsh_v2_%s.png' % i,dpi=300)
    # duplicate the final frame several times so the GIF pauses on it
    if i == 99:
        plt.savefig(directoryfigure + 'Walsh_v2_99.png',dpi=300)
        plt.savefig(directoryfigure + 'Walsh_v2_991.png',dpi=300)
        plt.savefig(directoryfigure + 'Walsh_v2_992.png',dpi=300)
        plt.savefig(directoryfigure + 'Walsh_v2_993.png',dpi=300)
        plt.savefig(directoryfigure + 'Walsh_v2_994.png',dpi=300)
        plt.savefig(directoryfigure + 'Walsh_v2_995.png',dpi=300)
        plt.savefig(directoryfigure + 'Walsh_v2_996.png',dpi=300)
        plt.savefig(directoryfigure + 'Walsh_v2_997.png',dpi=300)
        plt.savefig(directoryfigure + 'Walsh_v2_998.png',dpi=300)
        plt.savefig(directoryfigure + 'Walsh_v2_999.png',dpi=300)
    # only the last annotation handle is removed; earlier ones go away with
    # the figure
    t.remove()
|
mit
|
NixaSoftware/CVis
|
venv/lib/python2.7/site-packages/pandas/core/reshape/concat.py
|
2
|
21484
|
"""
concat routines
"""
import numpy as np
from pandas import compat, DataFrame, Series, Index, MultiIndex
from pandas.core.index import (_get_objs_combined_axis,
_ensure_index, _get_consensus_names,
_all_indexes_same)
from pandas.core.categorical import (_factorize_from_iterable,
_factorize_from_iterables)
from pandas.core.internals import concatenate_block_managers
from pandas.core import common as com
from pandas.core.generic import NDFrame
import pandas.core.dtypes.concat as _concat
# ---------------------------------------------------------------------
# Concatenate DataFrame objects
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
           keys=None, levels=None, names=None, verify_integrity=False,
           copy=True):
    """
    Concatenate pandas objects along a particular axis with optional set
    logic along the other axes, optionally adding a hierarchical index
    level on the concatenation axis (useful when the labels are the same,
    or overlapping, on the passed axis number).

    Parameters
    ----------
    objs : a sequence or mapping of Series, DataFrame, or Panel objects
        If a dict is passed, its sorted keys are used as the `keys`
        argument unless `keys` is given explicitly.  Any None objects are
        dropped silently unless they are all None, in which case a
        ValueError is raised.
    axis : {0/'index', 1/'columns'}, default 0
        The axis to concatenate along.
    join : {'inner', 'outer'}, default 'outer'
        How to handle indexes on the other axis(es).
    join_axes : list of Index objects
        Specific indexes to use for the other n - 1 axes instead of
        performing inner/outer set logic.
    ignore_index : boolean, default False
        If True, do not use the index values along the concatenation
        axis; the resulting axis is labeled 0, ..., n - 1.  Index values
        on the other axes are still respected in the join.
    keys : sequence, default None
        Outermost level values for a hierarchical index; pass tuples when
        multiple levels are wanted.
    levels : list of sequences, default None
        Specific levels (unique values) for constructing a MultiIndex;
        otherwise inferred from `keys`.
    names : list, default None
        Names for the levels in the resulting hierarchical index.
    verify_integrity : boolean, default False
        Check whether the new concatenated axis contains duplicates.
        This can be very expensive relative to the actual concatenation.
    copy : boolean, default True
        If False, do not copy data unnecessarily.

    Returns
    -------
    concatenated : object, type of objs
        A ``Series`` when every input is a ``Series`` and ``axis=0``;
        otherwise a ``DataFrame``.

    See Also
    --------
    Series.append
    DataFrame.append
    DataFrame.join
    DataFrame.merge

    Notes
    -----
    The keys, levels, and names arguments are all optional.  A walkthrough
    of how this method fits in with other tools for combining pandas
    objects can be found at
    http://pandas.pydata.org/pandas-docs/stable/merging.html
    """
    # All of the heavy lifting -- validation, axis resolution and block
    # concatenation -- happens inside the _Concatenator helper.
    concatenator = _Concatenator(
        objs,
        axis=axis,
        join=join,
        join_axes=join_axes,
        keys=keys,
        levels=levels,
        names=names,
        ignore_index=ignore_index,
        verify_integrity=verify_integrity,
        copy=copy,
    )
    return concatenator.get_result()
class _Concatenator(object):
    """
    Orchestrates a concatenation operation for BlockManagers.

    The constructor validates and normalizes the inputs (dropping Nones,
    upgrading mixed-ndim objects, resolving the axis) and precomputes the
    result axes; :meth:`get_result` then performs the actual block
    concatenation.
    """
    def __init__(self, objs, axis=0, join='outer', join_axes=None,
                 keys=None, levels=None, names=None,
                 ignore_index=False, verify_integrity=False, copy=True):
        # a single NDFrame or a bare string is a usage error, not iterable
        if isinstance(objs, (NDFrame, compat.string_types)):
            raise TypeError('first argument must be an iterable of pandas '
                            'objects, you passed an object of type '
                            '"{name}"'.format(name=type(objs).__name__))
        if join == 'outer':
            self.intersect = False
        elif join == 'inner':
            self.intersect = True
        else:  # pragma: no cover
            raise ValueError('Only can inner (intersect) or outer (union) '
                             'join the other axis')
        # a dict of objects concatenates in sorted-key order unless keys
        # were passed explicitly
        if isinstance(objs, dict):
            if keys is None:
                keys = sorted(objs)
            objs = [objs[k] for k in keys]
        else:
            objs = list(objs)
        if len(objs) == 0:
            raise ValueError('No objects to concatenate')
        if keys is None:
            objs = list(com._not_none(*objs))
        else:
            # #1649
            # drop None objects together with their matching keys
            clean_keys = []
            clean_objs = []
            for k, v in zip(keys, objs):
                if v is None:
                    continue
                clean_keys.append(k)
                clean_objs.append(v)
            objs = clean_objs
            name = getattr(keys, 'name', None)
            keys = Index(clean_keys, name=name)
        if len(objs) == 0:
            raise ValueError('All objects passed were None')
        # consolidate data & figure out what our result ndim is going to be
        ndims = set()
        for obj in objs:
            if not isinstance(obj, NDFrame):
                msg = ('cannot concatenate object of type "{0}";'
                       ' only pd.Series, pd.DataFrame, and pd.Panel'
                       ' (deprecated) objs are valid'.format(type(obj)))
                raise TypeError(msg)
            # consolidate
            obj._consolidate(inplace=True)
            ndims.add(obj.ndim)
        # get the sample
        # want the higest ndim that we have, and must be non-empty
        # unless all objs are empty
        sample = None
        if len(ndims) > 1:
            max_ndim = max(ndims)
            for obj in objs:
                if obj.ndim == max_ndim and np.sum(obj.shape):
                    sample = obj
                    break
        else:
            # filter out the empties if we have not multi-index possibilities
            # note to keep empty Series as it affect to result columns / name
            non_empties = [obj for obj in objs
                           if sum(obj.shape) > 0 or isinstance(obj, Series)]
            if (len(non_empties) and (keys is None and names is None and
                                      levels is None and
                                      join_axes is None and
                                      not self.intersect)):
                objs = non_empties
                sample = objs[0]
        if sample is None:
            sample = objs[0]
        self.objs = objs
        # Standardize axis parameter to int
        if isinstance(sample, Series):
            axis = DataFrame()._get_axis_number(axis)
        else:
            axis = sample._get_axis_number(axis)
        # Need to flip BlockManager axis in the DataFrame special case
        self._is_frame = isinstance(sample, DataFrame)
        if self._is_frame:
            axis = 1 if axis == 0 else 0
        self._is_series = isinstance(sample, Series)
        if not 0 <= axis <= sample.ndim:
            raise AssertionError("axis must be between 0 and {ndim}, input was"
                                 " {axis}".format(ndim=sample.ndim, axis=axis))
        # if we have mixed ndims, then convert to highest ndim
        # creating column numbers as needed
        if len(ndims) > 1:
            current_column = 0
            max_ndim = sample.ndim
            self.objs, objs = [], self.objs
            for obj in objs:
                ndim = obj.ndim
                if ndim == max_ndim:
                    pass
                elif ndim != max_ndim - 1:
                    raise ValueError("cannot concatenate unaligned mixed "
                                     "dimensional NDFrame objects")
                else:
                    # wrap the lower-dim object in the sample's constructor,
                    # inventing a column number when it has no usable name
                    name = getattr(obj, 'name', None)
                    if ignore_index or name is None:
                        name = current_column
                        current_column += 1
                    # doing a row-wise concatenation so need everything
                    # to line up
                    if self._is_frame and axis == 1:
                        name = 0
                    obj = sample._constructor({name: obj})
                self.objs.append(obj)
        # note: this is the BlockManager axis (since DataFrame is transposed)
        self.axis = axis
        self.join_axes = join_axes
        self.keys = keys
        self.names = names or getattr(keys, 'names', None)
        self.levels = levels
        self.ignore_index = ignore_index
        self.verify_integrity = verify_integrity
        self.copy = copy
        # precompute the axes of the result; also used by get_result()
        self.new_axes = self._get_new_axes()
    def get_result(self):
        """Perform the concatenation and return the resulting object."""
        # series only
        if self._is_series:
            # stack blocks
            if self.axis == 0:
                name = com._consensus_name_attr(self.objs)
                mgr = self.objs[0]._data.concat([x._data for x in self.objs],
                                                self.new_axes)
                cons = _concat._get_series_result_type(mgr, self.objs)
                return cons(mgr, name=name).__finalize__(self, method='concat')
            # combine as columns in a frame
            else:
                data = dict(zip(range(len(self.objs)), self.objs))
                cons = _concat._get_series_result_type(data)
                index, columns = self.new_axes
                df = cons(data, index=index)
                df.columns = columns
                return df.__finalize__(self, method='concat')
        # combine block managers
        else:
            mgrs_indexers = []
            for obj in self.objs:
                mgr = obj._data
                indexers = {}
                for ax, new_labels in enumerate(self.new_axes):
                    if ax == self.axis:
                        # Suppress reindexing on concat axis
                        continue
                    obj_labels = mgr.axes[ax]
                    if not new_labels.equals(obj_labels):
                        indexers[ax] = obj_labels.reindex(new_labels)[1]
                mgrs_indexers.append((obj._data, indexers))
            new_data = concatenate_block_managers(
                mgrs_indexers, self.new_axes, concat_axis=self.axis,
                copy=self.copy)
            if not self.copy:
                new_data._consolidate_inplace()
            cons = _concat._get_frame_result_type(new_data, self.objs)
            return (cons._from_axes(new_data, self.new_axes)
                    .__finalize__(self, method='concat'))
    def _get_result_dim(self):
        # concatenating Series along axis 1 yields a 2-D DataFrame
        if self._is_series and self.axis == 1:
            return 2
        else:
            return self.objs[0].ndim
    def _get_new_axes(self):
        """Return the list of axes for the result, one per dimension."""
        ndim = self._get_result_dim()
        new_axes = [None] * ndim
        if self.join_axes is None:
            for i in range(ndim):
                if i == self.axis:
                    continue
                new_axes[i] = self._get_comb_axis(i)
        else:
            if len(self.join_axes) != ndim - 1:
                raise AssertionError("length of join_axes must not be equal "
                                     "to {length}".format(length=ndim - 1))
            # ufff...
            indices = compat.lrange(ndim)
            indices.remove(self.axis)
            for i, ax in zip(indices, self.join_axes):
                new_axes[i] = ax
        new_axes[self.axis] = self._get_concat_axis()
        return new_axes
    def _get_comb_axis(self, i):
        # combine (union or intersect) the labels of all objects on axis i
        data_axis = self.objs[0]._get_block_manager_axis(i)
        try:
            return _get_objs_combined_axis(self.objs, axis=data_axis,
                                           intersect=self.intersect)
        except IndexError:
            types = [type(x).__name__ for x in self.objs]
            raise TypeError("Cannot concatenate list of {types}"
                            .format(types=types))
    def _get_concat_axis(self):
        """
        Return index to be used along concatenation axis.
        """
        if self._is_series:
            if self.axis == 0:
                indexes = [x.index for x in self.objs]
            elif self.ignore_index:
                idx = com._default_index(len(self.objs))
                return idx
            elif self.keys is None:
                # derive column labels from the Series names, numbering the
                # nameless ones
                names = [None] * len(self.objs)
                num = 0
                has_names = False
                for i, x in enumerate(self.objs):
                    if not isinstance(x, Series):
                        raise TypeError("Cannot concatenate type 'Series' "
                                        "with object of type {type!r}"
                                        .format(type=type(x).__name__))
                    if x.name is not None:
                        names[i] = x.name
                        has_names = True
                    else:
                        names[i] = num
                        num += 1
                if has_names:
                    return Index(names)
                else:
                    return com._default_index(len(self.objs))
            else:
                return _ensure_index(self.keys)
        else:
            indexes = [x._data.axes[self.axis] for x in self.objs]
        if self.ignore_index:
            idx = com._default_index(sum(len(i) for i in indexes))
            return idx
        if self.keys is None:
            concat_axis = _concat_indexes(indexes)
        else:
            concat_axis = _make_concat_multiindex(indexes, self.keys,
                                                  self.levels, self.names)
        self._maybe_check_integrity(concat_axis)
        return concat_axis
    def _maybe_check_integrity(self, concat_index):
        # only enforced when verify_integrity=True; duplicate detection can
        # be expensive on large axes
        if self.verify_integrity:
            if not concat_index.is_unique:
                overlap = concat_index.get_duplicates()
                raise ValueError('Indexes have overlapping values: '
                                 '{overlap!s}'.format(overlap=overlap))
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _make_concat_multiindex(indexes, keys, levels=None, names=None):
    """
    Build the MultiIndex for the concatenation axis when ``keys`` were
    passed: the keys become the outermost level(s), with the original
    index values appended as the innermost level(s).
    """
    # normalize keys/levels into one sequence per MultiIndex level
    if ((levels is None and isinstance(keys[0], tuple)) or
        (levels is not None and len(levels) > 1)):
        zipped = compat.lzip(*keys)
        if names is None:
            names = [None] * len(zipped)
        if levels is None:
            _, levels = _factorize_from_iterables(zipped)
        else:
            levels = [_ensure_index(x) for x in levels]
    else:
        zipped = [keys]
        if names is None:
            names = [None]
        if levels is None:
            levels = [_ensure_index(keys)]
        else:
            levels = [_ensure_index(x) for x in levels]
    # slow path: the pieces have differing indexes
    if not _all_indexes_same(indexes):
        label_list = []
        # things are potentially different sizes, so compute the exact labels
        # for each level and pass those to MultiIndex.from_arrays
        for hlevel, level in zip(zipped, levels):
            to_concat = []
            for key, index in zip(hlevel, indexes):
                try:
                    i = level.get_loc(key)
                except KeyError:
                    raise ValueError('Key {key!s} not in level {level!s}'
                                     .format(key=key, level=level))
                to_concat.append(np.repeat(i, len(index)))
            label_list.append(np.concatenate(to_concat))
        concat_index = _concat_indexes(indexes)
        # these go at the end
        if isinstance(concat_index, MultiIndex):
            levels.extend(concat_index.levels)
            label_list.extend(concat_index.labels)
        else:
            codes, categories = _factorize_from_iterable(concat_index)
            levels.append(categories)
            label_list.append(codes)
        if len(names) == len(levels):
            names = list(names)
        else:
            # make sure that all of the passed indices have the same nlevels
            if not len(set([idx.nlevels for idx in indexes])) == 1:
                raise AssertionError("Cannot concat indices that do"
                                     " not have the same number of levels")
            # also copies
            names = names + _get_consensus_names(indexes)
        return MultiIndex(levels=levels, labels=label_list, names=names,
                          verify_integrity=False)
    # fast path: every piece shares the same index, so labels tile/repeat
    new_index = indexes[0]
    n = len(new_index)
    kpieces = len(indexes)
    # also copies
    new_names = list(names)
    new_levels = list(levels)
    # construct labels
    new_labels = []
    # do something a bit more speedy
    for hlevel, level in zip(zipped, levels):
        hlevel = _ensure_index(hlevel)
        mapped = level.get_indexer(hlevel)
        # -1 marks keys missing from the explicitly passed level
        mask = mapped == -1
        if mask.any():
            raise ValueError('Values not found in passed level: {hlevel!s}'
                             .format(hlevel=hlevel[mask]))
        new_labels.append(np.repeat(mapped, n))
    if isinstance(new_index, MultiIndex):
        new_levels.extend(new_index.levels)
        new_labels.extend([np.tile(lab, kpieces) for lab in new_index.labels])
    else:
        new_levels.append(new_index)
        new_labels.append(np.tile(np.arange(n), kpieces))
    if len(new_names) < len(new_levels):
        new_names.extend(new_index.names)
    return MultiIndex(levels=new_levels, labels=new_labels, names=new_names,
                      verify_integrity=False)
|
apache-2.0
|
bilgili/nest-simulator
|
testsuite/manualtests/test_tsodyks_depr_fac.py
|
13
|
1136
|
# -*- coding: utf-8 -*-
#
# test_tsodyks_depr_fac.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from scipy import *
from matplotlib.pylab import *
from matplotlib.mlab import *
def plot_spikes():
    """Plot the membrane potential trace recorded by the voltmeter.

    Reads ``voltmeter-0-0-4.dat`` (two columns: time [ms], V_m [mV]),
    plots the trace and saves it as ``test_tsodyks_depressing.png``.
    The unused local constants (dt, nbins, N) of the original were
    removed; they were never referenced.
    """
    # NOTE(review): ``load`` comes from the ``matplotlib.mlab`` star import
    # and was removed in matplotlib >= 3; a modern port would need
    # numpy.loadtxt instead.
    vm = load('voltmeter-0-0-4.dat')
    figure(1)
    clf()
    plot(vm[:,0], vm[:,1], 'r')
    xlabel('time / ms')
    ylabel('$V_m [mV]$')
    savefig('test_tsodyks_depressing.png')
plot_spikes()
show()
|
gpl-2.0
|
marcocaccin/scikit-learn
|
sklearn/manifold/setup.py
|
99
|
1243
|
import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.c"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.c"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
|
bsd-3-clause
|
cwebster2/pyMeteo
|
pymeteo/skewt.py
|
1
|
45186
|
#!/usr/bin/env python
"""
.. module:: pymeteo.skewt
:platform: Unix, Windows
:synopsis: Skew-T/Log-P plotting
.. moduleauthor:: Casey Webster <casey.webster@gmail.com>
This module allows plotting Skew-T/Log-P diagrams
and hodographs from arrays of
data with helper functions to plot directly from CM1 output files
in either Grads or HDF5 format and also from sounding data text
files in the format used for WRF and CM1 initialization.
This module also has code to produce rudimentary analysis of
the soundings data. Currently this is limited to CAPE (surface
and most unstable), CIN, wind shear, storm relative helicity, lifted index
and a rough estimate of storm motion based on the Bunkers (2000)
method.
.. figure:: _static/images/skewt.png
:align: center
Example Skew-T/Log-P with hodograph
Plotting
++++++++
High level plotting functions
-----------------------------
* :py:func:`plot_cm1h5` -- plots skewt from CM1 generated hdf5 files processed by ingest_cm1
* :py:func:`plot_cm1` -- plots skewt from CM1 generated GRaDS style output
* :py:func:`plot_wrf` -- plots skewt from WRF generated NetCDF output
* :py:func:`plot_sounding_data` -- plots skewt from CM1/WRF input sounding files
* :py:func:`plot_sounding_data_uwyo` -- plots skewt from uwyo sounding data (file or online)
These functions are called by command line scripts provided to make plotting from data files easy.
You can invoke these through command line scripts as:
.. code-block:: bash
# From tabular sounding data (e.g. WRF or CM1 intial sounding data)
$ skewt tabular -f sounding.dat skewt.pdf
# From GrADS stlye CM1 output
$ skewt cm1 -p . -d cm1out -x 0 -y 0 skewt-cm1.pdf
# From HDF5 CM1 output
$ skewt cm1hdf -f model-data.h5 -x 0 -y 0 skewt.pdf
# From WRF output
$ skewt wrf -f wrfout.nc --lat 30 --lon -80 -t 0 skewt.pdf
# From University of Wyoming data file
$ skewt uwyo -f uwyo-data.dat skewt.pdf
# From University of Wyming website data (live)
$ skewt uwyoweb --station 72251 skewt.pdf
* :py:func:`plot` -- generic high level plot function
.. code-block:: python
import numpy as np
import pymeteo.skewt as skewt
# prepare 1D arrays height (z), pressure (p), potential temperature (th),
# water vapor mixing ratio (qv), winds (u and v) all of the same length.
skewt.plot(None, z, th, p, qv, u, v, 'skewt.pdf')
Variables affecting the plot output
-----------------------------------
These variables affect the skewness of the plot and the bounds of the plot
* :py:data:`skew_angle` -- skewness
* :py:data:`Tmin`, :py:data:`Tmax` -- T dimension bounds (temperature, C)
* :py:data:`pbot`, :py:data:`ptop` -- p dimension bounds (pressure, Pa)
These variables affect the lines plotted on the skew-t
* :py:data:`isobars`
* :py:data:`isotherms`
* :py:data:`dry_adiabats`
* :py:data:`moist_adiabats`
* :py:data:`mixing_ratios`
These variables affect the plotting of the lines above
* :py:data:`lw_major`, :py:data:`lw_minor`
* :py:data:`lc_major`, :py:data:`lc_minor`
These variables affect the plotting style of skewt data
* :py:data:`linecolor_T`, :py:data:`linewidth_T`
* :py:data:`linecolor_Tve`, :py:data:`linewidth_Tve`, :py:data:`linestyle_Tve`
* :py:data:`linecolor_Td`, :py:data:`linewidth_Td`
* :py:data:`linecolor_Twb`, :py:data:`linewidth_Twb`
* :py:data:`linecolor_Parcel_T`, :py:data:`linewidth_Parcel_T`
* :py:data:`linecolor_Tvp`, :py:data:`linewidth_Tvp`, :py:data:`linestyle_Tvp`
Functions to draw isolines
--------------------------
If you are manually plotting skew-t data, these functions can be used to draw
various lines on your plot. These are used by the high level plotting functions.
* :py:func:`draw_isotherms`
* :py:func:`draw_isobars`
* :py:func:`draw_dry_adiabat`
* :py:func:`draw_moist_adiabat`
* :py:func:`draw_water_mix_ratio`
Module reference
++++++++++++++++
"""
import os
import math
import pymeteo.thermo as met
import pymeteo.dynamics as dyn
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import h5py
import pymeteo.cm1.read_grads as cm1
import pymeteo.interp
import pymeteo.constants as metconst
from netCDF4 import Dataset
import datetime as dt
import pymeteo.wrf as wrf
import datetime
import pymeteo.uwyo as uwyo
# This defines the skew-angle of the T axis
skew_angle = 37.5
"""This defines the skewness of the T axis"""
# These define the domain plotted. T values are at @1000 mb (e.g. unskewed)
Tmin = -40.0
"""Sets the left boundary of the plot. Temperature at 1000 mb (C)"""
Tmax = 40.0
"""Sets the right boundary of the plot. Temperature at 1000 mb (C)"""
pbot = 105000.0
"""Sets the bottom boundary of the plot. Pressure (Pa)"""
ptop = 10000.0
"""Sets the top boundary of the plot. Pressure (Pa)"""
## Values below used for plotting
dp = 5000.0
"""The delta pressure used in calculating adiabats"""
ptickbot = 100000.0
"""Lowest elevated pressure level to be labelled in the plot (Pa)"""
pticktop = 10000.0
"""Highest elevated pressure level to be labelled in the plot (Pa)"""
tickdp = 10**4
"""The spacing between pressure labels in the plot (Pa)"""
plevs = np.arange(pbot,ptop-1,-dp)
"""List of pressure levels to do calculations on"""
fontscalefactor = 1  # global multiplier applied to label font sizes (see set_fontscalefactor)
## Lines to plot
isotherms = np.arange(-150,51,10) # in degrees C
"""List of isotherms to plot on the skew-t. In degrees C"""
isobars = np.arange(ptickbot,ptop-1,-5000) # in Pa
"""List of isobars to plot on the skew-t. In Pascals"""
dry_adiabats = np.arange(-40,210,10) # in degrees C
"""List of dry adiabats to plot on the skew-t. In degrees C"""
moist_adiabats = np.concatenate((np.arange(-15.,10.1,5.),np.arange(12.,45.1,2.)))
"""List of moist adiabats to plot on the skew-t. In degrees C"""
mixing_ratios = [0.2,0.4,0.8,1,2,3,4,6,8,10,14,18,24,32,40]
"""List of water vapor mixing ratio lines to plot. In g/kg"""
## Linewidths
lw_major = 0.6
"""Line width of 'major' lines. E.g. Lines plotted at 10 C intervals or 50 mb intervals"""
lw_minor = 0.25
"""Line width of 'minor' lines. E.g. Lines not plotted at the major intervals"""
## Linecolors
lc_major = 'grey'
"""Line color of 'major' lines. E.g. Lines plotted at 10 C intervals or 50 mb intervals"""
lc_minor = 'lightgrey'
"""Line color of 'minor' lines. E.g. Lines not plotted at the major intervals"""
## Skew-T line parameters
linecolor_T = 'black'
"""Line color of environmental temperature profile"""
linewidth_T = 1.5
"""Line width of environmental temperature profile"""
linecolor_Td = 'green'
"""Line color of environmental dew-point temperature profile"""
linewidth_Td = 1.5
"""Line width of environmental dew-point temperature profile"""
linecolor_Parcel_T = 'red'
"""Line color of lifted surface parcel temperature profile"""
linewidth_Parcel_T = 1.0
"""Line width of lifted surface parcel temperature profile"""
linecolor_Twb = 'blue'
"""Line color of environmental wet-bulb temperature profile"""
linewidth_Twb = 0.5
"""Line width of environmental wet-bulb temperature profile"""
linecolor_Tve = 'black'
"""Line color of environmental virtual temperature profile"""
linewidth_Tve = 0.7
"""Line width of environmental virtual temperature profile"""
linestyle_Tve = '--'
"""Line style of environmental virtual temperature profile"""
linecolor_Tvp = 'red'
"""Line color of lifted surface parcel virtual temperature profile"""
linewidth_Tvp = 0.7
"""Line width of lifted surface parcel virtual temperature profile"""
linestyle_Tvp = '--'
"""Line style of lifted surface parcel virtual temperature profile"""
# for plotted lines (isolines / wind-barb axis), all pressures in Pa
pb_plot=105000
pt_plot=10000
pt_plot2=20000
dp_plot=1000
plevs_plot = np.arange(pb_plot,pt_plot-1,-dp_plot)
plevs_plot2 = np.arange(pb_plot,pt_plot2-1,-dp_plot)
plevs_std = [100000,85000,70000,50000,40000,30000,25000,20000,15000]  # standard levels to annotate (Pa)
#TODO: enforce square w/ aspect ratio of plot
#Domain of the hodograph (m/s)
umin = -22.5
umax = 27.5
vmin = -12.5
vmax = 27.5
##################################################################################
def plot_cm1h5(filename, xi, yi, output):
    """ Plots a skewt from an HDF5 file.

    :param filename: The name of the HDF5 file to open.
    :type filename: str
    :param xi: The X gridpoint of the skewt plot.
    :type xi: int
    :param yi: The Y gridpoint of the skewt plot.
    :type yi: int
    :param output: Output filename to save plot
    :type output: str

    To use this function the HDF5 file must have the following
    variables:

    - *xh* -- x scalar grid locations
    - *yh* -- y scalar grid locations
    - *z* -- z grid locations
    - *time* -- timestep of file data (scalar)
    - *th* -- potential temperature (K)
    - *thpert* -- potential temperature perturbation (K)
    - *prs* -- pressure (Pa)
    - *u* -- u wind speed (m/s)
    - *v* -- v wind speed (m/s)
    - *qv* -- water vapor mixing ratio (kg/kg)

    The names of these variables correspond to default naming by
    CM1 using HDF5 output.
    """
    # Use a context manager so the HDF5 handle is released even if a
    # dataset is missing (previously the file was never closed).
    with h5py.File(filename, 'r') as f:
        z = f["/mesh/zh"][:] # m
        x = f["/mesh/xh"][xi] # m
        y = f["/mesh/yh"][yi] # m
        t = f["/time"][0] # s
        # full fields are reconstructed as perturbation + base state
        th = f["/3d_s/thpert"][:,yi,xi] + f["/basestate/th0"][:] # K
        p = f["/3d_s/ppert"][:,yi,xi] + f["/basestate/pres0"][:] # Pa
        u = f["/3d_u/u"][:,yi,xi] # m/s
        v = f["/3d_v/v"][:,yi,xi] # m/s
        qv = f["/3d_s/qvpert"][:,yi,xi] + f["/basestate/qv0"][:] #kg/kg
    print(x,y,z[0],t,th[0],u[0],v[0],p[0],qv[0])
    plot_old(x,y,z,t,th,p,qv,u,v,filename, output)
##################################################################################
def plot_wrf(filename, lat, lon, time, output):
    """ Plots a skewt from a WRF NetCDF output file.

    :param filename: The name of the NetCDF file to open.
    :type filename: str
    :param lat: The latitude of the location of the skewt plot.
    :type lat: float
    :param lon: The longitude of the location of the skewt plot.
    :type lon: float
    :param time: Time index into the dataset to plot.
    :type time: int
    :param output: Output filename to save plot
    :type output: str

    This function assumes the NetCDF file was produced by the WRF model.
    If another program produces the file but names variables in the same
    manner as the WRF model, this function should work for that data as well.

    The variables used in the wrfout.nc data are:

    - Times ?
    - U(Time, bottom_top, south_north, west_east_stag)
    - V(Time, bottom_top, south_north_stag, west_east)
    - T(Time, bottom_top, south_north, west_east) Theta perturbation
    - P(Time, bottom_top, south_north, west_east) Pres perturbation
    - PB(Time, bottom_top, south_north, west_east) pres base state
    - QVAPOR(Time, bottom_top, south_north, west_east)
    - PH(Time, bottom_top_stag, south_north, west_east)
    - PHB(Time, bottom_top_stag, south_north, west_east)
      Z = (((PH(k)+PH(k+1)) / 2) + ((PHB(k)+(PHB(k+1)) / 2) / 9.81
    - TH2(Time, south_north, west_east) -- 2m pot temp
    - PSFC(Time, south_north, west_east) -- surface pres
    - U10(Time, south_north, west_east) -- 10m U
    - V10(Time, south_north, west_east) -- 10m V
    - XTIME(Time) -- minutes since start of sim
    - XLAT, XLONG, XLAT_U/V, XLONG_U/V
    - Start_date

    Need time, theta, pressure
    """
    # Open NetCDF file
    f = Dataset(filename, 'r')
    wrf_lats = f.variables['XLAT'][time,:,:] # :,0
    wrf_lons = f.variables['XLONG'][time,:,:]# 0,:
    wrf_time = (f.variables['Times'][time]).tostring().decode('UTF-8')
    # refactor this into a wrf utility module
    map_proj = f.getncattr('MAP_PROJ')
    truelat1 = f.getncattr('TRUELAT1')
    truelat2 = f.getncattr('TRUELAT2')
    stand_lon= f.getncattr('STAND_LON')
    dx = f.getncattr('DX')
    dy = f.getncattr('DY')
    ref_lat = wrf_lats[0,0]
    ref_lon = wrf_lons[0,0]
    # map the requested lat/lon to mass-grid indices ...
    i,j = wrf.ll_to_ij(map_proj, truelat1, truelat2, stand_lon, dx, dy, ref_lat, ref_lon, lat, lon)
    # ... and to the u/v staggered-grid indices
    ref_lat_u = f.variables['XLAT_U'][time,0,0]
    ref_lon_u = f.variables['XLONG_U'][time,0,0]
    ref_lat_v = f.variables['XLAT_V'][time,0,0]
    ref_lon_v = f.variables['XLONG_V'][time,0,0]
    i_u,j_u = wrf.ll_to_ij(map_proj, truelat1, truelat2, stand_lon, dx, dy, ref_lat_u, ref_lon_u, lat, lon)
    i_v,j_v = wrf.ll_to_ij(map_proj, truelat1, truelat2, stand_lon, dx, dy, ref_lat_v, ref_lon_v, lat, lon)
    # location string, e.g. "30.0 N, 80.0 W"
    N = 'N'
    if (lat < 0.):
        N = 'S'
    E = 'E'
    if (lon < 0.):
        E = 'W'
    x = "{0} {1}, {2} {3}".format(abs(lat), N, abs(lon), E)
    # pressure: perturbation + base state, with PSFC prepended at the surface
    p_surface = f.variables['PSFC'][time,j,i]
    p = f.variables['P'][time,:,j,i] + f.variables['PB'][time,:,j,i]
    p = np.insert(p, 0, p_surface)
    # z heights: de-stagger geopotential to mass levels, divide by g
    ph = f.variables['PH'][time,:,j,i]
    phb = f.variables['PHB'][time,:,j,i]
    # NOTE(review): z_surface uses the raw perturbation geopotential PH[0];
    # arguably this should be (ph[0]+phb[0])/9.81 -- confirm against WRF docs.
    z_surface = ph[0]
    z = np.zeros(len(ph)-1, np.float32)
    for k in range(len(z)):
        z[k] = ((( ph[k]+ph[k+1] ) / 2.0) + ( phb[k] + phb[k+1]) / 2.0 ) / 9.81
    z = np.insert(z, 0, z_surface)
    # t
    t = wrf_time
    # theta: T holds the perturbation from the 300 K base state
    th_surface = f.variables['TH2'][time,j,i]
    th = f.variables['T'][time,:,j,i] + 300.0
    th = np.insert(th, 0, th_surface)
    # u
    u_surface = f.variables['U10'][time,j,i]
    u = f.variables['U'][time,:,j_u,i_u]
    u = np.insert(u, 0, u_surface)
    # v
    v_surface = f.variables['V10'][time,j,i]
    v = f.variables['V'][time,:,j_v,i_v]
    v = np.insert(v, 0, v_surface)
    # qv
    qv_surface = f.variables['Q2'][time,j,i]
    qv = f.variables['QVAPOR'][time,:,j,i]
    qv = np.insert(qv, 0, qv_surface)
    # all data has been extracted into arrays; release the file handle
    # (previously the Dataset was never closed)
    f.close()
    title = os.path.basename(filename)
    print(x, z[0],t,th[0],u[0],v[0],p[0],qv[0])
    plot(x, z, th, p, qv, u, v, output, t, title)
##################################################################################
#
def plot_sounding_data(filename, output):
    """Plot SkewT from a WRF / CM1 compatible sounding data file

    :param filename: The name of the file to open.
    :type filename: str
    :param output: The name of the file to output plot
    :type output: str

    The datafile is the same format as used in sounding initalization
    files for WRF and CM1.

    The format is:
    1 line header with surface pressure (mb), theta (K) and qv (g/kg)
    n number of lines with z (m), theta (K), qv (g/kg), u (m/s), v(m/s)
    """
    # load first line of file (surface values)
    with open(filename, 'r') as f:
        surface = f.readline()
        p0, th0, qv0 = surface.split()
    # load rest of file
    _z, _th, _qv, _u, _v = np.loadtxt(filename, unpack=True, skiprows=1)
    # create arrays with one more z index for surface values
    nk = len(_z) + 1
    z = np.empty(nk, np.float32)
    th= np.empty(nk, np.float32)
    qv= np.empty(nk, np.float32)
    u = np.empty(nk, np.float32)
    v = np.empty(nk, np.float32)
    p = np.empty(nk, np.float32)
    # copy the arrays, leaving room at the surface
    z[1:nk] = _z
    th[1:nk] = _th
    qv[1:nk] = _qv / 1000.  # g/kg -> kg/kg
    u[1:nk] = _u
    v[1:nk] = _v
    # assign surface values
    z[0] = 0.
    th[0] = float(th0)
    qv[0] = float(qv0) / 1000.  # g/kg -> kg/kg
    # extrapolate the lowest three wind levels down to the surface
    u[0] = 1.75*u[1]-u[2]+0.25*u[3]
    v[0] = 1.75*v[1]-v[2]+0.25*v[3]
    p[0] = float(p0) * 100.  # mb -> Pa
    # integrate pressure, assume hydrostatic
    # dp = -rho*g*dz, using the layer-mean temperature from theta
    for k in np.arange(1,nk):
        p[k] = p[k-1] * np.exp((metconst.g*(z[k-1]-z[k]))/(metconst.Rd*met.T((th[k]+th[k-1])/2.,p[k-1])))
    #for k in np.arange(nk):
    #    print(z[k], p[k], th[k], qv[k], u[k], v[k])
    plot(None, z, th, p, qv, u, v, output, title="input sounding")
def plot_sounding_data_uwyo(filename, output, stationID=0, date=None):
    """Plot SkewT from University of Wyoming sounding data

    :param filename: The name of the file containing sounding data
    :type filename: str
    :param output: The name of the file to output plot
    :type output: str
    :param stationID: The station ID of a sounding station
    :type stationID: int
    :param date: Date and time of a sounding to request
    :type date: datetime

    If filename is not None then this function will plot data from that file.
    If filename is None and stationID is non-zero, then this will request a sounding
    datafile from http://weather.uwyo.edu for the specified date. If date is None
    then the requested sounding will be the most recent sounding taken at either
    12Z or 00Z.
    """
    if filename is not None:
        title, p, z, qv, wind_dir, wind_speed, th = uwyo.fetch_from_file(filename)
    elif stationID != 0:
        if date is None:
            date = datetime.datetime.utcnow()
        title, p, z, qv, wind_dir, wind_speed, th = uwyo.fetch_from_web(date, stationID)
    else:
        # No data source: bail out early. Previously execution fell through
        # to the lines below and raised a NameError on undefined variables.
        print("Neither input file or station ID was provided. No output.")
        return
    p, z, qv, u, v, th = uwyo.transform_and_check_data(p, z, qv, wind_dir, wind_speed, th)
    plot(None, z, th, p, qv, u, v, output, title=title)
def plot_sounding_data_csv(filename, output):
    """Plot SkewT from a CSV sounding data file

    :param filename: The name of the file to open.
    :type filename: str
    :param output: The name of the file to output plot
    :type output: str

    The datafile format is CSV with the following columns:

    - pressure (mb)
    - height (m)
    - temperature (C)
    - dew point (C)
    - wind direction (degrees)
    - wind speed (kt; converted to m/s below)

    Missing values should be filled with the value -9999.00
    """
    p,z,T,Td,wdir,wspd = np.loadtxt(filename, delimiter=',', unpack=True)
    # Pressure to Pa
    p = p * 100.
    # z to km
    #z = z / 1000.
    # interpolate missing wind
    nk = len(z)
    u = np.empty(nk, np.float32)
    v = np.empty(nk, np.float32)
    # first pass: convert direction/speed to u/v, carrying the -9999 missing flag
    for k in range(nk):
        if wdir[k] == -9999. and wspd[k] == -9999.:
            u[k] = v[k] = -9999.
        else:
            u[k], v[k] = dyn.wind_deg_to_uv(wdir[k], wspd[k])
            #print('{0:5.2f} {1:5.2f} = {2:5.2f} {3:5.2f}'.format(wdir[k], wspd[k], u[k], v[k]))
    _z = np.empty(2,np.float32)
    _u = np.empty(2,np.float32)
    _v = np.empty(2,np.float32)
    print('INTERPOLATING')
    # second pass: fill each missing level by linear interpolation between the
    # nearest valid levels below (kb) and above (ke); at the profile ends the
    # nearest valid value is used unchanged
    for k in range(nk):
        if wdir[k] == -9999. and wspd[k] == -9999.:
            kb = ke = k
            while kb >= 0 and wdir[kb] == -9999. and wspd[kb] == -9999.:
                kb -= 1
            while ke <= nk-1 and wdir[ke] == -9999. and wspd[ke] == -9999.:
                ke += 1
            # everything in bounds
            if kb >= 0 and ke <= nk-1:
                _z[0] = z[kb]
                _z[1] = z[ke]
                _u[0] = u[kb]
                _u[1] = u[ke]
                _v[0] = v[kb]
                _v[1] = v[ke]
                u[k] = pymeteo.interp.linear(_z, _u, z[k])
                v[k] = pymeteo.interp.linear(_z, _v, z[k])
            elif kb < 0:
                u[k] = u[ke]
                v[k] = v[ke]
            elif ke > nk-1:
                u[k] = u[kb]
                v[k] = v[kb]
    for k in range(nk):
        # kt to m/s
        u[k] = u[k] * 0.5144444
        v[k] = v[k] * 0.5144444
        # print('{0:5.2f} {1:5.2f} = {2:5.2f} {3:5.2f}'.format(wdir[k], wspd[k], u[k], v[k]))
    # calc theta
    th = np.empty(nk, np.float32)
    # calc qv
    qv = np.empty(nk, np.float32)
    for k in range(nk):
        th[k] = met.theta(T[k]+met.T00, p[k])
        # qv ~= 0.622 * e/p, assembled from RH (w) and saturation ratio (pp);
        # presumably a deliberate approximation of 0.622*e/(p-e) -- confirm.
        w = met.es(Td[k]+met.T00) / met.es(T[k]+met.T00)
        pp = met.es(T[k]+met.T00) / p[k]
        qv[k] = 0.622 * pp * w
        #qv[k] = met.es(Td[k]+met.T00) / (met.Rv * (T[k]+met.T00))
    #print(z, th, p, qv, u, v)
    plot(None, z, th, p, qv, u, v, output, title='Sounding Data')
##################################################################################
# plot_cm1
#
# This plots a skewt at domain location xi,yi at time t=0 for a given CM1 dataset
# in grads format
#
def plot_cm1(path, filename, xi, yi,output):
    """Plot skewt from a Grads format CM1 output file

    :parameter path: Path to CM1 dataset
    :type path: str
    :parameter filename: Filename of dataset
    :type filename: str
    :parameter xi: X gridpoint to plot SkewT
    :parameter yi: Y gridpoint to plot SkewT
    :parameter output: Filename to save skewt plot

    This function plots a skewt from a CM1 output file.
    This routine uses winds interpolated to the scalar
    gridpoints.
    """
    f = cm1.CM1(path, filename)
    _z = f.dimZ[:] * 1000. # km->m
    x = f.dimX[xi]
    y = f.dimY[yi]
    t = int(f.dimT[0])
    # read all needed 3D fields for this timestep in one open/close cycle
    f.read3dMultStart(t)
    _th = f.read3dMult('th')
    _p = f.read3dMult('prs')
    _u = f.read3dMult('uinterp')
    _v = f.read3dMult('vinterp')
    _qv = f.read3dMult('qv')
    f.read3dMultStop()
    # build column arrays with one extra slot at index 0 for surface values
    nk = len(_p[xi,yi,:])
    th = np.empty(nk+1, np.float32)
    p = np.empty(nk+1, np.float32)
    u = np.empty(nk+1, np.float32)
    v = np.empty(nk+1, np.float32)
    qv = np.empty(nk+1, np.float32)
    z = np.empty(nk+1, np.float32)
    th[1:] = _th[xi,yi,:]
    p[1:] = _p[xi,yi,:]
    u[1:] = _u[xi,yi,:]
    v[1:] = _v[xi,yi,:]
    qv[1:] = _qv[xi,yi,:]
    z[1:] = _z[:]
    # surface values
    # NOTE(review): hard-coded (1000 hPa, 300 K, 14 g/kg) -- presumably the
    # CM1 base-state surface; confirm against the model configuration.
    p[0] = 100000.
    th[0]= 300.
    qv[0]= 0.014
    u[0] = u[1]
    v[0] = v[1]
    z[0] = 0.
    plot_old(x,y,z,t,th,p,qv,u,v,filename, output)
##################################################################################
# plot
#
# This is the main skewT plotting function for a single output page containing
# A skewt, hodograph and an information block (currently disabled).
#
#TODO: turn x,y into "location text" at end of arg list default None
#TODO: put title at end default None.
#TODO: pass both to plot_the_rest
def plot_old(x, y, z, time, th, p, qv, u, v, title, output):
    # Legacy wrapper: build a location string from the (x, y) gridpoint pair
    # and delegate to the generic plot() entry point.
    location = "{0} km, {1} km".format(x, y)
    plot(location, z, th, p, qv, u, v, output, time, title)
def plot(loc, z, th, p, qv, u, v, output, time = None, title = None):
    """Plots Skew-T/Log-P diagrams with hodograph

    The helper functions above facilitate loading data from
    various formats and then call this function. If you have
    data in another format or arrays of data in python already,
    then this is the function you want to use.

    :parameter loc: Location string
    :parameter z: z grid mesh (1D)
    :parameter time: Time string
    :parameter th: Potential temperature at z points
    :parameter p: Pressure at z points
    :parameter qv: Water vapor mixing ratio at z points
    :parameter u: u winds at z points
    :parameter v: v winds at z points
    :parameter title: Title for plot
    :parameter output: Filename to save plot to
    """
    fig = plt.figure(1, figsize=(10, 8), dpi=300, edgecolor='k')
    # sounding
    ax1 = plt.subplot(121)
    plot_sounding_axes(ax1)
    plot_sounding(ax1, z, th, p, qv, None, None)
    # hodograph
    ax2 = plt.subplot(222)
    plot_hodo_axes(ax2)
    plot_hodograph(ax2, z, u, v)
    # datablock (best effort: the diagram is still useful without statistics)
    ax3 = fig.add_subplot(224)
    try:
        plot_datablock(ax3, loc, z, time, th, p, qv, u, v, title)
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        print("Error calculating sounding stats, datablock omitted")
    # wind barbs
    ax4 = fig.add_subplot(132)
    plot_wind_axes(ax4)
    plot_wind_barbs(ax4,z,p,u,v)
    # legend
    ax5 = fig.add_subplot(4,4,15)
    plot_legend(ax5)
    # Adjust plot margins.
    plt.subplots_adjust(left=0.03, bottom=0.03, right=0.97, top=0.97, wspace=0.12, hspace=0.12)
    plt.savefig(output, dpi=300,bbox_inches=0)
    plt.close()
def plot_sounding_axes(axes):
    """Plots Skew-T/Log-P axes

    This will plot isotherms, isobars, dry and moist adiabats,
    lines of constant water vapor mixing ratio, labels and
    setup the y axes to be reversed.

    :parameter axes: The axes to draw on
    """
    draw_isotherms(axes)
    draw_isobars(axes)
    draw_dry_adiabat(axes)
    draw_moist_adiabat(axes)
    draw_water_mix_ratio(axes)
    remove_tick_labels(axes)
    # pbot > ptop, so this axis() call already yields a descending y axis
    axes.axis([Tmin, Tmax, pbot, ptop])
    # NOTE(review): [::1] is a no-op copy (not [::-1]); the y axis is already
    # inverted by the axis() call above, so this line has no effect.
    axes.set_ylim(axes.get_ylim()[::1])
def plot_wind_axes(axes):
    """Prepare an invisible axes spanning the plot pressure range for wind barbs."""
    # plot wind barbs
    # TODO: also do storm-relative winds
    draw_wind_line(axes)
    axes.set_axis_off()
    # x is a dummy horizontal extent; y spans the plotted pressure range (Pa)
    axes.axis([-1,1,pbot,ptop])
def plot_hodo_axes(axes):
    """Plot hodograph axes

    This will plot range arcs and labels for a hodograph plot

    :parameter axes: The axes to draw on
    """
    bounds = [-25,25,-25,25]  # u/v extent of the hodograph (m/s)
    axes.axis('equal')
    draw_hodograph(axes, bounds)
    remove_tick_labels(axes)
    axes.axis(bounds)
def plot_legend(axes):
    """Plot skew-t legend

    Draws one legend entry per profile plotted on the skew-t, using the
    module-level line style constants, then hides the axes frame.
    """
    # (label, linestyle, linewidth, color), in display order
    entries = [
        (r'Temperature', '-', linewidth_T, linecolor_T),
        (r'Virtual Temperature', linestyle_Tve, linewidth_Tve, linecolor_Tve),
        (r'Dew-point Temperature', '-', linewidth_Td, linecolor_Td),
        (r'Wet-bulb Temperature', '-', linewidth_Twb, linecolor_Twb),
        (r'Lifted Surface Parcel Temperature', '-', linewidth_Parcel_T, linecolor_Parcel_T),
        (r'Lifted Surface Parcel Virtual Temperature', linestyle_Tvp, linewidth_Tvp, linecolor_Tvp),
    ]
    handles = [Line2D(range(10), range(10), linestyle=style, marker='',
                      linewidth=width, color=color)
               for _, style, width, color in entries]
    labels = [text for text, _, _, _ in entries]
    plt.legend(handles, labels, loc=(0.125, 0), fontsize=6, handlelength=10)
    axes.set_axis_off()
def plot_wind(axes, z, p, u, v, x=0):
    # Draw one wind barb per level at horizontal position x, skipping any
    # level above the top of the plotted pressure range.
    for k in range(len(z)):
        if p[k] > pt_plot:
            plt.barbs(x, p[k], u[k], v[k], length=5, linewidth=.5)
def plot_sounding(axes, z, th, p, qv, u = None, v = None):
    """Plot sounding data

    This plots temperature, dewpoint and wind data on a Skew-T/Log-P plot.
    This will also plot derived values such as wetbulb temperature and
    label the surface based LCL, LFC and EL.

    :parameter z: height values (1D array)
    :parameter th: potential temperature at z heights (1D array)
    :parameter p: pressure at z heights (1D array)
    :parameter qv: water vapor mixing ratio at z heights (1D array)
    :parameter u: U component of wind at z heights (1D array)
    :parameter v: V component of wind at z heights (1D array)
    :parameter axes: The axes instance to draw on
    """
    # calculate Temperature and dewpoint
    T = met.T(th,p) - met.T00 # T (C)
    Td = met.Td(p, qv) - met.T00 # Td (C)
    # calculate wetbulb temperature
    Twb = np.empty(len(z), np.float32) # Twb (C)
    for zlvl in range(len(z)):
        Twb[zlvl] = met.Twb(z, p, th, qv, z[zlvl])
    # Get surface parcel CAPE and temperature / height profiles
    pcl = met.CAPE(z, p, T+met.T00, qv, 1) # CAPE
    T_parcel = pcl['t_p'] - met.T00 # parcel T (C)
    T_vparcel = pcl['tv_p'] - met.T00 # parcel Tv (C)
    T_venv = met.T(pcl['thv_env'], pcl['pp']) - met.T00 # Env Tv (C)
    # plot Temperature, dewpoint, wetbulb and lifted surface parcel profiles
    # on skew axes; skew(p) shifts each point rightward with height.
    # NOTE(review): the `basey` keyword was renamed `base` in matplotlib 3.3+.
    axes.semilogy(T + skew(p), p, basey=math.e, color=linecolor_T , linewidth = linewidth_T)
    axes.semilogy(Td + skew(p), p, basey=math.e, color=linecolor_Td, linewidth = linewidth_Td)
    axes.semilogy(T_parcel + skew(pcl['pp']), pcl['pp'], basey=math.e,
                  color=linecolor_Parcel_T, linewidth=linewidth_Parcel_T)
    axes.semilogy(Twb + skew(p), p, basey=math.e, color=linecolor_Twb, linewidth=linewidth_Twb)
    # plot virtual temperature of environment and lifted parcel
    axes.semilogy(T_venv + skew(pcl['pp']), pcl['pp'], basey=math.e, color=linecolor_Tve,
                  linewidth=linewidth_Tve, linestyle=linestyle_Tve)
    axes.semilogy(T_vparcel + skew(pcl['pp']), pcl['pp'], basey=math.e, color=linecolor_Tvp,
                  linewidth=linewidth_Tvp, linestyle=linestyle_Tvp)
    # Add labels for levels based on surface parcel
    #debug print(pcl['lfcprs'], pcl['lclprs'], pcl['elprs'], pcl['ptops'])
    if (pcl['lfcprs'] > 0):
        label_m(Tmax-.5, pcl['lfcprs'], '--LFC', axes)
    if (pcl['lclprs'] > 0):
        label_m(Tmax-.5, pcl['lclprs'], '--LCL', axes)
    if (pcl['elprs'] > 0):
        label_m(Tmax-.5, pcl['elprs'], '--EL', axes)
    if (pcl['ptops'] > 0):
        label_m(Tmax-.5, pcl['ptops'], '--TOPS', axes)
    # plot labels for std heights
    for plvl in plevs_std:
        zlvl = pymeteo.interp.interp_height(z,p,plvl)
        label_m(Tmin-.5,plvl, str(int(zlvl)), axes)
    # plot wind barbs on left side of plot. move this? right side?
    if (u is not None and v is not None):
        #draw_wind_line(axes)
        for i in np.arange(0,len(z),2):
            if (p[i] > pt_plot):
                plt.barbs(Tmin+4,p[i],u[i],v[i], length=5, linewidth=.5)
def plot_wind_barbs(axes, z, p, u, v):
    # Wind barbs along the dedicated barb axis (x = 0), one per level that
    # lies below the top of the plotted pressure range.
    for k in range(len(z)):
        if p[k] > pt_plot:
            plt.barbs(0, p[k], u[k], v[k], length=5, linewidth=.5)
def plot_hodograph(axes, z, u, v):
    """Plot Hodograph data

    This plots u and v winds vs height on a hodograph.

    :parameter z: height values (1D array)
    :parameter u: U component of wind at z heights (1D array)
    :parameter v: V component of wind at z heights (1D array)
    :parameter axes: The axes instance to draw on
    """
    # Find the first index above 12 km; the trace is only drawn below that.
    # (The old local name `z6km` was misleading -- the cutoff is 12 km.)
    ktop = 0
    try:
        while z[ktop] <= 12000:
            ktop += 1
    except IndexError:
        # sounding does not reach 12 km: draw the whole profile
        ktop = len(z)-1
    axes.plot(u[0:ktop], v[0:ktop], color='black', linewidth=1.5)
    # mark and label the wind at each km from the surface up to 6 km
    for zlvl in np.arange(0,7000,1000):
        ulvl = pymeteo.interp.linear(z,u,zlvl)
        vlvl = pymeteo.interp.linear(z,v,zlvl)
        #print('calculating winds at height {0} = ({1},{2})'.format(zlvl,ulvl,vlvl))
        label_h2(ulvl+1,vlvl-1,str(zlvl/1000), 'black', 0, axes)
        axes.plot(ulvl,vlvl, color='black', markersize=5, marker='.')
    #TODO: fix this
    # Bunkers (2000) storm-motion estimates (right and left movers)
    try:
        ucb = dyn.storm_motion_bunkers(u,v,z)
        axes.plot(ucb[0],ucb[1],markersize=4,color='black',marker='x')
        axes.plot(ucb[2],ucb[3],markersize=4,color='black',marker='x')
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        print("Error calculating sounding stats, storm motion marker not plotted")
def calc_sounding_stats(_z, _th, _p, _qv):
    """Compute parcel diagnostics for a sounding.

    Returns a tuple of parcel dicts:
    (surface-based, most-unstable, 500 m mixed-layer).
    """
    temperature = met.T(_th, _p)                           # T (K)
    surface_pcl = met.CAPE(_z, _p, temperature, _qv, 1)    # CAPE
    unstable_pcl = met.CAPE(_z, _p, temperature, _qv, 2)   # MUCAPE
    mixed_pcl = met.CAPE(_z, _p, temperature, _qv, 3)      # MLCAPE
    return (surface_pcl, unstable_pcl, mixed_pcl)
def calc_hodograph_stats(_z, _u, _v):
    """Compute storm motion, helicity and bulk shear from a wind profile.

    :parameter _z: height values (1D array)
    :parameter _u: U wind component at _z heights (1D array)
    :parameter _v: V wind component at _z heights (1D array)
    :returns: dict with Bunkers storm motion ('bunkers'), 0-1/0-3 km
              storm-relative and environmental helicity, and (direction,
              magnitude) shear for several fixed layers.
    """
    try:
        ucb = dyn.storm_motion_bunkers(_u,_v,_z)
        # SRH relative to the Bunkers right-mover motion
        srh01 = dyn.srh(_u, _v, _z, 0., 1000., ucb[0], ucb[1])
        srh03 = dyn.srh(_u, _v, _z, 0., 3000., ucb[0], ucb[1])
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        print("Error calculating storm motion")
        ucb = [0.,0.,0.,0.]
        srh01 = 0.
        srh03 = 0.
    try:
        # "environmental" helicity relative to a stationary storm
        erh01 = dyn.srh(_u, _v, _z, 0., 1000., 0., 0.)
        erh03 = dyn.srh(_u, _v, _z, 0., 3000., 0., 0.)
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        print("Error calculating erh")
        erh01 = 0.
        erh03 = 0.
    # bulk shear (direction, speed) over fixed layers (m AGL)
    shear01 = dyn.uv_to_deg(*dyn.shear(_u, _v, _z, 0., 1000.))
    shear03 = dyn.uv_to_deg(*dyn.shear(_u, _v, _z, 0., 3000.))
    shear06 = dyn.uv_to_deg(*dyn.shear(_u, _v, _z, 0., 6000.))
    shear12 = dyn.uv_to_deg(*dyn.shear(_u, _v, _z, 1000., 2000.))
    shear23 = dyn.uv_to_deg(*dyn.shear(_u, _v, _z, 2000., 3000.))
    shear34 = dyn.uv_to_deg(*dyn.shear(_u, _v, _z, 3000., 4000.))
    shear45 = dyn.uv_to_deg(*dyn.shear(_u, _v, _z, 4000., 5000.))
    shear56 = dyn.uv_to_deg(*dyn.shear(_u, _v, _z, 5000., 6000.))
    # renamed from `dict` to avoid shadowing the builtin
    stats = { 'bunkers' : ucb,
              'srh01' : srh01,
              'srh03' : srh03,
              'erh01' : erh01,
              'erh03' : erh03,
              's01' : shear01,
              's03' : shear03,
              's06' : shear06,
              's12' : shear12,
              's23' : shear23,
              's34' : shear34,
              's45' : shear45,
              's56' : shear56
            }
    return stats
def plot_datablock(ax4, _x,_z,_t,_th,_p,_qv,_u,_v, _title):
    """Render the text block of derived sounding/hodograph statistics.

    :parameter ax4: The axes to draw the text into (frame is turned off)
    :parameter _x: location string (or None)
    :parameter _z: heights (1D array)
    :parameter _t: time string (or None)
    :parameter _th: potential temperature at _z (1D array)
    :parameter _p: pressure at _z (1D array)
    :parameter _qv: water vapor mixing ratio at _z (1D array)
    :parameter _u: U wind at _z (1D array)
    :parameter _v: V wind at _z (1D array)
    :parameter _title: heading text for the block
    """
    pcl, mupcl, mlpcl = calc_sounding_stats(_z, _th, _p, _qv)
    shear = calc_hodograph_stats(_z, _u, _v)
    brn = dyn.brn(_u, _v, _z, pcl['cape'])
    # draw datablock
    ax4.set_axis_off()
    plt.axis([-1,1,-1,1])
    plt.text(0,1,_title, verticalalignment='top', horizontalalignment='center', weight='bold', fontsize=10)
    # sounding location / time / level count summary line
    line = ""
    if (_x != None):
        line += "Sounding at location " + str(_x) + "."
    if (_t != None):
        line += " Time = " + str(_t) + "."
    line += " " + str(len(_z)) + ' vertical levels'
    plt.text(0,.85, line, verticalalignment='center', horizontalalignment='center', fontsize=5)
    cth,cr = dyn.uv_to_deg(shear['bunkers'][0],shear['bunkers'][1])
    line = 'Estimated storm motion (supercell right mover) -> {0:3d}$^\circ$ {1:3.1f} m/s'.format(int(cth),cr)
    plt.text(-1,.75, line, verticalalignment='center', horizontalalignment='left', fontsize=5)
    # three side-by-side parcel tables
    print_parcel_info('Surface Parcel', pcl, -1., .65)
    print_parcel_info('Most Unstable Parcel', mupcl, -0.3, .65)
    print_parcel_info('500 m Mixed Layer Parcel', mlpcl, 0.4, .65)
    # LCL, CCL, EL, convective temp?
    # other data?
    # hodograph statistics column, laid out by decrementing y per row
    x = 0.4
    y = 0
    line = 'Hodograph'
    plt.text(x,y, line, verticalalignment='center', horizontalalignment='left', fontsize=5)
    x += 0.02
    y -= 0.065
    line = '0-1 km shear {0:3d}$^\circ$ {1:3.1f} m/s'.format(int(shear['s01'][0]),shear['s01'][1])
    plt.text(x,y, line, verticalalignment='center', horizontalalignment='left', fontsize=5)
    y -= 0.05
    line = '0-3 km shear {0:3d}$^\circ$ {1:3.1f} m/s'.format(int(shear['s03'][0]),shear['s03'][1])
    plt.text(x,y, line, verticalalignment='center', horizontalalignment='left', fontsize=5)
    y -= 0.05
    line = '0-6 km shear {0:3d}$^\circ$ {1:3.1f} m/s'.format(int(shear['s06'][0]),shear['s06'][1])
    plt.text(x,y, line, verticalalignment='center', horizontalalignment='left', fontsize=5)
    y -= 0.05
    line = 'SRH 0-1 : {0:d} m2/s2'.format(int(shear['srh01']))
    plt.text(x,y, line, verticalalignment='center', horizontalalignment='left', fontsize=5)
    y -= 0.05
    line = 'SRH 0-3 : {0:d} m2/s2'.format(int(shear['srh03']))
    plt.text(x,y, line, verticalalignment='center', horizontalalignment='left', fontsize=5)
    y -= 0.05
    line = 'ERH 0-1 : {0:d} m2/s2'.format(int(shear['erh01']))
    plt.text(x,y, line, verticalalignment='center', horizontalalignment='left', fontsize=5)
    y -= 0.05
    line = 'ERH 0-3 : {0:d} m2/s2'.format(int(shear['erh03']))
    plt.text(x,y, line, verticalalignment='center', horizontalalignment='left', fontsize=5)
    y -= 0.05
    line = 'BRN : {0:d}'.format(int(brn))
    plt.text(x,y, line, verticalalignment='center', horizontalalignment='left', fontsize=5)
    y -= 0.05
    # layer-by-layer shear list (note: the 0-1 km value repeats the row above)
    line = '0-1 km shear {0:3d}$^\circ$ {1:3.1f} m/s'.format(int(shear['s01'][0]),shear['s01'][1])
    plt.text(x,y, line, verticalalignment='center', horizontalalignment='left', fontsize=5)
    y -= 0.05
    line = '1-2 km shear {0:3d}$^\circ$ {1:3.1f} m/s'.format(int(shear['s12'][0]),shear['s12'][1])
    plt.text(x,y, line, verticalalignment='center', horizontalalignment='left', fontsize=5)
    y -= 0.05
    line = '2-3 km shear {0:3d}$^\circ$ {1:3.1f} m/s'.format(int(shear['s23'][0]),shear['s23'][1])
    plt.text(x,y, line, verticalalignment='center', horizontalalignment='left', fontsize=5)
    y -= 0.05
    line = '3-4 km shear {0:3d}$^\circ$ {1:3.1f} m/s'.format(int(shear['s34'][0]),shear['s34'][1])
    plt.text(x,y, line, verticalalignment='center', horizontalalignment='left', fontsize=5)
    y -= 0.05
    line = '4-5 km shear {0:3d}$^\circ$ {1:3.1f} m/s'.format(int(shear['s45'][0]),shear['s45'][1])
    plt.text(x,y, line, verticalalignment='center', horizontalalignment='left', fontsize=5)
    y -= 0.05
    line = '5-6 km shear {0:3d}$^\circ$ {1:3.1f} m/s'.format(int(shear['s56'][0]),shear['s56'][1])
    plt.text(x,y, line, verticalalignment='center', horizontalalignment='left', fontsize=5)
def print_3col(name, value, unit, x, y):
    # One row of a three-column table: name (left-aligned), value
    # (right-aligned), unit (left-aligned), all on the same baseline y.
    shared = dict(verticalalignment='center', fontsize=5)
    plt.text(x, y, name, horizontalalignment='left', **shared)
    plt.text(x + .35, y, value, horizontalalignment='right', **shared)
    plt.text(x + .4, y, unit, horizontalalignment='left', **shared)
def print_parcel_info(title, pcl, x, y):
    # Header line, then an indented table of parcel diagnostics rendered
    # row by row via print_3col, stepping y down 0.05 per row.
    plt.text(x, y, title, verticalalignment='center', horizontalalignment='left', fontsize=5)
    x += 0.02
    y -= 0.065
    rows = [
        ('CAPE', '{0}'.format(int(pcl['cape'])), 'J kg$^{-1}$'),
        ('CIN', '{0}'.format(int(pcl['cin'])), 'J kg$^{-1}$'),
        ('TOPS', '{0:4.2f}'.format(float(pcl['ztops'])), 'km'),
        (r'$\theta_e$', '{0:4.1f}'.format(float(pcl['theta_e'])), 'K'),
        ('LI$_{MAX}$', '{0:3.1f}'.format(float(pcl['max_li'])), 'C'),
        ('LI$_{500}$', '{0:3.1f}'.format(float(pcl['li500'])), 'C'),
        ('LI$_{300}$', '{0:3.1f}'.format(float(pcl['li300'])), 'C'),
        ('Parcel', '{0}'.format(int(pcl['prs']/100.)), 'mb'),
    ]
    for name, value, unit in rows:
        print_3col(name, value, unit, x, y)
        y -= 0.05
def remove_tick_labels(axes):
    """Hide all tick marks and tick labels on both axes of *axes*."""
    # tick visibility expects booleans; the string 'off' was accepted by old
    # matplotlib but is rejected by matplotlib >= 3.0.
    axes.tick_params(axis='x', top=False, bottom=False, which='both')
    axes.tick_params(axis='y', left=False, right=False, which='both')
    for tick_label in axes.get_xticklabels():
        tick_label.set_visible(False)
        tick_label.set_fontsize(0.0)
    for tick_label in axes.get_yticklabels():
        tick_label.set_fontsize(0.0)
        tick_label.set_visible(False)
def set_fontscalefactor(newfactor):
    """Set the module-wide font scale factor used by the label helpers.

    :param newfactor: multiplier applied to all label font sizes
    """
    # Without the global declaration the assignment only created a local
    # variable and this function silently had no effect.
    global fontscalefactor
    fontscalefactor = newfactor
def label(x, y, s, c, r, axes):
axes.text(x,y*100,s, verticalalignment='center', horizontalalignment='center', weight='bold', fontsize=5*fontscalefactor, color=c, rotation=r)
def label_h(x, y, s, c, r, axes):
    """Draw normal-weight centered text *s* at (x, y) in color *c*, rotated *r*."""
    style = dict(verticalalignment='center', horizontalalignment='center',
                 weight='normal', fontsize=7 * fontscalefactor,
                 color=c, rotation=r)
    axes.text(x, y, s, **style)
def label_h2(x, y, s, c, r, axes):
    """Draw large bold centered text *s* at (x, y) in color *c*, rotated *r*."""
    style = dict(verticalalignment='center', horizontalalignment='center',
                 weight='bold', fontsize=10 * fontscalefactor,
                 color=c, rotation=r)
    axes.text(x, y, s, **style)
def label_m(x, y, s, axes):
    """Draw small right-aligned text *s* at (x, y)."""
    axes.text(x, y, s, fontsize=4 * fontscalefactor,
              verticalalignment='center', horizontalalignment='right')
#draw hodograph u-v space
def draw_hodograph(axes, bounds):
    """Draw the u-v axes, tick labels, and 5-unit speed rings of a hodograph.

    :parameter axes: the axes to draw on
    :parameter bounds: (xmin, xmax, ymin, ymax) in wind-component units
    """
    xmin, xmax, ymin, ymax = bounds
    gmax = max(xmax, ymax)
    gmin = min(xmin, ymin)
    # u and v axis lines through the origin
    axes.plot([gmin, gmax], [0, 0], color='black', linewidth=.5)
    axes.plot([0, 0], [gmin, gmax], color='black', linewidth=.5)
    # numeric labels every 5 units along each axis (origin skipped)
    for u in np.arange(xmin + 1, xmax):
        if u % 5 == 0 and u != 0:
            label_h(u, -1, str(u), 'black', 0, axes)
    for v in np.arange(ymin + 1, ymax):
        if v % 5 == 0 and v != 0:
            label_h(-1, v, str(v), 'black', 0, axes)
    # concentric speed rings out to the corner radius
    for radius in np.arange(5, math.sqrt(xmax**2 + ymax**2) + 1, 5):
        axes.plot(*hodograph_circle(radius), color='grey', linewidth=.3)
# helper function for circle points
def hodograph_circle(r):
    """Return (x, y) coordinate arrays of a circle of radius *r* about the origin."""
    angles = np.arange(0., 2 * math.pi, 0.01)
    return r * np.cos(angles), r * np.sin(angles)
# draws a vertical line axis upon which to draw wind barbs
def draw_wind_line(axes):
    """Draw the vertical u=0 reference line on which wind barbs are plotted."""
    zeros = [0 for _ in plevs_plot]
    axes.semilogy(zeros, plevs_plot, color='black', linewidth=.5)
    # dot markers at the standard pressure levels
    for p in plevs_std:
        axes.semilogy(0, p, color='black', markersize=3, marker='.')
# Puts the skew in skew-T
def skew(p):
    """Puts the skew in skew-T
    :parameter p: pressure level of the point.
    This calculates the skew of the T axis for a point to plot.
    This assumes a logarithmic y axes and uses the variable
    :py:data:skew_angle to determine the skew. This is the
    magic that turns a cartesian plot into a Skew-T/Log-p plot.
    """
    # Offset is zero at the reference pressure met.p00 and grows with
    # log(p00/p), i.e. with height, so isotherms slant up and to the right.
    return skew_angle * np.log(met.p00/p)
# Draw isotherms on skew-T / log p axes
def draw_isotherms(axes):
    """Draw skewed isotherms and their labels.

    :parameter axes: The axes to draw on
    :type axes: :py:class:`matplotlib.axes`
    One curve per entry of :py:data:`isotherms`; multiples of 10 C get the
    major line style.
    """
    for T in isotherms:
        major = (T % 10 == 0)
        axes.semilogy(T + skew(plevs_plot), plevs_plot, basey=math.e,
                      color=lc_major if major else lc_minor,
                      linewidth=lw_major if major else lw_minor)
    # Warm isotherms are labelled near 875 mb, cold ones near 175 mb.
    for T in np.arange(-40, 40, 10):
        label(T + skew(87500), 875, str(T), 'red', 90. - skew_angle, axes)
    for T in np.arange(-100, -20, 10):
        label(T + skew(17500), 175, str(T), 'red', 90. - skew_angle, axes)
def draw_isobars(axes):
    """Draw horizontal isobars and label them every 50 mb.

    :parameter axes: The axes to draw on
    :type axes: :py:class:`matplotlib.axes`
    One line per entry of :py:data:`isobars`; pressures (Pa) that are
    multiples of 5000 get the major line style.
    """
    for p in isobars:
        major = (p % 5000 == 0)
        axes.plot([Tmin, Tmax], [p, p],
                  color=lc_major if major else lc_minor,
                  linewidth=lw_major if major else lw_minor)
    for p in np.arange(1000, 100, -50):
        # nudge the label left as pressure decreases so it tracks the skew
        label(-10 - ((1000 - p) * .025), p, str(p), 'black', 0, axes)
def draw_dry_adiabat(axes):
    """Draw dry adiabats and their labels.

    :parameter axes: The axes to draw on
    :type axes: :py:class:`matplotlib.axes`
    One curve per entry of :py:data:`dry_adiabats`; potential temperatures
    that are multiples of 10 get the major line style.
    """
    for T in dry_adiabats:
        curve = met.T(T + met.T00, plevs_plot) - met.T00 + skew(plevs_plot)
        major = (T % 10 == 0)
        axes.semilogy(curve, plevs_plot, basey=math.e,
                      color=lc_major if major else lc_minor,
                      linewidth=lw_major if major else lw_minor)
    # Label each adiabat, rotating the text to follow the local curve slope
    # estimated from a central difference over half a plot interval.
    for T in np.arange(-20, 150, 10):
        p = (600. - 3.5 * T) * 100.
        x = met.T(T + met.T00, p) - met.T00 + skew(p)
        x1 = met.T(T + met.T00, p + .5 * dp_plot) - met.T00 + skew(p + .5 * dp_plot)
        x2 = met.T(T + met.T00, p - .5 * dp_plot) - met.T00 + skew(p - .5 * dp_plot)
        dx = x2 - x1
        angle = math.atan2(-dp_plot, -dx) * 180 / math.pi + 37
        label(x, p / 100, str(T), 'black', angle, axes)
def draw_moist_adiabat(axes):
    """Plot moist adiabats on axes
    :parameter axes: The axes to draw on
    :type axes: :py:class:`matplotlib.axes`
    This function calculates moist adiabats
    and plots these lines. Adiabats are calculated for
    values of T at 1000mb from -15 to 45 C every 5 C between
    -15 and 10 C and every 2.5 C between 12.5 and 45 C.
    """
    # Levels below 1000 mb (p > 100000 Pa), ordered nearest-to-1000mb first
    # so the integration can march away from the 1000 mb anchor.
    ps_blo = [p for p in plevs_plot if p > 100000]
    ps_blo.reverse()
    # Levels above 1000 mb, integrated in the forward (decreasing p) direction.
    ps = [p for p in plevs_plot2 if p < 100000]
    for T in moist_adiabats:
        # T_1000 remembers the starting temperature (Kelvin) at 1000 mb;
        # T itself is mutated by the stepwise integration below.
        T_1000 = T = T + met.T00
        moist_adiabat = []
        # work backwards from 1000mb
        for p in ps_blo:
            T += met.dTdp_moist(T,p) * dp_plot
            moist_adiabat.append(T - met.T00 + skew(p))
        #reverse list order
        moist_adiabat.reverse()
        # insert 1000mb point
        T = T_1000
        moist_adiabat.append(T - met.T00)
        # work forwards from 1000mb
        for p in ps:
            T -= met.dTdp_moist(T,p) * dp_plot
            moist_adiabat.append(T - met.T00 + skew(p))
            # draw labels
            if (p == 22000):
                if (T_1000 >= met.T00 and T_1000 <= 30+met.T00):
                    label(T-met.T00+skew(p),p/100,str(int(T_1000-met.T00)),'green', 0, axes)
        # NOTE(review): assumes plevs_plot2 lines up with
        # reversed(ps_blo) + [100000 Pa] + ps — confirm the lengths match.
        # Heavier line where the 1000 mb temperature (C) is a multiple of 5.
        if (int(T_1000 - met.T00) % 5 == 0):
            axes.semilogy(moist_adiabat, plevs_plot2, basey=math.e, color = lc_major, linewidth = lw_major)
        else:
            axes.semilogy(moist_adiabat, plevs_plot2, basey=math.e, color = lc_minor, linewidth = lw_minor)
def draw_water_mix_ratio(axes):
    """Draw dashed isolines of constant water-vapor mixing ratio and label them.

    :parameter axes: The axes to draw on
    :type axes: :py:class:`matplotlib.axes`
    One isoline per value in :py:data:`mixing_ratios`, drawn between
    1050 mb and 200 mb.
    """
    #TODO: put w and the top plevel for plotting somewhere configurable
    levels = [p for p in plevs if 20000 <= p <= 105000]
    for W in mixing_ratios:
        isoline = [TMR(W, p / 100.) + skew(p) for p in levels]
        axes.semilogy(isoline, levels, basey=math.e, color='grey',
                      linestyle='--', linewidth=.5)
        # Label the isoline just below the bottom of the plot.
        T_label = TMR(W, 1075.)
        label(T_label + skew(107500.), 1075, str(W), 'black', -15, axes)
def label_std_heights(axes):
    """Plot geopotential heights (m) of standard pressure levels.

    :parameter axes: The axes to draw on
    :type axes: :py:class:`matplotlib.axes`
    Heights follow the U.S. Standard Atmosphere.
    """
    xpos = Tmin+1.5
    # (pressure mb, height m). Bug fix: the 925 mb entry read 2512 m, which
    # matches no standard level and broke the monotonic height sequence;
    # the U.S. Standard Atmosphere value is ~762 m.
    std_heights = [(1000,111),(925,762),(850,1457),(700,3012),(500,5574),(400,7185),
                   (300,9164),(250,10363),(200,11784),(150,13608)]#,(100,16180)]
    for p,z in std_heights:
        label(xpos, p, str(z), 'black', 0, axes)
def TMR(W, p):
    """Temperature (C) on the mixing-ratio line W at pressure p.

    W is in g/kg dry air, p in millibars. Empirical fit; see the
    TODO in the original about replacing it.
    """
    # TODO: change this to something else?
    log_term = np.log10(W * p / (622 + W))
    linear_part = 10 ** (0.0498646455 * log_term + 2.4082965)
    quadratic_part = 38.9114 * ((10 ** (0.0915 * log_term) - 1.2035) ** 2)
    return linear_part - 280.23475 + quadratic_part
|
bsd-3-clause
|
sauloal/cnidaria
|
scripts/venv/lib/python2.7/site-packages/matplotlib/rcsetup.py
|
10
|
33270
|
"""
The rcsetup module contains the default values and the validation code for
customization using matplotlib's rc settings.
Each rc setting is assigned a default value and a function used to validate
any attempted changes to that setting. The default values and validation
functions are defined in the rcsetup module, and are used to construct the
rcParams global object which stores the settings and is referenced throughout
matplotlib.
These default values should be consistent with the default matplotlibrc file
that actually reflects the values given here. Any additions or deletions to the
parameter set listed here should also be visited to the
:file:`matplotlibrc.template` in matplotlib's root source directory.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import warnings
from matplotlib.fontconfig_pattern import parse_fontconfig_pattern
from matplotlib.colors import is_color_like
#interactive_bk = ['gtk', 'gtkagg', 'gtkcairo', 'qt4agg',
# 'tkagg', 'wx', 'wxagg', 'cocoaagg', 'webagg']
# The capitalized forms are needed for ipython at present; this may
# change for later versions.
# Interactive backends drive a GUI event loop; capitalization matters here
# (see the note above about ipython).
interactive_bk = ['GTK', 'GTKAgg', 'GTKCairo', 'MacOSX',
                  'Qt4Agg', 'Qt5Agg', 'TkAgg', 'WX', 'WXAgg', 'CocoaAgg',
                  'GTK3Cairo', 'GTK3Agg', 'WebAgg', 'nbAgg']
# Non-interactive backends render to files only.
non_interactive_bk = ['agg', 'cairo', 'emf', 'gdk',
                      'pdf', 'pgf', 'ps', 'svg', 'template']
# Every backend name accepted by validate_backend (module:// aside).
all_backends = interactive_bk + non_interactive_bk
class ValidateInStrings:
    """Callable validator mapping a string onto one of a fixed set of legal
    values, optionally ignoring case; raises ValueError otherwise."""
    def __init__(self, key, valid, ignorecase=False):
        'valid is a list of legal strings'
        self.key = key
        self.ignorecase = ignorecase
        if ignorecase:
            self.valid = dict((k.lower(), k) for k in valid)
        else:
            self.valid = dict((k, k) for k in valid)

    def __call__(self, s):
        lookup = s.lower() if self.ignorecase else s
        if lookup in self.valid:
            return self.valid[lookup]
        raise ValueError('Unrecognized %s string "%s": valid strings are %s'
                         % (self.key, lookup, list(six.itervalues(self.valid))))
def validate_any(s):
    """Accept any value unchanged (no validation performed)."""
    return s
def validate_path_exists(s):
    """Return *s* if it is None or an existing path; raise RuntimeError otherwise."""
    if s is None:
        return None
    if not os.path.exists(s):
        raise RuntimeError('"%s" should be a path but it does not exist' % s)
    return s
def validate_bool(b):
    """Convert b to a boolean or raise"""
    if isinstance(b, six.string_types):
        b = b.lower()
    if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True):
        return True
    if b in ('f', 'n', 'no', 'off', 'false', '0', 0, False):
        return False
    raise ValueError('Could not convert "%s" to boolean' % b)
def validate_bool_maybe_none(b):
    """Convert b to a boolean, passing None (or 'none') through, or raise."""
    if isinstance(b, six.string_types):
        b = b.lower()
    if b is None or b == 'none':
        return None
    if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True):
        return True
    if b in ('f', 'n', 'no', 'off', 'false', '0', 0, False):
        return False
    raise ValueError('Could not convert "%s" to boolean' % b)
def validate_float(s):
    """Coerce *s* to float, raising ValueError with context on failure."""
    try:
        result = float(s)
    except ValueError:
        raise ValueError('Could not convert "%s" to float' % s)
    return result
def validate_float_or_None(s):
    """Coerce *s* to float, passing None through, raising ValueError otherwise."""
    if s is None:
        return None
    try:
        result = float(s)
    except ValueError:
        raise ValueError('Could not convert "%s" to float' % s)
    return result
def validate_int(s):
    """Coerce *s* to int, raising ValueError with context on failure."""
    try:
        result = int(s)
    except ValueError:
        raise ValueError('Could not convert "%s" to int' % s)
    return result
def validate_fonttype(s):
    """
    confirm that this is a Postscript of PDF font type that we know how to
    convert to
    """
    fonttypes = {'type3': 3,
                 'truetype': 42}
    try:
        fonttype = validate_int(s)
    except ValueError:
        # Not an int: accept the symbolic names, case-insensitively.
        name = s.lower()
        if name in six.iterkeys(fonttypes):
            return fonttypes[name]
        raise ValueError(
            'Supported Postscript/PDF font types are %s' %
            list(six.iterkeys(fonttypes)))
    if fonttype not in six.itervalues(fonttypes):
        raise ValueError(
            'Supported Postscript/PDF font types are %s' %
            list(six.itervalues(fonttypes)))
    return fonttype
# Case-insensitive validator over every known backend name.
_validate_standard_backends = ValidateInStrings('backend',
                                                all_backends,
                                                ignorecase=True)
def validate_backend(s):
    """Accept module:// backends verbatim; otherwise require a known backend name."""
    if s.startswith('module://'):
        return s
    return _validate_standard_backends(s)
# Allowed Python bindings for the Qt4/Qt5 backends.
validate_qt4 = ValidateInStrings('backend.qt4', ['PyQt4', 'PySide', 'PyQt4v2'])
validate_qt5 = ValidateInStrings('backend.qt5', ['PyQt5'])
def validate_toolbar(s):
    """Validate the 'toolbar' rc value: 'None' or 'toolbar2', case-insensitive."""
    # A fresh validator per call mirrors the original behaviour.
    checker = ValidateInStrings(
        'toolbar',
        ['None', 'toolbar2'],
        ignorecase=True)
    return checker(s)
def validate_maskedarray(v):
    """Obsolete 'maskedarray' rc key: accept and warn; the value has no effect."""
    # 2008/12/12: start warning; later, remove all traces of maskedarray
    try:
        if v == 'obsolete':
            return v
    except ValueError:
        # NOTE(review): '==' against array-like values can raise when the
        # result's truth value is ambiguous; such values are treated as
        # not-'obsolete' and fall through to the warning.
        pass
    warnings.warn('rcParams key "maskedarray" is obsolete and has no effect;\n'
                  ' please delete it from your matplotlibrc file')
# Templates for the length-mismatch errors raised by the nseq validators below.
_seq_err_msg = ('You must supply exactly {n:d} values, you provided '
                '{num:d} values: {s}')
_str_err_msg = ('You must supply exactly {n:d} comma-separated values, '
                'you provided '
                '{num:d} comma-separated values: {s}')
class validate_nseq_float(object):
    """Validator factory: an instance coerces a comma-separated string or a
    sequence into exactly *n* floats, raising ValueError otherwise."""
    def __init__(self, n):
        self.n = n

    def __call__(self, s):
        """return a seq of n floats or raise"""
        if isinstance(s, six.string_types):
            parts = s.split(',')
            err_msg = _str_err_msg
        else:
            parts = s
            err_msg = _seq_err_msg
        if len(parts) != self.n:
            raise ValueError(err_msg.format(n=self.n, num=len(parts), s=parts))
        try:
            return [float(item) for item in parts]
        except ValueError:
            raise ValueError('Could not convert all entries to floats')
class validate_nseq_int(object):
    """Validator factory: an instance coerces a comma-separated string or a
    sequence into exactly *n* ints, raising ValueError otherwise."""
    def __init__(self, n):
        self.n = n

    def __call__(self, s):
        """return a seq of n ints or raise"""
        if isinstance(s, six.string_types):
            parts = s.split(',')
            err_msg = _str_err_msg
        else:
            parts = s
            err_msg = _seq_err_msg
        if len(parts) != self.n:
            raise ValueError(err_msg.format(n=self.n, num=len(parts), s=parts))
        try:
            return [int(item) for item in parts]
        except ValueError:
            raise ValueError('Could not convert all entries to ints')
def validate_color(s):
    'return a valid color arg'
    # Literal 'none' (any case) normalizes to the string 'None'.
    try:
        if s.lower() == 'none':
            return 'None'
    except AttributeError:
        pass
    if is_color_like(s):
        return s
    # Maybe a hex color with the '#' left off.
    hashed = '#' + s
    if is_color_like(hashed):
        return hashed
    # Last resort: a comma-separated numeric tuple like "0.1, 0.2, 0.3".
    colorarg = s
    msg = ''
    if s.find(',') >= 0:
        # strip any grouping symbols, keeping digits, dots and commas
        cleaned = ''.join(c for c in s if c.isdigit() or c == '.' or c == ',')
        vals = cleaned.split(',')
        if len(vals) != 3:
            msg = '\nColor tuples must be length 3'
        else:
            try:
                colorarg = [float(val) for val in vals]
            except ValueError:
                msg = '\nCould not convert all entries to floats'
    if not msg and is_color_like(colorarg):
        return colorarg
    raise ValueError('%s does not look like a color arg%s' % (s, msg))
def validate_colorlist(s):
    'return a list of colorspecs'
    if isinstance(s, six.string_types):
        return [validate_color(part.strip()) for part in s.split(',')]
    assert type(s) in [list, tuple]
    return [validate_color(part) for part in s]
def validate_stringlist(s):
    'return a list'
    if isinstance(s, six.string_types):
        # split on commas, dropping empty/whitespace-only entries
        return [six.text_type(part.strip()) for part in s.split(',') if part.strip()]
    assert type(s) in [list, tuple]
    return [six.text_type(item) for item in s if item]
# Page orientation for hardcopy output.
validate_orientation = ValidateInStrings(
    'orientation', ['landscape', 'portrait'])
def validate_aspect(s):
    """Accept 'auto'/'equal' unchanged, else coerce to float or raise ValueError."""
    if s in ('auto', 'equal'):
        return s
    try:
        result = float(s)
    except ValueError:
        raise ValueError('not a valid aspect specification')
    return result
def validate_fontsize(s):
    """Accept a named font size or anything coercible to float; raise otherwise."""
    if isinstance(s, six.string_types):
        s = s.lower()
    named_sizes = ['xx-small', 'x-small', 'small', 'medium', 'large', 'x-large',
                   'xx-large', 'smaller', 'larger']
    if s in named_sizes:
        return s
    try:
        return float(s)
    except ValueError:
        raise ValueError('not a valid font size')
def validate_font_properties(s):
    """Validate *s* by parsing it as a fontconfig pattern; return it unchanged.

    parse_fontconfig_pattern raises if the pattern is malformed.
    """
    parse_fontconfig_pattern(s)
    return s
# Math-text font-set choices.
validate_fontset = ValidateInStrings(
    'fontset',
    ['cm', 'stix', 'stixsans', 'custom'])
# Default math-text style names.
validate_mathtext_default = ValidateInStrings(
    'default',
    "rm cal it tt sf bf default bb frak circled scr regular".split())
# Verbosity levels for the legacy verbose-reporting machinery.
validate_verbose = ValidateInStrings(
    'verbose',
    ['silent', 'helpful', 'debug', 'debug-annoying'])
def deprecate_savefig_extension(value):
    """Warn that savefig.extension is deprecated and pass *value* through."""
    warnings.warn("savefig.extension is deprecated. Use savefig.format "
                  "instead. Will be removed in 1.4.x")
    return value
def update_savefig_format(value):
    """Map a legacy savefig.extension value onto savefig.format.

    The old savefig.extension could also have a value of "auto", but the
    new savefig.format does not, so 'auto' is rewritten to 'png'.
    """
    text = six.text_type(value)
    return 'png' if text == 'auto' else text
# Accepted PostScript paper sizes (case-insensitive).
validate_ps_papersize = ValidateInStrings(
    'ps_papersize',
    ['auto', 'letter', 'legal', 'ledger',
     'a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10',
     'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'b10',
     ], ignorecase=True)
def validate_ps_distiller(s):
    """Validate ps.usedistiller: None/False/'ghostscript'/'xpdf', else raise."""
    if isinstance(s, six.string_types):
        s = s.lower()
    if s in ('none', None):
        return None
    if s in ('false', False):
        return False
    if s in ('ghostscript', 'xpdf'):
        return s
    raise ValueError('matplotlibrc ps.usedistiller must either be none, '
                     'ghostscript or xpdf')
# Line join/cap styles and the (strict) contour negative-linestyle choices.
validate_joinstyle = ValidateInStrings('joinstyle',
                                       ['miter', 'round', 'bevel'],
                                       ignorecase=True)
validate_capstyle = ValidateInStrings('capstyle',
                                      ['butt', 'round', 'projecting'],
                                      ignorecase=True)
validate_negative_linestyle = ValidateInStrings('negative_linestyle',
                                                ['solid', 'dashed'],
                                                ignorecase=True)
def validate_negative_linestyle_legacy(s):
    """Accept 'solid'/'dashed', or a deprecated two-float dash specification."""
    try:
        return validate_negative_linestyle(s)
    except ValueError:
        dashes = validate_nseq_float(2)(s)
        warnings.warn("Deprecated negative_linestyle specification; use "
                      "'solid' or 'dashed'")
        return (0, dashes)  # (offset, (solid, blank))
def validate_tkpythoninspect(s):
    """Obsolete tk.pythoninspect setting: warn, then validate as a bool."""
    # Introduced 2010/07/05
    warnings.warn("tk.pythoninspect is obsolete, and has no effect")
    return validate_bool(s)
# Legal string values for legend.loc (case-insensitive).
validate_legend_loc = ValidateInStrings(
    'legend_loc',
    ['best',
     'upper right',
     'upper left',
     'lower left',
     'lower right',
     'right',
     'center left',
     'center right',
     'lower center',
     'upper center',
     'center'], ignorecase=True)
def deprecate_svg_embed_char_paths(value):
    """Warn that svg.embed_char_paths is deprecated and pass *value* through.

    Bug fix: the function previously dropped *value* (implicitly returning
    None), so validating the rc entry replaced the stored default True with
    None; deprecate_savefig_extension shows the intended pattern.
    """
    warnings.warn("svg.embed_char_paths is deprecated. Use "
                  "svg.fonttype instead.")
    return value
# How text is rendered in SVG output.
validate_svg_fonttype = ValidateInStrings('svg.fonttype',
                                          ['none', 'path', 'svgfont'])
def validate_hinting(s):
    """Validate text.hinting: a bool, or 'auto'/'native'/'either'/'none'."""
    if s in (True, False):
        return s
    lowered = s.lower()
    if lowered in ('auto', 'native', 'either', 'none'):
        return lowered
    raise ValueError("hinting should be 'auto', 'native', 'either' or 'none'")
# LaTeX engine used by the pgf backend.
validate_pgf_texsystem = ValidateInStrings('pgf.texsystem',
                                           ['xelatex', 'lualatex', 'pdflatex'])
# Supported animation writer backends.
validate_movie_writer = ValidateInStrings('animation.writer',
                                          ['ffmpeg', 'ffmpeg_file',
                                           'avconv', 'avconv_file',
                                           'mencoder', 'mencoder_file',
                                           'imagemagick', 'imagemagick_file'])
# On-disk frame formats for file-based animation writers.
validate_movie_frame_fmt = ValidateInStrings('animation.frame_format',
                                             ['png', 'jpeg', 'tiff', 'raw', 'rgba'])
# NOTE(review): the key is misleadingly named 'major'; this validates the
# axes.grid.which setting ('major'/'minor'/'both').
validate_axis_locator = ValidateInStrings('major', ['minor','both','major'])
def validate_bbox(s):
    """Validate savefig.bbox: 'tight' stays 'tight', 'standard' maps to None.

    Non-string input (e.g. the default None) falls through and returns None.
    """
    if isinstance(s, six.string_types):
        lowered = s.lower()
        if lowered == 'tight':
            return lowered
        if lowered == 'standard':
            return None
        raise ValueError("bbox should be 'tight' or 'standard'")
def validate_sketch(s):
    """Validate path.sketch: None/'none' -> None, else a 3-tuple of floats
    (scale, length, randomness).

    Robustness fix: unrecognized input types previously fell through to
    ``len(result)`` with ``result`` unbound (UnboundLocalError); the intended
    ValueError is raised instead.
    """
    if isinstance(s, six.string_types):
        s = s.lower()
    if s == 'none' or s is None:
        return None
    if isinstance(s, six.string_types):
        result = tuple([float(v.strip()) for v in s.split(',')])
    elif isinstance(s, (list, tuple)):
        result = tuple([float(v) for v in s])
    else:
        raise ValueError("path.sketch must be a tuple (scale, length, randomness)")
    if len(result) != 3:
        raise ValueError("path.sketch must be a tuple (scale, length, randomness)")
    return result
class ValidateInterval:
    """
    Value must be in interval [vmin, vmax]; each end may be open or closed.

    Calling an instance coerces the value to float and raises RuntimeError
    (the historical exception type, kept for existing callers) when the
    value cannot be converted or is out of range.
    """
    def __init__(self, vmin, vmax, closedmin=True, closedmax=True):
        self.vmin = vmin
        self.vmax = vmax
        self.cmin = closedmin
        self.cmax = closedmax

    def __call__(self, s):
        try:
            s = float(s)
        except (ValueError, TypeError):
            # Bug fix: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit; only conversion failures belong here.
            raise RuntimeError('Value must be a float; found "%s"' % s)
        if self.cmin and s < self.vmin:
            raise RuntimeError('Value must be >= %f; found "%f"' %
                               (self.vmin, s))
        elif not self.cmin and s <= self.vmin:
            raise RuntimeError('Value must be > %f; found "%f"' %
                               (self.vmin, s))
        if self.cmax and s > self.vmax:
            raise RuntimeError('Value must be <= %f; found "%f"' %
                               (self.vmax, s))
        elif not self.cmax and s >= self.vmax:
            raise RuntimeError('Value must be < %f; found "%f"' %
                               (self.vmax, s))
        return s
# a map from key -> [default value, converter/validator]
defaultParams = {
    'backend': ['Agg', validate_backend],  # agg is certainly present
    'backend_fallback': [True, validate_bool],  # agg is certainly present
    'backend.qt4': ['PyQt4', validate_qt4],
    'backend.qt5': ['PyQt5', validate_qt5],
    'webagg.port': [8988, validate_int],
    'webagg.open_in_browser': [True, validate_bool],
    'webagg.port_retries': [50, validate_int],
    'nbagg.transparent': [True, validate_bool],
    'toolbar': ['toolbar2', validate_toolbar],
    'datapath': [None, validate_path_exists],  # handled by
                                               # _get_data_path_cached
    'interactive': [False, validate_bool],
    'timezone': ['UTC', six.text_type],

    # the verbosity setting
    'verbose.level': ['silent', validate_verbose],
    'verbose.fileo': ['sys.stdout', six.text_type],

    # line props
    'lines.linewidth': [1.0, validate_float],  # line width in points
    'lines.linestyle': ['-', six.text_type],  # solid line
    'lines.color': ['b', validate_color],  # blue
    'lines.marker': ['None', six.text_type],  # black
    'lines.markeredgewidth': [0.5, validate_float],
    'lines.markersize': [6, validate_float],  # markersize, in points
    'lines.antialiased': [True, validate_bool],  # antialised (no jaggies)
    'lines.dash_joinstyle': ['round', validate_joinstyle],
    'lines.solid_joinstyle': ['round', validate_joinstyle],
    'lines.dash_capstyle': ['butt', validate_capstyle],
    'lines.solid_capstyle': ['projecting', validate_capstyle],

    ## patch props
    'patch.linewidth': [1.0, validate_float],  # line width in points
    'patch.edgecolor': ['k', validate_color],  # black
    'patch.facecolor': ['b', validate_color],  # blue
    'patch.antialiased': [True, validate_bool],  # antialised (no jaggies)

    ## font props
    'font.family': [['sans-serif'], validate_stringlist],  # used by text object
    'font.style': ['normal', six.text_type],
    'font.variant': ['normal', six.text_type],
    'font.stretch': ['normal', six.text_type],
    'font.weight': ['normal', six.text_type],
    'font.size': [12, validate_float],  # Base font size in points
    'font.serif': [['Bitstream Vera Serif', 'DejaVu Serif',
                    'New Century Schoolbook', 'Century Schoolbook L',
                    'Utopia', 'ITC Bookman', 'Bookman',
                    'Nimbus Roman No9 L', 'Times New Roman',
                    'Times', 'Palatino', 'Charter', 'serif'],
                   validate_stringlist],
    'font.sans-serif': [['Bitstream Vera Sans', 'DejaVu Sans',
                         'Lucida Grande', 'Verdana', 'Geneva', 'Lucid',
                         'Arial', 'Helvetica', 'Avant Garde', 'sans-serif'],
                        validate_stringlist],
    'font.cursive': [['Apple Chancery', 'Textile', 'Zapf Chancery',
                      'Sand', 'cursive'], validate_stringlist],
    # Bug fix: a missing comma made 'Impact' 'Western' concatenate into the
    # non-existent family name 'ImpactWestern'.
    'font.fantasy': [['Comic Sans MS', 'Chicago', 'Charcoal', 'Impact',
                      'Western', 'fantasy'], validate_stringlist],
    'font.monospace': [['Bitstream Vera Sans Mono', 'DejaVu Sans Mono',
                        'Andale Mono', 'Nimbus Mono L', 'Courier New',
                        'Courier', 'Fixed', 'Terminal', 'monospace'],
                       validate_stringlist],

    # text props
    'text.color': ['k', validate_color],  # black
    'text.usetex': [False, validate_bool],
    'text.latex.unicode': [False, validate_bool],
    'text.latex.preamble': [[''], validate_stringlist],
    'text.latex.preview': [False, validate_bool],
    'text.dvipnghack': [None, validate_bool_maybe_none],
    'text.hinting': [True, validate_hinting],
    'text.hinting_factor': [8, validate_int],
    'text.antialiased': [True, validate_bool],

    'mathtext.cal': ['cursive', validate_font_properties],
    'mathtext.rm': ['serif', validate_font_properties],
    'mathtext.tt': ['monospace', validate_font_properties],
    'mathtext.it': ['serif:italic', validate_font_properties],
    'mathtext.bf': ['serif:bold', validate_font_properties],
    'mathtext.sf': ['sans\-serif', validate_font_properties],
    'mathtext.fontset': ['cm', validate_fontset],
    'mathtext.default': ['it', validate_mathtext_default],
    'mathtext.fallback_to_cm': [True, validate_bool],

    'image.aspect': ['equal', validate_aspect],  # equal, auto, a number
    'image.interpolation': ['bilinear', six.text_type],
    'image.cmap': ['jet', six.text_type],  # one of gray, jet, etc
    'image.lut': [256, validate_int],  # lookup table
    'image.origin': ['upper', six.text_type],
    'image.resample': [False, validate_bool],

    'contour.negative_linestyle': ['dashed',
                                   validate_negative_linestyle_legacy],

    # axes props
    'axes.axisbelow': [False, validate_bool],
    'axes.hold': [True, validate_bool],
    'axes.facecolor': ['w', validate_color],  # background color; white
    'axes.edgecolor': ['k', validate_color],  # edge color; black
    'axes.linewidth': [1.0, validate_float],  # edge linewidth
    'axes.titlesize': ['large', validate_fontsize],  # fontsize of the
                                                     # axes title
    'axes.titleweight': ['normal', six.text_type],  # font weight of axes title
    'axes.grid': [False, validate_bool],  # display grid or not
    'axes.grid.which': ['major', validate_axis_locator],  # set wether the gid are by
                                                          # default draw on 'major'
                                                          # 'minor' or 'both' kind of
                                                          # axis locator
    'axes.labelsize': ['medium', validate_fontsize],  # fontsize of the
                                                      # x any y labels
    'axes.labelweight': ['normal', six.text_type],  # fontsize of the x any y labels
    'axes.labelcolor': ['k', validate_color],  # color of axis label
    'axes.formatter.limits': [[-7, 7], validate_nseq_int(2)],
                               # use scientific notation if log10
                               # of the axis range is smaller than the
                               # first or larger than the second
    'axes.formatter.use_locale': [False, validate_bool],
                               # Use the current locale to format ticks
    'axes.formatter.use_mathtext': [False, validate_bool],
    'axes.formatter.useoffset': [True, validate_bool],
    'axes.unicode_minus': [True, validate_bool],
    'axes.color_cycle': [['b', 'g', 'r', 'c', 'm', 'y', 'k'],
                         validate_colorlist],  # cycle of plot
                                               # line colors
    'axes.xmargin': [0, ValidateInterval(0, 1,
                                         closedmin=True,
                                         closedmax=True)],  # margin added to xaxis
    'axes.ymargin': [0, ValidateInterval(0, 1,
                                         closedmin=True,
                                         closedmax=True)],  # margin added to yaxis

    'polaraxes.grid': [True, validate_bool],  # display polar grid or not
    'axes3d.grid': [True, validate_bool],  # display 3d grid

    # legend properties
    'legend.fancybox': [False, validate_bool],
    # at some point, legend.loc should be changed to 'best'
    'legend.loc': ['upper right', validate_legend_loc],
    # this option is internally ignored - it never served any useful purpose
    'legend.isaxes': [True, validate_bool],
    # the number of points in the legend line
    'legend.numpoints': [2, validate_int],
    # the number of points in the legend line for scatter
    'legend.scatterpoints': [3, validate_int],
    'legend.fontsize': ['large', validate_fontsize],
    # the relative size of legend markers vs. original
    'legend.markerscale': [1.0, validate_float],
    'legend.shadow': [False, validate_bool],
    # whether or not to draw a frame around legend
    'legend.frameon': [True, validate_bool],
    # alpha value of the legend frame
    'legend.framealpha': [None, validate_float_or_None],

    ## the following dimensions are in fraction of the font size
    'legend.borderpad': [0.4, validate_float],  # units are fontsize
    # the vertical space between the legend entries
    'legend.labelspacing': [0.5, validate_float],
    # the length of the legend lines
    'legend.handlelength': [2., validate_float],
    # the height of the legend lines
    'legend.handleheight': [0.7, validate_float],
    # the space between the legend line and legend text
    'legend.handletextpad': [.8, validate_float],
    # the border between the axes and legend edge
    'legend.borderaxespad': [0.5, validate_float],
    # the horizontal spacing between legend columns
    'legend.columnspacing': [2., validate_float],
    # Bug fix: duplicate 'legend.markerscale' and 'legend.shadow' entries
    # (identical values) were removed here; in a dict literal the later
    # duplicate silently wins, which hides editing mistakes.

    ## tick properties
    'xtick.major.size': [4, validate_float],  # major xtick size in points
    'xtick.minor.size': [2, validate_float],  # minor xtick size in points
    'xtick.major.width': [0.5, validate_float],  # major xtick width in points
    'xtick.minor.width': [0.5, validate_float],  # minor xtick width in points
    'xtick.major.pad': [4, validate_float],  # distance to label in points
    'xtick.minor.pad': [4, validate_float],  # distance to label in points
    'xtick.color': ['k', validate_color],  # color of the xtick labels
    # fontsize of the xtick labels
    'xtick.labelsize': ['medium', validate_fontsize],
    'xtick.direction': ['in', six.text_type],  # direction of xticks

    'ytick.major.size': [4, validate_float],  # major ytick size in points
    'ytick.minor.size': [2, validate_float],  # minor ytick size in points
    'ytick.major.width': [0.5, validate_float],  # major ytick width in points
    'ytick.minor.width': [0.5, validate_float],  # minor ytick width in points
    'ytick.major.pad': [4, validate_float],  # distance to label in points
    'ytick.minor.pad': [4, validate_float],  # distance to label in points
    'ytick.color': ['k', validate_color],  # color of the ytick labels
    # fontsize of the ytick labels
    'ytick.labelsize': ['medium', validate_fontsize],
    'ytick.direction': ['in', six.text_type],  # direction of yticks

    'grid.color': ['k', validate_color],  # grid color
    'grid.linestyle': [':', six.text_type],  # dotted
    'grid.linewidth': [0.5, validate_float],  # in points
    'grid.alpha': [1.0, validate_float],

    ## figure props
    # figure size in inches: width by height
    'figure.figsize': [[8.0, 6.0], validate_nseq_float(2)],
    'figure.dpi': [80, validate_float],  # DPI
    'figure.facecolor': ['0.75', validate_color],  # facecolor; scalar gray
    'figure.edgecolor': ['w', validate_color],  # edgecolor; white
    'figure.frameon': [True, validate_bool],
    'figure.autolayout': [False, validate_bool],
    'figure.max_open_warning': [20, validate_int],

    'figure.subplot.left': [0.125, ValidateInterval(0, 1, closedmin=True,
                                                    closedmax=True)],
    'figure.subplot.right': [0.9, ValidateInterval(0, 1, closedmin=True,
                                                   closedmax=True)],
    'figure.subplot.bottom': [0.1, ValidateInterval(0, 1, closedmin=True,
                                                    closedmax=True)],
    'figure.subplot.top': [0.9, ValidateInterval(0, 1, closedmin=True,
                                                 closedmax=True)],
    'figure.subplot.wspace': [0.2, ValidateInterval(0, 1, closedmin=True,
                                                    closedmax=False)],
    'figure.subplot.hspace': [0.2, ValidateInterval(0, 1, closedmin=True,
                                                    closedmax=False)],

    ## Saving figure's properties
    'savefig.dpi': [100, validate_float],  # DPI
    'savefig.facecolor': ['w', validate_color],  # facecolor; white
    'savefig.edgecolor': ['w', validate_color],  # edgecolor; white
    'savefig.frameon': [True, validate_bool],
    'savefig.orientation': ['portrait', validate_orientation],
    'savefig.jpeg_quality': [95, validate_int],
    # what to add to extensionless filenames
    'savefig.extension': ['png', deprecate_savefig_extension],
    # value checked by backend at runtime
    'savefig.format': ['png', update_savefig_format],
    # options are 'tight', or 'standard'. 'standard' validates to None.
    'savefig.bbox': [None, validate_bbox],
    'savefig.pad_inches': [0.1, validate_float],
    # default directory in savefig dialog box
    'savefig.directory': ['~', six.text_type],
    'savefig.transparent': [False, validate_bool],

    # Maintain shell focus for TkAgg
    'tk.window_focus': [False, validate_bool],
    'tk.pythoninspect': [False, validate_tkpythoninspect],  # obsolete

    # Set the papersize/type
    'ps.papersize': ['letter', validate_ps_papersize],
    'ps.useafm': [False, validate_bool],
    # use ghostscript or xpdf to distill ps output
    'ps.usedistiller': [False, validate_ps_distiller],
    'ps.distiller.res': [6000, validate_int],  # dpi
    'ps.fonttype': [3, validate_fonttype],  # 3 (Type3) or 42 (Truetype)
    # compression level from 0 to 9; 0 to disable
    'pdf.compression': [6, validate_int],
    # ignore any color-setting commands from the frontend
    'pdf.inheritcolor': [False, validate_bool],
    # use only the 14 PDF core fonts embedded in every PDF viewing application
    'pdf.use14corefonts': [False, validate_bool],
    'pdf.fonttype': [3, validate_fonttype],  # 3 (Type3) or 42 (Truetype)

    'pgf.debug': [False, validate_bool],  # output debug information
    # choose latex application for creating pdf files (xelatex/lualatex)
    'pgf.texsystem': ['xelatex', validate_pgf_texsystem],
    # use matplotlib rc settings for font configuration
    'pgf.rcfonts': [True, validate_bool],
    # provide a custom preamble for the latex process
    'pgf.preamble': [[''], validate_stringlist],

    # write raster image data directly into the svg file
    'svg.image_inline': [True, validate_bool],
    # suppress scaling of raster data embedded in SVG
    'svg.image_noscale': [False, validate_bool],
    # True to save all characters as paths in the SVG
    'svg.embed_char_paths': [True, deprecate_svg_embed_char_paths],
    'svg.fonttype': ['path', validate_svg_fonttype],

    # set this when you want to generate hardcopy docstring
    'docstring.hardcopy': [False, validate_bool],
    # where plugin directory is locate
    'plugins.directory': ['.matplotlib_plugins', six.text_type],

    'path.simplify': [True, validate_bool],
    'path.simplify_threshold': [1.0 / 9.0, ValidateInterval(0.0, 1.0)],
    'path.snap': [True, validate_bool],
    'path.sketch': [None, validate_sketch],
    'path.effects': [[], validate_any],
    'agg.path.chunksize': [0, validate_int],  # 0 to disable chunking;

    # key-mappings (multi-character mappings should be a list/tuple)
    'keymap.fullscreen': [('f', 'ctrl+f'), validate_stringlist],
    'keymap.home': [['h', 'r', 'home'], validate_stringlist],
    'keymap.back': [['left', 'c', 'backspace'], validate_stringlist],
    'keymap.forward': [['right', 'v'], validate_stringlist],
    'keymap.pan': [['p'], validate_stringlist],
    'keymap.zoom': [['o'], validate_stringlist],
    'keymap.save': [['s', 'ctrl+s'], validate_stringlist],
    'keymap.quit': [['ctrl+w', 'cmd+w'], validate_stringlist],
    'keymap.grid': [['g'], validate_stringlist],
    'keymap.yscale': [['l'], validate_stringlist],
    'keymap.xscale': [['k', 'L'], validate_stringlist],
    'keymap.all_axes': [['a'], validate_stringlist],

    # sample data
    'examples.directory': ['', six.text_type],

    # Animation settings
    'animation.writer': ['ffmpeg', validate_movie_writer],
    'animation.codec': ['mpeg4', six.text_type],
    'animation.bitrate': [-1, validate_int],
    # Controls image format when frames are written to disk
    'animation.frame_format': ['png', validate_movie_frame_fmt],
    # Path to FFMPEG binary. If just binary name, subprocess uses $PATH.
    'animation.ffmpeg_path': ['ffmpeg', six.text_type],
    # Additional arguments for ffmpeg movie writer (using pipes)
    'animation.ffmpeg_args': [[], validate_stringlist],
    # Path to AVConv binary. If just binary name, subprocess uses $PATH.
    'animation.avconv_path': ['avconv', six.text_type],
    # Additional arguments for avconv movie writer (using pipes)
    'animation.avconv_args': [[], validate_stringlist],
    # Path to MENCODER binary. If just binary name, subprocess uses $PATH.
    'animation.mencoder_path': ['mencoder', six.text_type],
    # Additional arguments for mencoder movie writer (using pipes)
    'animation.mencoder_args': [[], validate_stringlist],
    # Path to convert binary. If just binary name, subprocess uses $PATH
    'animation.convert_path': ['convert', six.text_type],
    # Additional arguments for convert movie writer (using pipes)
    'animation.convert_args': [[], validate_stringlist]}
if __name__ == '__main__':
    # Self-check: every default value must validate to itself (datapath is
    # patched to an existing path first). Mismatches are printed.
    rc = defaultParams
    rc['datapath'][0] = '/'
    for key in rc:
        default, converter = rc[key]
        if not converter(default) == default:
            print("%s: %s != %s" % (key, converter(default), default))
|
mit
|
cavestruz/L500analysis
|
plotting/profiles/T_evolution/T_halo_overdensity/Tnt/plot_Tnt_mean.py
|
1
|
3362
|
# Plot the redshift evolution of the mean non-thermal temperature (T_nt)
# profile, normalized to its z=0 profile, for three overdensity
# definitions (200m, 500m, 1600m), one stacked panel each.
# NOTE(review): this is a Python 2 script (bare `print` statement below).
from L500analysis.data_io.get_cluster_data import GetClusterData
from L500analysis.utils.utils import aexp2redshift,redshift2aexp
from L500analysis.plotting.tools.figure_formatting import *
from L500analysis.plotting.profiles.tools.profiles_percentile \
    import *
from L500analysis.utils.constants import rbins
from derived_field_functions import *

color = matplotlib.cm.afmhot_r
# Legend cosmetics: no line sample, one marker, small font.
matplotlib.rcParams['legend.handlelength'] = 0
matplotlib.rcParams['legend.numpoints'] = 1
matplotlib.rcParams['legend.fontsize'] = 12

# Expansion factors for z = 0, 0.3, 0.5, 0.7, 0.85, 1.0.
aexps = [1.0,redshift2aexp(0.3),redshift2aexp(0.5),redshift2aexp(0.7),redshift2aexp(0.85),redshift2aexp(1.0)]
db_name = 'L500_NR_0'
db_dir = '/home/babyostrich/Documents/Repos/L500analysis/'

# Profiles requested per cluster; only the T_nt ratios are plotted here.
profiles_list = ['r_mid',
                 'M_gas', 'M_dark', 'M_star',
                 'vel_gas_rad_avg',
                 'vel_gas_rad_std',
                 'vel_gas_tan_avg',
                 'vel_gas_tan_std',
                 'T_nt/T200m',
                 'T_nt/T500m',
                 'T_nt/T1600m',
                 'R/R200m',
                 'R/R500m',
                 'R/R1600m',
                 ]
halo_properties_list=['r200m','M_total_200m']

# Per-panel y-axis labels and x positions for the Delta annotations.
fTz200m=r"$\tilde{T}_{200m}/\tilde{T}(z=0)$"
fTz500m=r"$\tilde{T}_{500m}/\tilde{T}(z=0)$"
fTz1600m=r"$\tilde{T}_{1600m}/\tilde{T}(z=0)$"
axes_labels = [fTz200m,fTz500m,fTz1600m]
text_xlocs = [.125,0.2,0.2]

pa = PlotAxes(figname='fTnt_deltam',use_axes_labels=False,
              axes=[[0.15,0.65,0.80,0.2],[0.15,0.4,0.80,0.2],[0.15,0.15,0.80,0.2]],
              axes_labels=axes_labels,
              xlabel=r"$R/R_{\Delta}$",
              xlims=[(0.06,0.94),(0.1,1.39),(0.01,2.49)],
              ylims=[(0.7,1.2),(0.7,1.2),(0.7,1.2)])

# One dict per overdensity: maps aexp -> {'mean': ..., 'var': ...}.
fT1 = {}
fT2 = {}
fT3 = {}
Tplots = [fT1, fT2, fT3]
deltas = ['200m','500m','1600m']
clkeys = ['T_nt/T'+delta for delta in deltas]
linestyles = ['-']

# Pass 1: gather mean/variance of T_nt/T_delta at every epoch.
# (calculate_profiles_mean_variance presumably comes from the
# profiles_percentile star-import -- verify.)
for aexp in aexps :
    cldata = GetClusterData(aexp=aexp,db_name=db_name,
                            db_dir=db_dir,
                            profiles_list=profiles_list,
                            halo_properties_list=halo_properties_list)
    # Collect average profiles at each z
    for Tplot, clkey in zip(Tplots,clkeys) :
        Tplot[aexp] = calculate_profiles_mean_variance(cldata[clkey])

# Pass 2: plot each epoch's mean profile divided by the z=0 (aexp=1.0)
# mean profile.
for aexp in aexps :
    for T, axes_label in zip(Tplots,axes_labels) :
        fractional_evolution = get_profiles_division_mean_variance(
            mean_profile1=T[aexp]['mean'],
            var_profile1=T[aexp]['var'],
            mean_profile2=T[1.0]['mean'],
            var_profile2=T[1.0]['var'],
            )
        pa.axes[axes_label].plot( rbins, fractional_evolution['mean'],
                                  color=color(aexp),ls=':',label="$z=%3.1f$" % aexp2redshift(aexp))

# Panel cosmetics: tick size, y ticks, and a Delta annotation per panel.
for axes_label,delta,xloc in zip(axes_labels,deltas,text_xlocs) :
    print axes_label, delta
    pa.axes[axes_label].tick_params(labelsize=12)
    pa.axes[axes_label].set_yticks(arange(0.7,1.2,0.2))
    pa.axes[axes_label].text(xloc,1.05,'$\\Delta='+delta+'$')

# Shared y label on the middle panel; legend only on the top panel.
pa.axes[axes_labels[1]].set_ylabel("$\\tilde{T}_{nt}/\\tilde{T}_{nt}(z=0)$",
                                   fontsize="xx-large")
pa.set_legend(axes_label=axes_labels[0],ncol=3,loc='upper right', frameon=False)
pa.color_legend_texts(axes_label=axes_labels[0])
pa.savefig()
|
mit
|
datapythonista/pandas
|
pandas/tests/tseries/offsets/test_ticks.py
|
4
|
10779
|
"""
Tests for offsets.Tick and subclasses
"""
from datetime import (
datetime,
timedelta,
)
from hypothesis import (
assume,
example,
given,
settings,
strategies as st,
)
import numpy as np
import pytest
from pandas._libs.tslibs.offsets import delta_to_tick
from pandas import (
Timedelta,
Timestamp,
)
import pandas._testing as tm
from pandas.tests.tseries.offsets.common import assert_offset_equal
from pandas.tseries import offsets
from pandas.tseries.offsets import (
Hour,
Micro,
Milli,
Minute,
Nano,
Second,
)
# ---------------------------------------------------------------------
# Test Helpers
# Every concrete Tick subclass, coarsest (Hour) to finest (Nano); used
# to parametrize the class-generic tests below.
tick_classes = [Hour, Minute, Second, Milli, Micro, Nano]
# ---------------------------------------------------------------------
def test_apply_ticks():
    """Applying one Hour offset to another sums their counts."""
    summed = offsets.Hour(3).apply(offsets.Hour(4))
    assert summed == offsets.Hour(7)
def test_delta_to_tick():
    """delta_to_tick picks the coarsest Tick that exactly represents a delta."""
    assert delta_to_tick(timedelta(3)) == offsets.Day(3)
    # A 5-nanosecond Timedelta can only be represented by Nano.
    assert delta_to_tick(Timedelta(nanoseconds=5)) == Nano(5)
@pytest.mark.parametrize("cls", tick_classes)
@settings(deadline=None)  # GH 24641
@example(n=2, m=3)
@example(n=800, m=300)
@example(n=1000, m=5)
@given(n=st.integers(-999, 999), m=st.integers(-999, 999))
def test_tick_add_sub(cls, n, m):
    """For every Tick subclass and integers n, m:

    tick(n) + tick(m) == tick(n+m) and tick(n) - tick(m) == tick(n-m).
    """
    assert cls(n) + cls(m) == cls(n + m)
    assert cls(n).apply(cls(m)) == cls(n + m)
    assert cls(n) - cls(m) == cls(n - m)
@pytest.mark.arm_slow
@pytest.mark.parametrize("cls", tick_classes)
@settings(deadline=None)
@example(n=2, m=3)
@given(n=st.integers(-999, 999), m=st.integers(-999, 999))
def test_tick_equality(cls, n, m):
    """Same-class Ticks compare equal iff their counts match."""
    assume(m != n)
    tick, tock = cls(n), cls(m)
    assert tick != tock
    assert not (tick == tock)
    tock = cls(n)
    assert tick == tock
    assert not (tick != tock)
    if n != 0:
        # Sign matters for nonzero counts.
        assert cls(n) != cls(-n)
# ---------------------------------------------------------------------
def test_Hour():
    """Hour offsets shift datetimes by whole hours and support arithmetic."""
    base = datetime(2010, 1, 1)
    one_hour_later = datetime(2010, 1, 1, 1)
    assert_offset_equal(Hour(), base, one_hour_later)
    assert_offset_equal(Hour(-1), one_hour_later, base)
    assert_offset_equal(2 * Hour(), base, datetime(2010, 1, 1, 2))
    assert_offset_equal(-1 * Hour(), one_hour_later, base)
    assert Hour(3) + Hour(2) == Hour(5)
    assert Hour(3) - Hour(2) == Hour()
    assert Hour(4) != Hour(1)
def test_Minute():
    """Minute offsets shift datetimes by whole minutes and support arithmetic."""
    base = datetime(2010, 1, 1)
    one_minute_later = datetime(2010, 1, 1, 0, 1)
    assert_offset_equal(Minute(), base, one_minute_later)
    assert_offset_equal(Minute(-1), one_minute_later, base)
    assert_offset_equal(2 * Minute(), base, datetime(2010, 1, 1, 0, 2))
    assert_offset_equal(-1 * Minute(), one_minute_later, base)
    assert Minute(3) + Minute(2) == Minute(5)
    assert Minute(3) - Minute(2) == Minute()
    assert Minute(5) != Minute()
def test_Second():
    """Second offsets shift datetimes by whole seconds and support arithmetic."""
    base = datetime(2010, 1, 1)
    one_second_later = datetime(2010, 1, 1, 0, 0, 1)
    assert_offset_equal(Second(), base, one_second_later)
    assert_offset_equal(Second(-1), one_second_later, base)
    assert_offset_equal(2 * Second(), base, datetime(2010, 1, 1, 0, 0, 2))
    assert_offset_equal(-1 * Second(), one_second_later, base)
    assert Second(3) + Second(2) == Second(5)
    assert Second(3) - Second(2) == Second()
def test_Millisecond():
    """Milli offsets shift datetimes by 1000-microsecond steps."""
    base = datetime(2010, 1, 1)
    one_ms_later = datetime(2010, 1, 1, 0, 0, 0, 1000)
    two_ms_later = datetime(2010, 1, 1, 0, 0, 0, 2000)
    assert_offset_equal(Milli(), base, one_ms_later)
    assert_offset_equal(Milli(-1), one_ms_later, base)
    assert_offset_equal(Milli(2), base, two_ms_later)
    assert_offset_equal(2 * Milli(), base, two_ms_later)
    assert_offset_equal(-1 * Milli(), one_ms_later, base)
    assert Milli(3) + Milli(2) == Milli(5)
    assert Milli(3) - Milli(2) == Milli()
def test_MillisecondTimestampArithmetic():
    """Milli composes with Timestamp, not just datetime."""
    start = Timestamp("2010-01-01")
    one_ms_later = Timestamp("2010-01-01 00:00:00.001")
    assert_offset_equal(Milli(), start, one_ms_later)
    assert_offset_equal(Milli(-1), one_ms_later, start)
def test_Microsecond():
    """Micro offsets shift datetimes by single microseconds."""
    base = datetime(2010, 1, 1)
    one_us_later = datetime(2010, 1, 1, 0, 0, 0, 1)
    assert_offset_equal(Micro(), base, one_us_later)
    assert_offset_equal(Micro(-1), one_us_later, base)
    assert_offset_equal(2 * Micro(), base, datetime(2010, 1, 1, 0, 0, 0, 2))
    assert_offset_equal(-1 * Micro(), one_us_later, base)
    assert Micro(3) + Micro(2) == Micro(5)
    assert Micro(3) - Micro(2) == Micro()
def test_NanosecondGeneric():
    """Nano addition works on either side of a Timestamp and sets .nanosecond."""
    ts = Timestamp(datetime(2010, 1, 1))
    assert ts.nanosecond == 0
    assert (ts + Nano(10)).nanosecond == 10
    assert (Nano(10) + ts).nanosecond == 10
def test_Nanosecond():
    """Nano offsets shift Timestamps by single nanoseconds."""
    ts = Timestamp(datetime(2010, 1, 1))
    plus_one_ns = ts + np.timedelta64(1, "ns")
    assert_offset_equal(Nano(), ts, plus_one_ns)
    assert_offset_equal(Nano(-1), plus_one_ns, ts)
    assert_offset_equal(2 * Nano(), ts, ts + np.timedelta64(2, "ns"))
    assert_offset_equal(-1 * Nano(), plus_one_ns, ts)
    assert Nano(3) + Nano(2) == Nano(5)
    assert Nano(3) - Nano(2) == Nano()
    # GH9284: mixed-resolution sums normalize to nanoseconds.
    assert Nano(1) + Nano(10) == Nano(11)
    assert Nano(5) + Micro(1) == Nano(1005)
    assert Micro(5) + Nano(1) == Nano(5001)
@pytest.mark.parametrize(
    "kls, expected",
    [
        (Hour, Timedelta(hours=5)),
        (Minute, Timedelta(hours=2, minutes=3)),
        (Second, Timedelta(hours=2, seconds=3)),
        (Milli, Timedelta(hours=2, milliseconds=3)),
        (Micro, Timedelta(hours=2, microseconds=3)),
        (Nano, Timedelta(hours=2, nanoseconds=3)),
    ],
)
def test_tick_addition(kls, expected):
    """Tick + Timedelta yields a Timedelta with the summed duration."""
    total = kls(3) + Timedelta(hours=2)
    assert isinstance(total, Timedelta)
    assert total == expected
@pytest.mark.parametrize("cls", tick_classes)
def test_tick_division(cls):
    """Dividing a Tick by scalars, ticks, or timedeltas tracks its delta."""
    off = cls(10)
    assert off / cls(5) == 2
    assert off / 2 == cls(5)
    assert off / 2.0 == cls(5)
    assert off / off.delta == 1
    assert off / off.delta.to_timedelta64() == 1
    assert off / Nano(1) == off.delta / Nano(1).delta
    if cls is not Nano:
        # Dividing by a large int demotes the result to a finer Tick class.
        finer = off / 1000
        assert isinstance(finer, offsets.Tick)
        assert not isinstance(finer, cls)
        assert finer.delta == off.delta / 1000
    if cls._nanos_inc < Timedelta(seconds=1).value:
        # Dividing by a small float promotes to a coarser Tick class.
        coarser = off / 0.001
        assert isinstance(coarser, offsets.Tick)
        assert not isinstance(coarser, cls)
        assert coarser.delta == off.delta / 0.001
def test_tick_mul_float():
    """Multiplying a Tick by a float keeps or refines its class as needed."""
    off = Micro(2)
    # 2 * 1.5 microseconds is a whole number of microseconds: type kept.
    tripled = off * 1.5
    assert tripled == Micro(3)
    assert isinstance(tripled, Micro)
    # 2 * 1.25 microseconds is not: result drops down to nanoseconds.
    fractional = off * 1.25
    assert fractional == Nano(2500)
    assert isinstance(fractional, Nano)
@pytest.mark.parametrize("cls", tick_classes)
def test_tick_rdiv(cls):
    """Scalars cannot be divided by a Tick; timedelta-likes can."""
    off = cls(10)
    delta = off.delta
    td64 = delta.to_timedelta64()
    instance__type = ".".join([cls.__module__, cls.__name__])
    msg = (
        "unsupported operand type\\(s\\) for \\/: 'int'|'float' and "
        f"'{instance__type}'"
    )
    for numerator in (2, 2.0):
        with pytest.raises(TypeError, match=msg):
            numerator / off
    assert (td64 * 2.5) / off == 2.5
    if cls is not Nano:
        # skip pytimedelta for Nano since it gets dropped
        assert (delta.to_pytimedelta() * 2) / off == 2
    quotients = np.array([2 * td64, td64]) / off
    tm.assert_numpy_array_equal(quotients, np.array([2.0, 1.0]))
@pytest.mark.parametrize("cls1", tick_classes)
@pytest.mark.parametrize("cls2", tick_classes)
def test_tick_zero(cls1, cls2):
    """Zero ticks of any two classes are interchangeable."""
    assert cls1(0) == cls2(0)
    assert cls1(0) + cls2(0) == cls1(0)
    # For Nano the original test adds Nano(0) rather than cls2(0).
    zero = Nano(0) if cls1 is Nano else cls2(0)
    assert cls1(2) + zero == cls1(2)
@pytest.mark.parametrize("cls", tick_classes)
def test_tick_equalities(cls):
    """A Tick constructed without an argument defaults to a count of one."""
    assert cls() == cls(1)
@pytest.mark.parametrize("cls", tick_classes)
def test_tick_offset(cls):
    """Ticks are never anchored to a fixed point of the period."""
    assert not cls().is_anchored()
@pytest.mark.parametrize("cls", tick_classes)
def test_compare_ticks(cls):
    """Ordering between same-class Ticks follows their counts."""
    three, four = cls(3), cls(4)
    assert three < cls(4)
    assert cls(3) < four
    assert four > cls(3)
    assert cls(4) > three
    assert cls(3) == cls(3)
    assert cls(3) != cls(4)
@pytest.mark.parametrize("cls", tick_classes)
def test_compare_ticks_to_strs(cls):
    # GH#23524
    off = cls(19)
    # These tests should work with any strings, but we particularly are
    # interested in "infer" as that comparison is convenient to make in
    # Datetime/Timedelta Array/Index constructors
    assert not off == "infer"
    assert not "foo" == off
    instance_type = ".".join([cls.__module__, cls.__name__])
    msg = (
        "'<'|'<='|'>'|'>=' not supported between instances of "
        f"'str' and '{instance_type}'|'{instance_type}' and 'str'"
    )
    comparisons = (
        lambda a, b: a < b,
        lambda a, b: a <= b,
        lambda a, b: a > b,
        lambda a, b: a >= b,
    )
    for left, right in [("infer", off), (off, "infer")]:
        for compare in comparisons:
            with pytest.raises(TypeError, match=msg):
                compare(left, right)
@pytest.mark.parametrize("cls", tick_classes)
def test_compare_ticks_to_timedeltalike(cls):
    """A Tick compares equal (neither < nor >) to its timedelta forms."""
    off = cls(19)
    td = off.delta
    equivalents = [td, td.to_timedelta64()]
    if cls is not Nano:
        # datetime.timedelta cannot represent nanoseconds, so skip it.
        equivalents.append(td.to_pytimedelta())
    for other in equivalents:
        assert off == other
        assert not off != other
        assert not off < other
        assert not off > other
        assert off <= other
        assert off >= other
|
bsd-3-clause
|
robbymeals/scikit-learn
|
sklearn/neighbors/tests/test_neighbors.py
|
103
|
41083
|
from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
# (the raw datasets are ordered by class; shuffling prevents the tests
# from accidentally relying on that ordering)
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
# All scipy sparse container types the estimators should accept.
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
                lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
# Every neighbor-search backend; 'auto' selects among the other three.
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
# Minkowski p-norms exercised by the tests (np.inf = Chebyshev).
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
    neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
                                 n_query_pts=2, n_neighbors=5):
    # Test unsupervised neighbors methods
    # Every algorithm and every p-norm must agree on both the neighbor
    # indices and the distances for the same random query points.
    X = rng.rand(n_samples, n_features)
    test = rng.rand(n_query_pts, n_features)
    for p in P:
        results_nodist = []
        results = []
        for algorithm in ALGORITHMS:
            neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
                                               algorithm=algorithm,
                                               p=p)
            neigh.fit(X)
            results_nodist.append(neigh.kneighbors(test,
                                                   return_distance=False))
            results.append(neigh.kneighbors(test, return_distance=True))
        # Compare consecutive algorithms pairwise: indices must match the
        # return_distance=False call, and both distances and indices must
        # agree across algorithms.
        for i in range(len(results) - 1):
            assert_array_almost_equal(results_nodist[i], results[i][1])
            assert_array_almost_equal(results[i][0], results[i + 1][0])
            assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
    """Fitting on a fitted estimator, BallTree, or KDTree matches raw data."""
    X = rng.random_sample((10, 3))
    reference = neighbors.NearestNeighbors(n_neighbors=1)
    reference.fit(X)
    ref_dist, ref_ind = reference.kneighbors(X)
    nbrs = neighbors.NearestNeighbors(n_neighbors=1)
    # `input` renamed to avoid shadowing the builtin.
    for fit_input in (reference, neighbors.BallTree(X), neighbors.KDTree(X)):
        nbrs.fit(fit_input)
        dist, ind = nbrs.kneighbors(X)
        assert_array_almost_equal(ref_dist, dist)
        assert_array_almost_equal(ref_ind, ind)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
                                       n_query_pts=2, radius=0.5,
                                       random_state=0):
    # Test unsupervised radius-based query
    # All algorithms must return the same in-radius neighbor sets, for
    # every p-norm, once results are sorted by distance.
    rng = np.random.RandomState(random_state)
    X = rng.rand(n_samples, n_features)
    test = rng.rand(n_query_pts, n_features)
    for p in P:
        results = []
        for algorithm in ALGORITHMS:
            neigh = neighbors.NearestNeighbors(radius=radius,
                                               algorithm=algorithm,
                                               p=p)
            neigh.fit(X)
            ind1 = neigh.radius_neighbors(test, return_distance=False)
            # sort the results: this is not done automatically for
            # radius searches
            dist, ind = neigh.radius_neighbors(test, return_distance=True)
            for (d, i, i1) in zip(dist, ind, ind1):
                j = d.argsort()
                d[:] = d[j]
                i[:] = i[j]
                i1[:] = i1[j]
            results.append((dist, ind))
            assert_array_almost_equal(np.concatenate(list(ind)),
                                      np.concatenate(list(ind1)))
        # Cross-algorithm agreement on both distances and indices.
        # (The stray trailing comma two lines below is harmless: it just
        # makes the expression a 1-tuple after the assert call runs.)
        for i in range(len(results) - 1):
            assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0]))),
            assert_array_almost_equal(np.concatenate(list(results[i][1])),
                                      np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
                               n_features=5,
                               n_test_pts=10,
                               n_neighbors=5,
                               random_state=0):
    # Test k-neighbors classification
    # Binary labels from a radial rule; slightly perturbed training
    # points must keep their label, for int and string targets alike.
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    y_str = y.astype(str)
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
                                                 weights=weights,
                                                 algorithm=algorithm)
            knn.fit(X, y)
            # Tiny perturbation so queries are near, not on, training pts.
            epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = knn.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y[:n_test_pts])
            # Test prediction with y_str
            knn.fit(X, y_str)
            y_pred = knn.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
                                            n_test_pts=10, n_neighbors=5,
                                            random_state=0):
    """k-NN classification also works when labels are supplied as floats."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    inside_sphere = (X ** 2).sum(axis=1) < .5
    y = inside_sphere.astype(np.int)
    clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
    clf.fit(X, y.astype(np.float))
    jitter = 1e-5 * (2 * rng.rand(1, n_features) - 1)
    predictions = clf.predict(X[:n_test_pts] + jitter)
    assert_array_equal(predictions, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
    # Test KNeighborsClassifier.predict_proba() method
    X = np.array([[0, 2, 0],
                  [0, 2, 1],
                  [2, 0, 0],
                  [2, 2, 0],
                  [0, 0, 2],
                  [0, 0, 1]])
    y = np.array([4, 4, 5, 5, 1, 1])
    cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1)  # cityblock dist
    cls.fit(X, y)
    y_prob = cls.predict_proba(X)
    # Columns follow sorted class order [1, 4, 5]; each row holds the
    # fraction of that sample's 3 nearest neighbors in each class.
    real_prob = np.array([[0, 2. / 3, 1. / 3],
                          [1. / 3, 2. / 3, 0],
                          [1. / 3, 0, 2. / 3],
                          [0, 1. / 3, 2. / 3],
                          [2. / 3, 1. / 3, 0],
                          [2. / 3, 1. / 3, 0]])
    assert_array_equal(real_prob, y_prob)
    # Check that it also works with non integer labels
    cls.fit(X, y.astype(str))
    y_prob = cls.predict_proba(X)
    assert_array_equal(real_prob, y_prob)
    # Check that it works with weights='distance'
    cls = neighbors.KNeighborsClassifier(
        n_neighbors=2, p=1, weights='distance')
    cls.fit(X, y)
    y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
    real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
    assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
                                     n_features=5,
                                     n_test_pts=10,
                                     radius=0.5,
                                     random_state=0):
    # Test radius-based classification
    # Same setup as the k-NN variant: binary labels from a radial rule,
    # perturbed training points must keep their labels, for both int
    # and string targets.
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    y_str = y.astype(str)
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
                                                        weights=weights,
                                                        algorithm=algorithm)
            neigh.fit(X, y)
            # Tiny perturbation keeps queries inside the radius.
            epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = neigh.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y[:n_test_pts])
            neigh.fit(X, y_str)
            y_pred = neigh.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
    """Radius classifier behavior when a query has no in-radius neighbors.

    With ``outlier_label=None`` prediction must raise an informative
    ``ValueError``; with an explicit label, outliers must receive it.

    Bug fix: the labeled-outlier branch was dead code (``elif False:``),
    so the outlier-label assertion never executed; it is now ``else:``.
    """
    X = np.array([[1.0, 1.0], [2.0, 2.0]])
    y = np.array([1, 2])
    radius = 0.1
    z1 = np.array([[1.01, 1.01], [2.01, 2.01]])  # no outliers
    z2 = np.array([[1.01, 1.01], [1.4, 1.4]])    # one outlier
    weight_func = _weight_func
    for outlier_label in [0, -1, None]:
        for algorithm in ALGORITHMS:
            for weights in ['uniform', 'distance', weight_func]:
                rnc = neighbors.RadiusNeighborsClassifier
                clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
                          outlier_label=outlier_label)
                clf.fit(X, y)
                assert_array_equal(np.array([1, 2]),
                                   clf.predict(z1))
                if outlier_label is None:
                    assert_raises(ValueError, clf.predict, z2)
                else:
                    assert_array_equal(np.array([1, outlier_label]),
                                       clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
    """Queries with no in-radius neighbors get the configured outlier label."""
    X = np.array([[1.0, 1.0], [2.0, 2.0]])
    y = np.array([1, 2])
    radius = 0.1
    z1 = np.array([[1.01, 1.01], [2.01, 2.01]])  # both near training points
    z2 = np.array([[1.01, 1.01], [1.4, 1.4]])    # second query is an outlier
    expected_z1 = np.array([1, 2])
    expected_z2 = np.array([1, -1])
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', _weight_func]:
            clf = neighbors.RadiusNeighborsClassifier(radius=radius,
                                                      weights=weights,
                                                      algorithm=algorithm,
                                                      outlier_label=-1)
            clf.fit(X, y)
            assert_array_equal(expected_z1, clf.predict(z1))
            assert_array_equal(expected_z2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
    """Zero-distance queries must not break any weighting scheme."""
    X = np.array([[1.0, 1.0], [2.0, 2.0]])
    y = np.array([1, 2])
    radius = 0.1
    queries = np.array([[1.01, 1.01], [2.0, 2.0]])  # second sits on X[1]
    expected = np.array([1, 2])
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', _weight_func]:
            clf = neighbors.RadiusNeighborsClassifier(radius=radius,
                                                      weights=weights,
                                                      algorithm=algorithm)
            clf.fit(X, y)
            assert_array_equal(expected, clf.predict(queries))
def test_neighbors_regressors_zero_distance():
    # Test radius-based regressor, when distance to a sample is zero.
    # Two coincident training points at (1,1) carry targets 1.0 and 1.5;
    # the second query (2,2) coincides exactly with a training point.
    X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
    y = np.array([1.0, 1.5, 2.0, 0.0])
    radius = 0.2
    z = np.array([[1.1, 1.1], [2.0, 2.0]])
    # Expected values per the asserts below: the radius regressor and the
    # distance-weighted k-NN return the exact-match target at (2,2),
    # while uniform k-NN averages the two nearest targets.
    rnn_correct_labels = np.array([1.25, 2.0])
    knn_correct_unif = np.array([1.25, 1.0])
    knn_correct_dist = np.array([1.25, 2.0])
    for algorithm in ALGORITHMS:
        # we don't test for weights=_weight_func since user will be expected
        # to handle zero distances themselves in the function.
        for weights in ['uniform', 'distance']:
            rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
                                                     weights=weights,
                                                     algorithm=algorithm)
            rnn.fit(X, y)
            assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
        for weights, corr_labels in zip(['uniform', 'distance'],
                                        [knn_correct_unif, knn_correct_dist]):
            knn = neighbors.KNeighborsRegressor(n_neighbors=2,
                                                weights=weights,
                                                algorithm=algorithm)
            knn.fit(X, y)
            assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
    """Points lying exactly on the radius boundary are handled consistently.

    Also ensures that even with only one query point, an object array
    is returned rather than a 2d array.
    """
    X = np.array([[1.5], [3.0], [3.01]])
    radius = 3.0
    for algorithm in ALGORITHMS:
        model = neighbors.NearestNeighbors(radius=radius,
                                           algorithm=algorithm).fit(X)
        hits = model.radius_neighbors([0.0], return_distance=False)
        assert_equal(hits.shape, (1,))
        assert_equal(hits.dtype, object)
        # 3.0 is exactly on the boundary and must be included; 3.01 not.
        assert_array_equal(hits[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
    # Test k-NN classifier on multioutput data
    # Multi-output prediction must equal stacking n_output independent
    # single-output radius classifiers column by column.
    rng = check_random_state(0)
    n_features = 2
    n_samples = 40
    n_output = 3
    X = rng.rand(n_samples, n_features)
    y = rng.randint(0, 3, (n_samples, n_output))
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    weights = [None, 'uniform', 'distance', _weight_func]
    for algorithm, weights in product(ALGORITHMS, weights):
        # Stack single output prediction
        y_pred_so = []
        for o in range(n_output):
            rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
                                                      algorithm=algorithm)
            rnn.fit(X_train, y_train[:, o])
            y_pred_so.append(rnn.predict(X_test))
        y_pred_so = np.vstack(y_pred_so).T
        assert_equal(y_pred_so.shape, y_test.shape)
        # Multioutput prediction
        rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
                                                     algorithm=algorithm)
        rnn_mo.fit(X_train, y_train)
        y_pred_mo = rnn_mo.predict(X_test)
        assert_equal(y_pred_mo.shape, y_test.shape)
        assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
                                      n_features=5,
                                      n_test_pts=10,
                                      n_neighbors=5,
                                      random_state=0):
    """k-NN classification accepts every supported sparse matrix format."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    X *= X > .2  # sparsify: zero out small entries
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    for sparse_train in SPARSE_TYPES:
        clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
                                             algorithm='auto')
        clf.fit(sparse_train(X), y)
        jitter = 1e-5 * (2 * rng.rand(1, n_features) - 1)
        for sparse_query in SPARSE_TYPES + (np.asarray,):
            queries = sparse_query(X[:n_test_pts] + jitter)
            assert_array_equal(clf.predict(queries), y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
    # Test k-NN classifier on multioutput data
    # Multi-output fit/predict must match stacking per-column estimators,
    # both for the predicted labels and for per-class probabilities.
    rng = check_random_state(0)
    n_features = 5
    n_samples = 50
    n_output = 3
    X = rng.rand(n_samples, n_features)
    y = rng.randint(0, 3, (n_samples, n_output))
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    weights = [None, 'uniform', 'distance', _weight_func]
    for algorithm, weights in product(ALGORITHMS, weights):
        # Stack single output prediction
        y_pred_so = []
        y_pred_proba_so = []
        for o in range(n_output):
            knn = neighbors.KNeighborsClassifier(weights=weights,
                                                 algorithm=algorithm)
            knn.fit(X_train, y_train[:, o])
            y_pred_so.append(knn.predict(X_test))
            y_pred_proba_so.append(knn.predict_proba(X_test))
        y_pred_so = np.vstack(y_pred_so).T
        assert_equal(y_pred_so.shape, y_test.shape)
        assert_equal(len(y_pred_proba_so), n_output)
        # Multioutput prediction
        knn_mo = neighbors.KNeighborsClassifier(weights=weights,
                                                algorithm=algorithm)
        knn_mo.fit(X_train, y_train)
        y_pred_mo = knn_mo.predict(X_test)
        assert_equal(y_pred_mo.shape, y_test.shape)
        assert_array_almost_equal(y_pred_mo, y_pred_so)
        # Check proba
        y_pred_proba_mo = knn_mo.predict_proba(X_test)
        assert_equal(len(y_pred_proba_mo), n_output)
        for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
            assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
                              n_features=5,
                              n_test_pts=10,
                              n_neighbors=3,
                              random_state=0):
    """k-NN regression roughly recovers a smooth radial target."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    y_target = y[:n_test_pts]
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', _weight_func]:
            reg = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                                weights=weights,
                                                algorithm=algorithm)
            reg.fit(X, y)
            jitter = 1E-5 * (2 * rng.rand(1, n_features) - 1)
            predictions = reg.predict(X[:n_test_pts] + jitter)
            assert_true(np.all(abs(predictions - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
    """Uniform-weight multi-output k-NN equals the mean over neighbors."""
    rng = check_random_state(0)
    n_samples, n_features, n_output = 40, 5, 4
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_output)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        reg = neighbors.KNeighborsRegressor(weights=weights,
                                            algorithm=algorithm)
        reg.fit(X_train, y_train)
        neighbor_idx = reg.kneighbors(X_test, return_distance=False)
        # Manual prediction: average the neighbors' targets per query.
        manual_mean = np.array([np.mean(y_train[idx], axis=0)
                                for idx in neighbor_idx])
        predictions = reg.predict(X_test)
        assert_equal(predictions.shape, y_test.shape)
        assert_equal(manual_mean.shape, y_test.shape)
        assert_array_almost_equal(predictions, manual_mean)
def test_kneighbors_regressor_multioutput(n_samples=40,
                                          n_features=5,
                                          n_test_pts=10,
                                          n_neighbors=3,
                                          random_state=0):
    """Multi-output k-NN regression stays close to a duplicated target."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    y = np.vstack([y, y]).T  # duplicate the target into two output columns
    y_target = y[:n_test_pts]
    all_weights = ['uniform', 'distance', _weight_func]
    for algorithm, weights in product(ALGORITHMS, all_weights):
        reg = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                            weights=weights,
                                            algorithm=algorithm)
        reg.fit(X, y)
        jitter = 1E-5 * (2 * rng.rand(1, n_features) - 1)
        predictions = reg.predict(X[:n_test_pts] + jitter)
        assert_equal(predictions.shape, y_target.shape)
        assert_true(np.all(np.abs(predictions - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
                                    n_features=3,
                                    n_test_pts=10,
                                    radius=0.5,
                                    random_state=0):
    # Test radius-based neighbors regression
    # Predictions at slightly perturbed training points must stay within
    # radius/2 of the (normalized) radial target.
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    y_target = y[:n_test_pts]
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
                                                       weights=weights,
                                                       algorithm=algorithm)
            neigh.fit(X, y)
            epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = neigh.predict(X[:n_test_pts] + epsilon)
            assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
    """Uniform-weight multi-output radius regression equals the mean of
    the targets of all in-radius neighbors.

    Cleanups: removed a redundant second ``np.array`` wrapping of
    ``y_pred_idx`` (the list comprehension result was already converted
    on the previous line) and the stray space in
    ``neighbors. RadiusNeighborsRegressor``.
    """
    rng = check_random_state(0)
    n_features = 5
    n_samples = 40
    n_output = 4
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_output)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
        rnn.fit(X_train, y_train)
        neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
        # Expected prediction: plain mean of the targets of all neighbors
        # within the (default) radius of each test point.
        y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
                               for idx in neigh_idx])
        y_pred = rnn.predict(X_test)
        assert_equal(y_pred_idx.shape, y_test.shape)
        assert_equal(y_pred.shape, y_test.shape)
        assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
                                              n_features=5,
                                              n_test_pts=10,
                                              n_neighbors=3,
                                              random_state=0):
    # Test radius-based neighbors in multi-output regression with
    # various weightings (the original comment said "k-neighbors").
    # NOTE(review): n_neighbors is forwarded to a radius-based
    # regressor -- presumably accepted by this sklearn version's
    # constructor; verify against the estimator signature in use.
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    y = np.vstack([y, y]).T
    y_target = y[:n_test_pts]
    weights = ['uniform', 'distance', _weight_func]
    for algorithm, weights in product(ALGORITHMS, weights):
        rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
                                                 weights=weights,
                                                 algorithm=algorithm)
        rnn.fit(X, y)
        epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
        y_pred = rnn.predict(X[:n_test_pts] + epsilon)
        assert_equal(y_pred.shape, y_target.shape)
        assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
                                     n_features=5,
                                     n_test_pts=10,
                                     n_neighbors=5,
                                     random_state=0):
    """k-NN regression accepts sparse training and query matrices."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
    for sparse_train in SPARSE_TYPES:
        reg = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                            algorithm='auto')
        reg.fit(sparse_train(X), y)
        for sparse_query in SPARSE_OR_DENSE:
            accuracy = np.mean(reg.predict(sparse_query(X)).round() == y)
            assert_true(accuracy > 0.95)
def test_neighbors_iris():
    """Sanity checks on the iris dataset with every backend."""
    for algorithm in ALGORITHMS:
        clf = neighbors.KNeighborsClassifier(n_neighbors=1,
                                             algorithm=algorithm)
        clf.fit(iris.data, iris.target)
        # 1-NN memorizes the training set exactly.
        assert_array_equal(clf.predict(iris.data), iris.target)
        clf.set_params(n_neighbors=9, algorithm=algorithm)
        clf.fit(iris.data, iris.target)
        assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
        rgs = neighbors.KNeighborsRegressor(n_neighbors=5,
                                            algorithm=algorithm)
        rgs.fit(iris.data, iris.target)
        rounded_acc = np.mean(rgs.predict(iris.data).round() == iris.target)
        assert_true(rounded_acc > 0.95)
def test_neighbors_digits():
    """Sanity check on the digits dataset.

    The 'brute' algorithm has been observed to fail when the input dtype
    is uint8 (overflow in distance calculations), so the uint8 and float
    scores must agree.
    """
    X = digits.data.astype('uint8')
    Y = digits.target
    n_samples = X.shape[0]
    split = int(n_samples * 0.8)
    train = np.arange(0, split)
    test = np.arange(split, n_samples)
    X_train, Y_train = X[train], Y[train]
    X_test, Y_test = X[test], Y[test]
    clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
    score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
    score_float = clf.fit(X_train.astype(float), Y_train).score(
        X_test.astype(float), Y_test)
    assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
    """Check kneighbors_graph entry by entry on a tiny fixed dataset."""
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])
    # One neighbor: each sample is its own nearest neighbor.
    A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
    assert_array_equal(A.toarray(), np.eye(A.shape[0]))
    A = neighbors.kneighbors_graph(X, 1, mode='distance')
    expected = [[0.00, 1.01, 0.],
                [1.01, 0., 0.],
                [0.00, 1.40716026, 0.]]
    assert_array_almost_equal(A.toarray(), expected)
    # Two neighbors.
    A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
    expected = [[1., 1., 0.],
                [1., 1., 0.],
                [0., 1., 1.]]
    assert_array_equal(A.toarray(), expected)
    A = neighbors.kneighbors_graph(X, 2, mode='distance')
    expected = [[0., 1.01, 2.23606798],
                [1.01, 0., 1.40716026],
                [2.23606798, 1.40716026, 0.]]
    assert_array_almost_equal(A.toarray(), expected)
    # Three neighbors: the connectivity graph is complete.
    A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
    assert_array_almost_equal(A.toarray(),
                              [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
    """kneighbors_graph must agree between dense and CSR inputs."""
    rand = np.random.RandomState(seed)
    X_dense = rand.randn(10, 10)
    X_sparse = csr_matrix(X_dense)
    for n_neighbors, mode in product([1, 2, 3],
                                     ["connectivity", "distance"]):
        dense_graph = neighbors.kneighbors_graph(
            X_dense, n_neighbors, mode=mode).toarray()
        sparse_graph = neighbors.kneighbors_graph(
            X_sparse, n_neighbors, mode=mode).toarray()
        assert_array_almost_equal(dense_graph, sparse_graph)
def test_radius_neighbors_graph():
    """Check radius_neighbors_graph entry by entry on a tiny dataset."""
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])
    A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
    expected = [[1., 1., 0.],
                [1., 1., 1.],
                [0., 1., 1.]]
    assert_array_equal(A.toarray(), expected)
    A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
    expected = [[0., 1.01, 0.],
                [1.01, 0., 1.40716026],
                [0., 1.40716026, 0.]]
    assert_array_almost_equal(A.toarray(), expected)
def test_radius_neighbors_graph_sparse(seed=36):
    """radius_neighbors_graph must agree between dense and CSR inputs.

    The loop variable was previously named ``n_neighbors`` although it is
    passed as the ``radius`` argument; renamed for clarity.
    """
    rand = np.random.RandomState(seed)
    X_dense = rand.randn(10, 10)
    X_sparse = csr_matrix(X_dense)
    for radius, mode in product([1, 2, 3],
                                ["connectivity", "distance"]):
        dense_graph = neighbors.radius_neighbors_graph(
            X_dense, radius, mode=mode).toarray()
        sparse_graph = neighbors.radius_neighbors_graph(
            X_sparse, radius, mode=mode).toarray()
        assert_array_almost_equal(dense_graph, sparse_graph)
def test_neighbors_badargs():
    # Test bad argument values: these should all raise ValueErrors
    # Unknown algorithm name at construction.
    assert_raises(ValueError,
                  neighbors.NearestNeighbors,
                  algorithm='blah')
    X = rng.random_sample((10, 2))
    Xsparse = csr_matrix(X)
    y = np.ones(10)
    for cls in (neighbors.KNeighborsClassifier,
                neighbors.RadiusNeighborsClassifier,
                neighbors.KNeighborsRegressor,
                neighbors.RadiusNeighborsRegressor):
        # Invalid constructor arguments for each estimator class.
        assert_raises(ValueError,
                      cls,
                      weights='blah')
        assert_raises(ValueError,
                      cls, p=-1)
        assert_raises(ValueError,
                      cls, algorithm='blah')
        # ball_tree + haversine: predicting on this unfitted estimator and
        # fitting sparse input must both fail.
        nbrs = cls(algorithm='ball_tree', metric='haversine')
        assert_raises(ValueError,
                      nbrs.predict,
                      X)
        assert_raises(ValueError,
                      ignore_warnings(nbrs.fit),
                      Xsparse, y)
        # Degenerate training data: zero samples, then 3-D input.
        nbrs = cls()
        assert_raises(ValueError,
                      nbrs.fit,
                      np.ones((0, 2)), np.ones(0))
        assert_raises(ValueError,
                      nbrs.fit,
                      X[:, :, None], y)
        # Empty query after a valid fit.
        nbrs.fit(X, y)
        assert_raises(ValueError,
                      nbrs.predict,
                      [])
    # Unknown graph ``mode`` on the generic NearestNeighbors estimator.
    nbrs = neighbors.NearestNeighbors().fit(X)
    assert_raises(ValueError,
                  nbrs.kneighbors_graph,
                  X, mode='blah')
    assert_raises(ValueError,
                  nbrs.radius_neighbors_graph,
                  X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
                           n_query_pts=2, n_neighbors=5):
    # Test computing the neighbors for various metrics
    # create a symmetric matrix
    # V.V^T is symmetric, used as the mahalanobis ``VI`` parameter.
    V = rng.rand(n_features, n_features)
    VI = np.dot(V, V.T)
    # (metric name, extra metric keyword arguments) pairs to exercise.
    metrics = [('euclidean', {}),
               ('manhattan', {}),
               ('minkowski', dict(p=1)),
               ('minkowski', dict(p=2)),
               ('minkowski', dict(p=3)),
               ('minkowski', dict(p=np.inf)),
               ('chebyshev', {}),
               ('seuclidean', dict(V=rng.rand(n_features))),
               ('wminkowski', dict(p=3, w=rng.rand(n_features))),
               ('mahalanobis', dict(VI=VI))]
    algorithms = ['brute', 'ball_tree', 'kd_tree']
    X = rng.rand(n_samples, n_features)
    test = rng.rand(n_query_pts, n_features)
    for metric, metric_params in metrics:
        results = []
        # ``p`` is passed as its own estimator parameter, so it is popped
        # out of the kwargs dict; this mutates the entry in ``metrics``
        # in place, which is fine since each entry is visited once.
        p = metric_params.pop('p', 2)
        for algorithm in algorithms:
            # KD tree doesn't support all metrics
            if (algorithm == 'kd_tree' and
                    metric not in neighbors.KDTree.valid_metrics):
                assert_raises(ValueError,
                              neighbors.NearestNeighbors,
                              algorithm=algorithm,
                              metric=metric, metric_params=metric_params)
                continue
            neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
                                               algorithm=algorithm,
                                               metric=metric, p=p,
                                               metric_params=metric_params)
            neigh.fit(X)
            results.append(neigh.kneighbors(test, return_distance=True))
        # All algorithms that support the metric must agree on both the
        # distances and the neighbor indices.
        assert_array_almost_equal(results[0][0], results[1][0])
        assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
    """A user-supplied callable metric must give identical distances for
    the 'auto' and 'brute' algorithms."""
    def custom_metric(x1, x2):
        return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))

    X = np.random.RandomState(42).rand(20, 2)
    nbrs_auto = neighbors.NearestNeighbors(3, algorithm='auto',
                                           metric=custom_metric)
    nbrs_brute = neighbors.NearestNeighbors(3, algorithm='brute',
                                            metric=custom_metric)
    nbrs_auto.fit(X)
    nbrs_brute.fit(X)
    dist_auto, _ = nbrs_auto.kneighbors(X)
    dist_brute, _ = nbrs_brute.kneighbors(X)
    assert_array_almost_equal(dist_auto, dist_brute)
def test_metric_params_interface():
    # Passing extra metric keyword arguments (here ``w``) directly to the
    # estimator triggers a DeprecationWarning (use ``metric_params``).
    assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
                 metric='wminkowski', w=np.ones(10))
    # Putting ``p`` inside ``metric_params`` shadows the estimator's own
    # ``p`` parameter and triggers a SyntaxWarning.
    assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
                 metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
    """Tree-based estimators must raise on sparse prediction input."""
    rand = np.random.RandomState(0)
    X = rand.rand(5, 5)
    y = rand.randint(0, 2, 5)
    estimators = [
        neighbors.KNeighborsClassifier(1, algorithm='kd_tree'),
        neighbors.KNeighborsRegressor(1, algorithm='ball_tree'),
    ]
    for model in estimators:
        model.fit(X, y)
        assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
    """Non-Euclidean metrics in the graph helper functions.

    kneighbors_graph / radius_neighbors_graph must agree with a
    NearestNeighbors estimator fitted with the same metric, and must
    refuse an estimator whose metric differs from the requested one.
    """
    rng = np.random.RandomState(0)
    X = rng.rand(5, 5)
    # Find a reasonable radius: the 16th smallest pairwise distance.
    # BUG FIX: ``np.sort`` returns a sorted *copy*; the original code
    # discarded it, so ``dist_array[15]`` was read from the unsorted array.
    dist_array = np.sort(pairwise_distances(X).flatten())
    radius = dist_array[15]
    # Test kneighbors_graph
    for metric in ['manhattan', 'chebyshev']:
        nbrs_graph = neighbors.kneighbors_graph(
            X, 3, metric=metric).toarray()
        nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
        assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
    # Test radiusneighbors_graph
    for metric in ['manhattan', 'chebyshev']:
        nbrs_graph = neighbors.radius_neighbors_graph(
            X, radius, metric=metric).toarray()
        nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
        assert_array_equal(nbrs_graph,
                           nbrs1.radius_neighbors_graph(X).toarray())
    # Raise error when mismatched metric parameters are supplied.
    X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
    X_nbrs.fit(X)
    assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
                  metric='euclidean')
    X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
    X_nbrs.fit(X)
    assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
                  radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
    """Compare each element of an (object) array of arrays against the
    corresponding entry of ``list_check``."""
    for idx, element in enumerate(nparray):
        assert_array_equal(element, list_check[idx])
def test_k_and_radius_neighbors_train_is_not_query():
    """kneighbors & co. when the query set differs from the training set."""
    X = [[0], [1]]
    test_data = [[2], [1]]
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        nn.fit(X)
        # Plain neighbor queries.
        dist, ind = nn.kneighbors(test_data)
        assert_array_equal(dist, [[1], [0]])
        assert_array_equal(ind, [[1], [1]])
        dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
        check_object_arrays(dist, [[1], [1, 0]])
        check_object_arrays(ind, [[1], [0, 1]])
        # Graph variants.
        assert_array_equal(
            nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
        assert_array_equal(
            nn.kneighbors_graph([[2], [1]], mode='distance').A,
            np.array([[0., 1.], [0., 0.]]))
        rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
        assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et.al when query is None: each training point is
    # queried against the others (a point is never its own neighbor).
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        X = [[0], [1]]
        nn.fit(X)
        dist, ind = nn.kneighbors()
        assert_array_equal(dist, [[1], [1]])
        assert_array_equal(ind, [[1], [0]])
        dist, ind = nn.radius_neighbors(None, radius=1.5)
        check_object_arrays(dist, [[1], [1]])
        check_object_arrays(ind, [[1], [0]])
        # Test the graph variants.
        # BUG FIX: the loop below previously asserted on ``rng`` in both
        # iterations, so ``kng`` was never actually checked.
        rng = nn.radius_neighbors_graph(None, radius=1.5)
        kng = nn.kneighbors_graph(None)
        for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
        X = [[0, 1], [0, 1], [1, 1]]
        nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
        nn.fit(X)
        assert_array_equal(
            nn.kneighbors_graph().A,
            np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
    # Test behavior of kneighbors when duplicates are present in query
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        nn.fit([[0], [1]])
        # Do not do anything special to duplicates.
        kng = nn.kneighbors_graph([[0], [1]], mode='distance')
        # Each query point coincides with a training point, so the stored
        # distances are exact zeros (kept as explicit entries in the CSR).
        assert_array_equal(
            kng.A,
            np.array([[0., 0.], [0., 0.]]))
        assert_array_equal(kng.data, [0., 0.])
        assert_array_equal(kng.indices, [0, 1])
        dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
        check_object_arrays(dist, [[0, 1], [1, 0]])
        check_object_arrays(ind, [[0, 1], [0, 1]])
        rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
        assert_array_equal(rng.A, np.ones((2, 2)))
        rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
                                        mode='distance')
        assert_array_equal(rng.A, [[0, 1], [1, 0]])
        assert_array_equal(rng.indices, [0, 1, 0, 1])
        assert_array_equal(rng.data, [0, 1, 1, 0])
    # Mask the first duplicates when n_duplicates > n_neighbors.
    X = np.ones((3, 1))
    nn = neighbors.NearestNeighbors(n_neighbors=1)
    nn.fit(X)
    dist, ind = nn.kneighbors()
    assert_array_equal(dist, np.zeros((3, 1)))
    assert_array_equal(ind, [[1], [0], [1]])
    # Test that zeros are explicitly marked in kneighbors_graph.
    kng = nn.kneighbors_graph(mode='distance')
    assert_array_equal(
        kng.A, np.zeros((3, 3)))
    assert_array_equal(kng.data, np.zeros(3))
    assert_array_equal(kng.indices, [1., 0., 1.])
    assert_array_equal(
        nn.kneighbors_graph().A,
        np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
    """``include_self`` controls whether each sample counts as its own
    neighbor in the generated graphs."""
    X = [[2, 3], [4, 5]]
    with_self = neighbors.kneighbors_graph(X, 1, include_self=True).A
    without_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
    assert_array_equal(with_self, [[1., 0.], [0., 1.]])
    assert_array_equal(without_self, [[0., 1.], [1., 0.]])
    with_self = neighbors.radius_neighbors_graph(
        X, 5.0, include_self=True).A
    without_self = neighbors.radius_neighbors_graph(
        X, 5.0, include_self=False).A
    assert_array_equal(with_self, [[1., 1.], [1., 1.]])
    assert_array_equal(without_self, [[0., 1.], [1., 0.]])
def test_dtype_convert():
    """String class labels must survive fit/predict unchanged."""
    n_classes = 15
    X = np.eye(n_classes)
    y = list('ABCDEFGHIJKLMNOPQRSTU'[:n_classes])
    classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
    result = classifier.fit(X, y).predict(X)
    assert_array_equal(result, y)
|
bsd-3-clause
|
hugobowne/scikit-learn
|
sklearn/linear_model/tests/test_ransac.py
|
52
|
17482
|
from scipy import sparse
import numpy as np
from scipy import sparse
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_almost_equal
from sklearn.linear_model import LinearRegression, RANSACRegressor, Lasso
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
# 400 collinear points on y = 0.2 * x + 20, stacked into an (n, 2) array.
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
# Three rows are overwritten with gross outliers; the tests below expect
# RANSAC to flag exactly these indices.
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
# Reshape into scikit-learn's (n_samples, 1) feature matrix and 1-D target.
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
    """RANSAC on the corrupted line must flag exactly the planted outliers."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(X, y)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_is_data_valid():
    """If ``is_data_valid`` rejects every subsample, fitting must fail."""
    def is_data_valid(X, y):
        # Each candidate subsample has exactly ``min_samples`` rows.
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False

    X_rand = np.random.rand(10, 2)
    y_rand = np.random.rand(10, 1)
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5,
                             is_data_valid=is_data_valid,
                             random_state=0)
    assert_raises(ValueError, ransac.fit, X_rand, y_rand)
def test_ransac_is_model_valid():
    """If ``is_model_valid`` rejects every model, fitting must fail."""
    def is_model_valid(estimator, X, y):
        # Each candidate model is fit on exactly ``min_samples`` rows.
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False

    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5,
                             is_model_valid=is_model_valid,
                             random_state=0)
    assert_raises(ValueError, ransac.fit, X, y)
def test_ransac_max_trials():
    """``max_trials`` bounds the iteration count; zero trials is invalid."""
    zero_trials = RANSACRegressor(LinearRegression(), min_samples=2,
                                  residual_threshold=5, max_trials=0,
                                  random_state=0)
    assert_raises(ValueError, zero_trials.fit, X, y)
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, max_trials=11,
                             random_state=0)
    # ``n_trials_`` only exists after fitting.
    assert getattr(ransac, 'n_trials_', None) is None
    ransac.fit(X, y)
    assert_equal(ransac.n_trials_, 2)
def test_ransac_stop_n_inliers():
    """Reaching ``stop_n_inliers`` terminates after the first trial."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, stop_n_inliers=2,
                             random_state=0)
    ransac.fit(X, y)
    assert_equal(ransac.n_trials_, 1)
def test_ransac_stop_score():
    """Reaching ``stop_score`` terminates after the first trial."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, stop_score=0,
                             random_state=0)
    ransac.fit(X, y)
    assert_equal(ransac.n_trials_, 1)
def test_ransac_score():
    """The fitted model scores perfectly on inliers, worse on outliers."""
    X_line = np.arange(100)[:, None]
    y_line = np.zeros((100, ))
    # Plant two outliers at the start of the otherwise all-zero target.
    y_line[0] = 1
    y_line[1] = 100
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=0.5, random_state=0)
    ransac.fit(X_line, y_line)
    assert_equal(ransac.score(X_line[2:], y_line[2:]), 1)
    assert_less(ransac.score(X_line[:2], y_line[:2]), 1)
def test_ransac_predict():
    """Predictions follow the inlier model (zeros), ignoring outliers."""
    X_line = np.arange(100)[:, None]
    y_line = np.zeros((100, ))
    # Plant two outliers at the start of the otherwise all-zero target.
    y_line[0] = 1
    y_line[1] = 100
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=0.5, random_state=0)
    ransac.fit(X_line, y_line)
    assert_equal(ransac.predict(X_line), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
    # When residual_threshold=0.0 there are no inliers and a
    # ValueError with a message should be raised
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=0.0, random_state=0)
    # Raw string: ``\.`` is an invalid escape sequence in a plain string
    # literal (DeprecationWarning, later SyntaxWarning); the runtime
    # pattern is unchanged.
    assert_raises_regexp(ValueError,
                         r"No inliers.*residual_threshold.*0\.0",
                         ransac_estimator.fit, X, y)
def test_ransac_sparse_coo():
    """COO-sparse input must yield the same inlier mask as dense input."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(sparse.coo_matrix(X), y)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_sparse_csr():
    """CSR-sparse input must yield the same inlier mask as dense input."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(sparse.csr_matrix(X), y)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_sparse_csc():
    """CSC-sparse input must yield the same inlier mask as dense input."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(sparse.csc_matrix(X), y)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_none_estimator():
    """``None`` as base estimator must predict like an explicit
    LinearRegression with the same settings."""
    explicit = RANSACRegressor(LinearRegression(), min_samples=2,
                               residual_threshold=5, random_state=0)
    implicit = RANSACRegressor(None, 2, 5, random_state=0)
    explicit.fit(X, y)
    implicit.fit(X, y)
    assert_array_almost_equal(explicit.predict(X), implicit.predict(X))
def test_ransac_min_n_samples():
    # ``min_samples`` may be an absolute count or a fraction of n_samples;
    # invalid values must be rejected at fit time.
    base_estimator = LinearRegression()
    # Absolute integer count.
    ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0)
    # Same count expressed as a fraction of the dataset size.
    ransac_estimator2 = RANSACRegressor(base_estimator,
                                        min_samples=2. / X.shape[0],
                                        residual_threshold=5, random_state=0)
    # Negative: invalid (raises below).
    ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
                                        residual_threshold=5, random_state=0)
    # Non-integral float > 1: invalid (raises below).
    ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
                                        residual_threshold=5, random_state=0)
    # Integral-valued float > 1: accepted as an absolute count.
    ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
                                        residual_threshold=5, random_state=0)
    # Default ``min_samples``.
    ransac_estimator6 = RANSACRegressor(base_estimator,
                                        residual_threshold=5, random_state=0)
    # More samples than available: invalid (raises below).
    ransac_estimator7 = RANSACRegressor(base_estimator,
                                        min_samples=X.shape[0] + 1,
                                        residual_threshold=5, random_state=0)
    ransac_estimator1.fit(X, y)
    ransac_estimator2.fit(X, y)
    ransac_estimator5.fit(X, y)
    ransac_estimator6.fit(X, y)
    # All valid configurations must produce identical predictions.
    assert_array_almost_equal(ransac_estimator1.predict(X),
                              ransac_estimator2.predict(X))
    assert_array_almost_equal(ransac_estimator1.predict(X),
                              ransac_estimator5.predict(X))
    assert_array_almost_equal(ransac_estimator1.predict(X),
                              ransac_estimator6.predict(X))
    assert_raises(ValueError, ransac_estimator3.fit, X, y)
    assert_raises(ValueError, ransac_estimator4.fit, X, y)
    assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
    """Multi-output (2-D) targets are handled like the 1-D case."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    # Stack the target three times to get a multi-output problem.
    yyy = np.column_stack([y, y, y])
    ransac.fit(X, yyy)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
# XXX: Remove in 0.20
def test_ransac_residual_metric():
    # ``residual_metric`` is deprecated (superseded by ``loss``): supplying
    # it must warn, yet still reproduce the default metric's predictions.
    residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
    residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
    yyy = np.column_stack([y, y, y])
    base_estimator = LinearRegression()
    ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0)
    ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        residual_metric=residual_metric1)
    ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        residual_metric=residual_metric2)
    # multi-dimensional
    ransac_estimator0.fit(X, yyy)
    assert_warns(DeprecationWarning, ransac_estimator1.fit, X, yyy)
    assert_warns(DeprecationWarning, ransac_estimator2.fit, X, yyy)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator1.predict(X))
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))
    # one-dimensional
    ransac_estimator0.fit(X, y)
    assert_warns(DeprecationWarning, ransac_estimator2.fit, X, y)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))
def test_ransac_residual_loss():
    # Custom ``loss`` callables and the string loss must reproduce the
    # default loss's predictions, for 1-D and multi-output targets.
    loss_multi1 = lambda y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
    loss_multi2 = lambda y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
    loss_mono = lambda y_true, y_pred: np.abs(y_true - y_pred)
    yyy = np.column_stack([y, y, y])
    base_estimator = LinearRegression()
    ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0)
    ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        loss=loss_multi1)
    ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        loss=loss_multi2)
    # multi-dimensional
    ransac_estimator0.fit(X, yyy)
    ransac_estimator1.fit(X, yyy)
    ransac_estimator2.fit(X, yyy)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator1.predict(X))
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))
    # one-dimensional
    ransac_estimator0.fit(X, y)
    ransac_estimator2.loss = loss_mono
    ransac_estimator2.fit(X, y)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))
    ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        loss="squared_loss")
    ransac_estimator3.fit(X, y)
    # BUG FIX: this assertion previously compared estimator0 against
    # estimator2 a second time, leaving estimator3 unchecked.
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator3.predict(X))
def test_ransac_default_residual_threshold():
    """Without an explicit ``residual_threshold`` the default must still
    detect the planted outliers."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             random_state=0)
    ransac.fit(X, y)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_dynamic_max_trials():
    # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
    #   Hartley, R.~I. and Zisserman, A., 2004,
    #   Multiple View Geometry in Computer Vision, Second Edition,
    #   Cambridge University Press, ISBN: 0521540518
    # Signature: _dynamic_max_trials(n_inliers, n_samples, min_samples,
    #                                probability)
    # e = 0%, min_samples = X
    assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
    # e = 5%, min_samples = 2
    assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
    # e = 10%, min_samples = 2
    assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
    # e = 30%, min_samples = 2
    assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
    # e = 50%, min_samples = 2
    assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
    # e = 5%, min_samples = 8
    assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
    # e = 10%, min_samples = 8
    assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
    # e = 30%, min_samples = 8
    assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
    # e = 50%, min_samples = 8
    assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
    # e = 0%, min_samples = 10
    # Degenerate probabilities: 0 requires no trials; 1 can never be
    # guaranteed, so the trial count is unbounded.
    assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
    assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
    # ``stop_probability`` outside [0, 1] must be rejected at fit time.
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       stop_probability=-0.1)
    assert_raises(ValueError, ransac_estimator.fit, X, y)
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       stop_probability=1.1)
    assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_fit_sample_weight():
    # ``sample_weight`` support: integer weights must be equivalent to
    # physically repeating the corresponding rows.
    ransac_estimator = RANSACRegressor(random_state=0)
    n_samples = y.shape[0]
    weights = np.ones(n_samples)
    ransac_estimator.fit(X, y, weights)
    # sanity check
    assert_equal(ransac_estimator.inlier_mask_.shape[0], n_samples)
    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False
    # check that mask is correct
    assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
    # check that fit(X)  = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
    #   X = X1 repeated n1 times, X2 repeated n2 times and so forth
    random_state = check_random_state(0)
    X_ = random_state.randint(0, 200, [10, 1])
    y_ = np.ndarray.flatten(0.2 * X_ + 2)
    sample_weight = random_state.randint(0, 10, 10)
    # One additional outlier row with its own integer weight.
    outlier_X = random_state.randint(0, 1000, [1, 1])
    outlier_weight = random_state.randint(0, 10, 1)
    outlier_y = random_state.randint(-1000, 0, 1)
    # Expanded dataset: each row repeated ``weight`` times.
    X_flat = np.append(np.repeat(X_, sample_weight, axis=0),
                       np.repeat(outlier_X, outlier_weight, axis=0), axis=0)
    y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0),
                                np.repeat(outlier_y, outlier_weight, axis=0),
                                          axis=0))
    ransac_estimator.fit(X_flat, y_flat)
    ref_coef_ = ransac_estimator.estimator_.coef_
    # Compact dataset + weights must reproduce the expanded-dataset fit.
    sample_weight = np.append(sample_weight, outlier_weight)
    X_ = np.append(X_, outlier_X, axis=0)
    y_ = np.append(y_, outlier_y)
    ransac_estimator.fit(X_, y_, sample_weight)
    assert_almost_equal(ransac_estimator.estimator_.coef_, ref_coef_)
    # check that if base_estimator.fit doesn't support
    # sample_weight, raises error
    base_estimator = Lasso()
    ransac_estimator = RANSACRegressor(base_estimator)
    assert_raises(ValueError, ransac_estimator.fit, X, y, weights)
|
bsd-3-clause
|
nzavagli/UnrealPy
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/numpy-1.9.2/numpy/core/function_base.py
|
23
|
6262
|
from __future__ import division, absolute_import, print_function
__all__ = ['logspace', 'linspace']
from . import numeric as _nx
from .numeric import array, result_type
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
    """
    Return evenly spaced numbers over a specified interval.

    Returns `num` evenly spaced samples, calculated over the
    interval [`start`, `stop`].
    The endpoint of the interval can optionally be excluded.

    Parameters
    ----------
    start : scalar
        The starting value of the sequence.
    stop : scalar
        The end value of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced samples, so that `stop` is excluded. Note that the step
        size changes when `endpoint` is False.
    num : int, optional
        Number of samples to generate. Default is 50.
    endpoint : bool, optional
        If True, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    retstep : bool, optional
        If True, return (`samples`, `step`), where `step` is the spacing
        between samples.
    dtype : dtype, optional
        The type of the output array. If `dtype` is not given, infer the data
        type from the other input arguments.

        .. versionadded:: 1.9.0

    Returns
    -------
    samples : ndarray
        There are `num` equally spaced samples in the closed interval
        ``[start, stop]`` or the half-open interval ``[start, stop)``
        (depending on whether `endpoint` is True or False).
    step : float
        Only returned if `retstep` is True.
        Size of spacing between samples.

    See Also
    --------
    arange : Similar to `linspace`, but uses a step size (instead of the
        number of samples).
    logspace : Samples uniformly distributed in log space.

    Examples
    --------
    >>> np.linspace(2.0, 3.0, num=5)
    array([ 2.  ,  2.25,  2.5 ,  2.75,  3.  ])
    >>> np.linspace(2.0, 3.0, num=5, endpoint=False)
    array([ 2. ,  2.2,  2.4,  2.6,  2.8])
    >>> np.linspace(2.0, 3.0, num=5, retstep=True)
    (array([ 2.  ,  2.25,  2.5 ,  2.75,  3.  ]), 0.25)

    Graphical illustration:

    >>> import matplotlib.pyplot as plt
    >>> N = 8
    >>> y = np.zeros(N)
    >>> x1 = np.linspace(0, 10, N, endpoint=True)
    >>> x2 = np.linspace(0, 10, N, endpoint=False)
    >>> plt.plot(x1, y, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.plot(x2, y + 0.5, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.ylim([-0.5, 1])
    (-0.5, 1)
    >>> plt.show()
    """
    num = int(num)
    # Convert float/complex array scalars to float, gh-3504
    start = start * 1.
    stop = stop * 1.
    if dtype is None:
        # Infer a dtype wide enough for both endpoints and the float step.
        dtype = result_type(start, stop, float(num))
    if num <= 0:
        # NOTE(review): this (and the num == 1 branch below) returns a bare
        # array even when retstep=True — confirm callers handle the
        # inconsistent return shape.
        return array([], dtype)
    if endpoint:
        if num == 1:
            return array([start], dtype=dtype)
        step = (stop-start)/float((num-1))
        y = _nx.arange(0, num, dtype=dtype) * step + start
        # Overwrite the last sample so floating-point rounding cannot push
        # it off the exact ``stop`` value.
        y[-1] = stop
    else:
        step = (stop-start)/float(num)
        y = _nx.arange(0, num, dtype=dtype) * step + start
    if retstep:
        return y.astype(dtype), step
    else:
        return y.astype(dtype)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
    """
    Return numbers spaced evenly on a log scale.

    In linear space, the sequence starts at ``base ** start``
    (`base` to the power of `start`) and ends with ``base ** stop``
    (see `endpoint` below).

    Parameters
    ----------
    start : float
        ``base ** start`` is the starting value of the sequence.
    stop : float
        ``base ** stop`` is the final value of the sequence, unless `endpoint`
        is False. In that case, ``num + 1`` values are spaced over the
        interval in log-space, of which all but the last (a sequence of
        length ``num``) are returned.
    num : integer, optional
        Number of samples to generate. Default is 50.
    endpoint : boolean, optional
        If true, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    base : float, optional
        The base of the log space. The step size between the elements in
        ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
        Default is 10.0.
    dtype : dtype
        The type of the output array. If `dtype` is not given, infer the data
        type from the other input arguments.

    Returns
    -------
    samples : ndarray
        `num` samples, equally spaced on a log scale.

    See Also
    --------
    arange : Similar to linspace, with the step size specified instead of the
        number of samples. Note that, when used with a float endpoint, the
        endpoint may or may not be included.
    linspace : Similar to logspace, but with the samples uniformly distributed
        in linear space, instead of log space.

    Notes
    -----
    Logspace is equivalent to the code

    >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
    ... # doctest: +SKIP
    >>> power(base, y).astype(dtype)
    ... # doctest: +SKIP

    Examples
    --------
    >>> np.logspace(2.0, 3.0, num=4)
    array([  100.        ,   215.443469  ,   464.15888336,  1000.        ])
    >>> np.logspace(2.0, 3.0, num=4, endpoint=False)
    array([ 100.        ,  177.827941  ,  316.22776602,  562.34132519])
    >>> np.logspace(2.0, 3.0, num=4, base=2.0)
    array([ 4.        ,  5.0396842 ,  6.34960421,  8.        ])

    Graphical illustration:

    >>> import matplotlib.pyplot as plt
    >>> N = 10
    >>> x1 = np.logspace(0.1, 1, N, endpoint=True)
    >>> x2 = np.logspace(0.1, 1, N, endpoint=False)
    >>> y = np.zeros(N)
    >>> plt.plot(x1, y, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.plot(x2, y + 0.5, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.ylim([-0.5, 1])
    (-0.5, 1)
    >>> plt.show()
    """
    # Generate the exponents linearly (in the default dtype — ``dtype`` is
    # applied only after exponentiation), then raise ``base`` to them.
    y = linspace(start, stop, num=num, endpoint=endpoint)
    if dtype is None:
        return _nx.power(base, y)
    return _nx.power(base, y).astype(dtype)
|
mit
|
CompPhysics/ThesisProjects
|
doc/MSc/msc_students/former/sean/Thesis/Codes/PythonCode/MBPT4.py
|
1
|
4664
|
from sympy import *
from pylab import *
import matplotlib.pyplot as plt
#below_fermi = (0,1,2,3)
#above_fermi = (4,5,6,7)
#states = [(1,1),(1,-1),(2,1),(2,-1),(3,1),(3,-1),(4,1),(4,-1)]
N = 4
NB = 8
g = Symbol('g')
#linear in energy (not box potential)
def makeStateSpace(N, NB):
    """Build the pairing-model single-particle basis.

    Parameters
    ----------
    N : int
        Number of particles (= number of occupied spin-orbitals).
    NB : int
        Size of the basis; every level carries a spin-up and a spin-down
        state, so NB is expected to be even.

    Returns
    -------
    states : tuple of (level, spin) pairs, spin in {+1, -1}.
    below_fermi : tuple of occupied orbital indices (0 .. N-1).
    above_fermi : tuple of unoccupied orbital indices (N .. NB-1).
    """
    states = []
    # BUG FIX: use floor division — under Python 3, ``NB/2`` is a float and
    # ``range`` would raise a TypeError; ``//`` is identical for Python 2
    # ints, so behavior there is unchanged.
    for level in range(1, NB // 2 + 1):
        states.append((level, 1))
        states.append((level, -1))
    below_fermi = range(0, N)
    above_fermi = range(N, NB)
    return tuple(states), tuple(below_fermi), tuple(above_fermi)
# Module-level basis shared by h0/f/assym/eps and the diagram sums below.
states, below_fermi, above_fermi = makeStateSpace(N,NB)
#print states
def h0(p, q):
    """One-body matrix element: diagonal, with energy (level - 1)."""
    if p != q:
        return 0
    level, _spin = states[p]
    return (level - 1)
def f(p, q):
    """Fock-matrix element: h0 plus interaction with the occupied states.

    NOTE(review): diagonal elements are defined to be 0 here; verify this
    is the intended convention (the guard could plausibly be inverted).
    """
    if p == q:
        return 0
    total = h0(p, q)
    for hole in below_fermi:
        total += assym(p, hole, q, hole)
    return total
def assym(p, q, r, s):
    """Antisymmetrized two-body element <pq||rs> of the pairing force.

    The interaction only connects spin-up/spin-down pairs sitting on a
    single level on each side; with spins restricted to +/-1 the two final
    branches cover every case that survives the guards.
    """
    level_p, spin_p = states[p]
    level_q, spin_q = states[q]
    level_r, spin_r = states[r]
    level_s, spin_s = states[s]
    # Both bra orbitals must share a level, and both ket orbitals likewise.
    if level_p != level_q or level_r != level_s:
        return 0
    # Each pair must consist of opposite spins.
    if spin_p == spin_q or spin_r == spin_s:
        return 0
    if spin_p == spin_r and spin_q == spin_s:
        return -g / 2.
    if spin_p == spin_s and spin_q == spin_r:
        return g / 2.
def eps(holes, particles):
    """Energy denominator: sum of hole single-particle energies minus the
    sum of particle single-particle energies (each energy is level - 1)."""
    energy = 0
    for h in holes:
        level, _ = states[h]
        energy += level - 1
    for a in particles:
        level, _ = states[a]
        energy -= level - 1
    return energy
#Want to do these calculations with einsum?
#This is problem 8.10 (pairing interaction) in lnb.pdf, pdf page 299. See 8.7.1, pdf page 269, for comments
#We want ewald potential for HEG, eq. 2.14 in Audun's task, page 89

# Second/third-order MBPT diagrams, evaluated by brute-force summation over
# particle (above_fermi) and hole (below_fermi) indices. All results are
# symbolic polynomials in the coupling g.

# Diagram 1
s1 = 0
for a in above_fermi:
    for b in above_fermi:
        for i in below_fermi:
            for j in below_fermi:
                s1 += 0.25*assym(a,b,i,j)*assym(i,j,a,b)/eps((i,j),(a,b))

# Diagram 3
s3 = 0
for a in above_fermi:
    for b in above_fermi:
        for c in above_fermi:
            for i in below_fermi:
                for j in below_fermi:
                    for k in below_fermi:
                        s3 += assym(i,j,a,b)*assym(a,c,j,k)*assym(b,k,c,i)/eps((i,j),(a,b))/eps((k,j),(a,c))

# Diagram 4
s4 = 0
for a in above_fermi:
    for b in above_fermi:
        for c in above_fermi:
            for d in above_fermi:
                for i in below_fermi:
                    for j in below_fermi:
                        s4 += 0.125*assym(i,j,a,b)*assym(a,b,c,d)*assym(c,d,i,j)/eps((i,j),(a,b))/eps((i,j),(c,d))

# Diagram 5
s5 = 0
for a in above_fermi:
    for b in above_fermi:
        for i in below_fermi:
            for j in below_fermi:
                for k in below_fermi:
                    for l in below_fermi:
                        s5 += 0.125*assym(i,j,a,b)*assym(k,l,i,j)*assym(a,b,k,l)/eps((i,j),(a,b))/eps((k,l),(a,b))

# Diagram 8
s8 = 0
for a in above_fermi:
    for b in above_fermi:
        for i in below_fermi:
            for j in below_fermi:
                for k in below_fermi:
                    s8 -= 0.5*assym(i,j,a,b)*assym(a,b,i,k)*f(k,j)/eps((i,j),(a,b))/eps((i,k),(a,b))

# Diagram 9
s9 = 0
for a in above_fermi:
    for b in above_fermi:
        for c in above_fermi:
            for i in below_fermi:
                for j in below_fermi:
                    s9 += 0.5*assym(i,j,a,b)*assym(a,c,i,j)*f(b,c)/eps((i,j),(a,b))/eps((i,j),(a,c))

# Pre-evaluated fourth-order diagram contributions (coefficients of g**4).
# NOTE(review): s3, s8 and s9 are computed above but never enter corr4 below
# -- confirm whether they were meant to be included.
s_5 = -0.0291521990740741*g**4
s14 = -0.0308883101851853*g**4
s34 = 0.0163049768518519*g**4
s36 = -0.0145760995370371*g**4
s38 = -0.0201099537037037*g**4
s39 = 0.0176938657407407*g**4

# Scan the coupling g in [-1, 1]; exact result comes from diagonalizing the
# 6x6 pairing Hamiltonian in the seniority-zero space, CCD numbers are
# hard-coded reference values.
ga = linspace(-1,1,5)
e1 = []
corrCCD = [-0.21895, -0.06306, 0.0, -0.08336, -0.36956]
corr4 = []
for g_val in ga:
    H1 = matrix([[2-g_val , -g_val/2., -g_val/2., -g_val/2., -g_val/2., 0],
                 [-g_val/2., 4-g_val, -g_val/2., -g_val/2., 0., -g_val/2.],
                 [-g_val/2., -g_val/2., 6-g_val, 0, -g_val/2., -g_val/2.],
                 [-g_val/2., -g_val/2., 0, 6-g_val, -g_val/2., -g_val/2.],
                 [-g_val/2., 0, -g_val/2., -g_val/2., 8-g_val, -g_val/2.],
                 [0 , -g_val/2., -g_val/2., -g_val/2., -g_val/2., 10-g_val]])
    u1, v1 = linalg.eig(H1)
    e1.append(min(u1))
    # Substitute the numeric coupling into the symbolic MBPT4 sum.
    corr4.append((s1+s4+s5+2*s_5+2*s14+2*s34+2*s36+s38+2*s39).subs(g,g_val))
# Correlation energy = ground state minus the reference energy (2 - g).
exact = e1 - (2-ga)
plt.axis([-1,1,-0.5,0.05])
plt.xlabel(r'Interaction strength, $g$', fontsize=16)
plt.ylabel(r'Correlation energy', fontsize=16)
exact = plt.plot(ga, exact,'b-*',linewidth = 2.0, label = 'Exact')
mbpt4 = plt.plot(ga, corr4,'r:.', linewidth = 2.0, label = 'MBPT4')
ccd = plt.plot(ga, corrCCD, 'm:v',linewidth = 2.0, label = 'CCD')
plt.legend()
plt.savefig('CCDMBPT4theory.pdf', format='pdf')
plt.show()
|
cc0-1.0
|
humdings/zipline
|
tests/utils/test_pandas_utils.py
|
3
|
6827
|
"""
Tests for zipline/utils/pandas_utils.py
"""
import pandas as pd
from zipline.testing import parameter_space, ZiplineTestCase
from zipline.testing.predicates import assert_equal
from zipline.utils.pandas_utils import (
categorical_df_concat,
nearest_unequal_elements
)
class TestNearestUnequalElements(ZiplineTestCase):
    """Tests for zipline.utils.pandas_utils.nearest_unequal_elements."""

    @parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True)
    def test_nearest_unequal_elements(self, tz):
        # Four sorted, unique timestamps; the probe dates below cover
        # before-the-range, exact hits, gaps, and after-the-range.
        dts = pd.to_datetime(
            ['2014-01-01', '2014-01-05', '2014-01-06', '2014-01-09'],
        ).tz_localize(tz)

        def t(s):
            # Localize a date string in the parametrized tz; pass None through.
            return None if s is None else pd.Timestamp(s, tz=tz)

        for dt, before, after in (('2013-12-30', None, '2014-01-01'),
                                  ('2013-12-31', None, '2014-01-01'),
                                  ('2014-01-01', None, '2014-01-05'),
                                  ('2014-01-02', '2014-01-01', '2014-01-05'),
                                  ('2014-01-03', '2014-01-01', '2014-01-05'),
                                  ('2014-01-04', '2014-01-01', '2014-01-05'),
                                  ('2014-01-05', '2014-01-01', '2014-01-06'),
                                  ('2014-01-06', '2014-01-05', '2014-01-09'),
                                  ('2014-01-07', '2014-01-06', '2014-01-09'),
                                  ('2014-01-08', '2014-01-06', '2014-01-09'),
                                  ('2014-01-09', '2014-01-06', None),
                                  ('2014-01-10', '2014-01-09', None),
                                  ('2014-01-11', '2014-01-09', None)):
            computed = nearest_unequal_elements(dts, t(dt))
            expected = (t(before), t(after))
            self.assertEqual(computed, expected)

    @parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True)
    def test_nearest_unequal_elements_short_dts(self, tz):
        # Length 1.
        dts = pd.to_datetime(['2014-01-01']).tz_localize(tz)

        def t(s):
            return None if s is None else pd.Timestamp(s, tz=tz)

        for dt, before, after in (('2013-12-31', None, '2014-01-01'),
                                  ('2014-01-01', None, None),
                                  ('2014-01-02', '2014-01-01', None)):
            computed = nearest_unequal_elements(dts, t(dt))
            expected = (t(before), t(after))
            self.assertEqual(computed, expected)

        # Length 0
        dts = pd.to_datetime([]).tz_localize(tz)
        for dt, before, after in (('2013-12-31', None, None),
                                  ('2014-01-01', None, None),
                                  ('2014-01-02', None, None)):
            computed = nearest_unequal_elements(dts, t(dt))
            expected = (t(before), t(after))
            self.assertEqual(computed, expected)

    def test_nearest_unequal_bad_input(self):
        # Duplicate timestamps are rejected.
        with self.assertRaises(ValueError) as e:
            nearest_unequal_elements(
                pd.to_datetime(['2014', '2014']),
                pd.Timestamp('2014'),
            )
        self.assertEqual(str(e.exception), 'dts must be unique')

        # Unsorted timestamps are rejected.
        with self.assertRaises(ValueError) as e:
            nearest_unequal_elements(
                pd.to_datetime(['2014', '2013']),
                pd.Timestamp('2014'),
            )
        self.assertEqual(
            str(e.exception),
            'dts must be sorted in increasing order',
        )
class TestCatDFConcat(ZiplineTestCase):
    """Tests for zipline.utils.pandas_utils.categorical_df_concat."""

    def test_categorical_df_concat(self):
        # Three frames with identical columns/dtypes but different category
        # sets in 'A' and 'C'; concat must union the categories.
        inp = [
            pd.DataFrame(
                {
                    'A': pd.Series(['a', 'b', 'c'], dtype='category'),
                    'B': pd.Series([100, 102, 103], dtype='int64'),
                    'C': pd.Series(['x', 'x', 'x'], dtype='category'),
                }
            ),
            pd.DataFrame(
                {
                    'A': pd.Series(['c', 'b', 'd'], dtype='category'),
                    'B': pd.Series([103, 102, 104], dtype='int64'),
                    'C': pd.Series(['y', 'y', 'y'], dtype='category'),
                }
            ),
            pd.DataFrame(
                {
                    'A': pd.Series(['a', 'b', 'd'], dtype='category'),
                    'B': pd.Series([101, 102, 104], dtype='int64'),
                    'C': pd.Series(['z', 'z', 'z'], dtype='category'),
                }
            ),
        ]
        result = categorical_df_concat(inp)

        expected = pd.DataFrame(
            {
                'A': pd.Series(
                    ['a', 'b', 'c', 'c', 'b', 'd', 'a', 'b', 'd'],
                    dtype='category'
                ),
                'B': pd.Series(
                    [100, 102, 103, 103, 102, 104, 101, 102, 104],
                    dtype='int64'
                ),
                'C': pd.Series(
                    ['x', 'x', 'x', 'y', 'y', 'y', 'z', 'z', 'z'],
                    dtype='category'
                ),
            },
        )
        # Indexes of the inputs are preserved (not reset) by the concat.
        # NOTE(review): pd.Int64Index is deprecated/removed in pandas >= 2.0;
        # this matches the pandas version zipline pins.
        expected.index = pd.Int64Index([0, 1, 2, 0, 1, 2, 0, 1, 2])
        assert_equal(expected, result)
        # Category sets must be the union across the inputs.
        assert_equal(
            expected['A'].cat.categories,
            result['A'].cat.categories
        )
        assert_equal(
            expected['C'].cat.categories,
            result['C'].cat.categories
        )

    def test_categorical_df_concat_value_error(self):
        # Same column name, different dtype ('int64' vs 'float64').
        mismatched_dtypes = [
            pd.DataFrame(
                {
                    'A': pd.Series(['a', 'b', 'c'], dtype='category'),
                    'B': pd.Series([100, 102, 103], dtype='int64'),
                }
            ),
            pd.DataFrame(
                {
                    'A': pd.Series(['c', 'b', 'd'], dtype='category'),
                    'B': pd.Series([103, 102, 104], dtype='float64'),
                }
            ),
        ]
        # Same dtypes, different column names ('B' vs 'X').
        mismatched_column_names = [
            pd.DataFrame(
                {
                    'A': pd.Series(['a', 'b', 'c'], dtype='category'),
                    'B': pd.Series([100, 102, 103], dtype='int64'),
                }
            ),
            pd.DataFrame(
                {
                    'A': pd.Series(['c', 'b', 'd'], dtype='category'),
                    'X': pd.Series([103, 102, 104], dtype='int64'),
                }
            ),
        ]

        with self.assertRaises(ValueError) as cm:
            categorical_df_concat(mismatched_dtypes)
        self.assertEqual(
            str(cm.exception),
            "Input DataFrames must have the same columns/dtypes."
        )

        with self.assertRaises(ValueError) as cm:
            categorical_df_concat(mismatched_column_names)
        self.assertEqual(
            str(cm.exception),
            "Input DataFrames must have the same columns/dtypes."
        )
|
apache-2.0
|
ApolloAuto/apollo
|
modules/tools/prediction/data_pipelines/cruise_models.py
|
3
|
5275
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import argparse
import logging
import os
from sklearn.model_selection import train_test_split
from sklearn.utils import class_weight
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader, sampler
import h5py
import numpy as np
import sklearn
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from modules.tools.prediction.data_pipelines.common.configure import parameters
from proto.cruise_model_pb2 import TensorParameter, InputParameter,\
Conv1dParameter, DenseParameter, ActivationParameter, MaxPool1dParameter,\
AvgPool1dParameter, LaneFeatureConvParameter, ObsFeatureFCParameter,\
ClassifyParameter, RegressParameter, CruiseModelParameter
"""
@requirement:
pytorch 0.4.1
"""
'''
This file includes all model definitions and related loss functions.
'''
'''
Model details:
- Fully-connected layers for classification and regression, respectively.
- It will compute a classification score indicating the probability
of the obstacle choosing the given lane.
- It will also compute a time indicating how soon the obstacle will reach
the center of the given lane.
'''
class FullyConn_NN(torch.nn.Module):
    """Two-headed fully-connected network over a 174-dim feature vector.

    The classification head emits a probability-like score in (0, 1)
    (final Sigmoid); the regression head emits a non-negative time
    estimate (final ReLU).
    """

    def __init__(self):
        super(FullyConn_NN, self).__init__()

        # (in_features, out_features, dropout_p) for each hidden layer of the
        # classification head; every hidden layer is Linear -> Sigmoid -> Dropout.
        cls_hidden = [(174, 88, 0.3), (88, 55, 0.2), (55, 23, 0.3), (23, 10, 0.0)]
        cls_layers = []
        for n_in, n_out, p_drop in cls_hidden:
            cls_layers += [nn.Linear(n_in, n_out), nn.Sigmoid(), nn.Dropout(p_drop)]
        cls_layers += [nn.Linear(10, 1), nn.Sigmoid()]
        self.classify = torch.nn.Sequential(*cls_layers)

        # Regression head: Linear -> ReLU -> Dropout hidden layers, ReLU output.
        reg_layers = []
        for n_in, n_out, p_drop in [(174, 88, 0.1), (88, 23, 0.1)]:
            reg_layers += [nn.Linear(n_in, n_out), nn.ReLU(), nn.Dropout(p_drop)]
        reg_layers += [nn.Linear(23, 1), nn.ReLU()]
        self.regress = torch.nn.Sequential(*reg_layers)

    def forward(self, x):
        """Return (classification score, regression output) for input x."""
        return self.classify(x), self.regress(x)
class FCNN_CNN1D(torch.nn.Module):
    """Hybrid model: 1D convolutions over the lane-point sequence plus a
    fully-connected encoder for the obstacle features, feeding a
    classification head and a regression head.

    Expected input layout (see forward): the last 80 columns are 20 lane
    points x 4 channels; the remaining 68 columns are obstacle features.
    """

    def __init__(self):
        super(FCNN_CNN1D, self).__init__()
        # Conv stack over the (4-channel, length-20) lane sequence.
        # Commented-out layers are kept as a record of tried variants.
        self.lane_feature_conv = torch.nn.Sequential(
            nn.Conv1d(4, 10, 3, stride=1),\
            # nn.BatchNorm1d(10),\
            nn.ReLU(),\
            #nn.Conv1d(10, 16, 3, stride=2),\
            # nn.BatchNorm1d(16),\
            # nn.ReLU(),\
            nn.Conv1d(10, 25, 3, stride=2),\
            # nn.BatchNorm1d(25)
        )
        # Max- and avg-pooled summaries of the conv output are concatenated.
        self.lane_feature_maxpool = nn.MaxPool1d(4)
        self.lane_feature_avgpool = nn.AvgPool1d(4)
        self.lane_feature_dropout = nn.Dropout(0.0)

        # Dense encoder for the 68 obstacle features -> 24 dims.
        self.obs_feature_fc = torch.nn.Sequential(
            nn.Linear(68, 40),
            nn.Sigmoid(),
            nn.Dropout(0.0),
            nn.Linear(40, 24),
            nn.Sigmoid(),
            nn.Dropout(0.0),
        )

        # Classification head over the fused 124-dim feature; the final
        # Sigmoid is commented out (raw logit output).
        self.classify = torch.nn.Sequential(
            nn.Linear(124, 66),
            nn.Sigmoid(),
            nn.Dropout(0.3),
            nn.Linear(66, 48),
            nn.Sigmoid(),
            nn.Dropout(0.1),
            nn.Linear(48, 11),
            nn.Sigmoid(),
            nn.Dropout(0.1),
            nn.Linear(11, 1),\
            # nn.Sigmoid()
        )
        # Regression head sees the fused feature plus the classifier output
        # (124 + 1 = 125 inputs); ReLU keeps the predicted time non-negative.
        self.regress = torch.nn.Sequential(
            nn.Linear(125, 77),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(77, 46),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(46, 12),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(12, 1),
            nn.ReLU()
        )

    def forward(self, x):
        """Return (classification output, regression output) for input x."""
        # Last 80 columns: lane features reshaped to (batch, 4 channels, 20 pts).
        lane_fea = x[:, -80:]
        lane_fea = lane_fea.view(lane_fea.size(0), 4, 20)
        # Remaining columns: obstacle features.
        obs_fea = x[:, :-80]

        lane_fea = self.lane_feature_conv(lane_fea)
        lane_fea_max = self.lane_feature_maxpool(lane_fea)
        lane_fea_avg = self.lane_feature_avgpool(lane_fea)
        # Flatten and concatenate the two pooled summaries.
        lane_fea = torch.cat([lane_fea_max.view(lane_fea_max.size(0), -1),
                              lane_fea_avg.view(lane_fea_avg.size(0), -1)], 1)
        lane_fea = self.lane_feature_dropout(lane_fea)

        obs_fea = self.obs_feature_fc(obs_fea)
        tot_fea = torch.cat([lane_fea, obs_fea], 1)
        # The regression head is conditioned on the classification score.
        out_c = self.classify(tot_fea)
        out_r = self.regress(torch.cat([tot_fea, out_c], 1))
        return out_c, out_r
|
apache-2.0
|
stober/utils
|
src/plot_utils.py
|
1
|
3102
|
#! /usr/bin/env python
"""
Author: Jeremy M. Stober
Program: PLOTUTILS.PY
Date: Wednesday, April 21 2010
Description: Routines for common plot operations.
"""
import functools
import pdb

import matplotlib as mpl
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from matplotlib.path import Path
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import art3d
import numpy as np
def save_show_no(plotfunc):
    """Decorator: run *plotfunc*, then save, show, or return its Axes.

    The wrapped function receives the positional arguments only. Keyword
    handling:
      - filename=...: save the current figure to that path (returns None)
      - show=...: display the figure interactively (returns None)
      - neither: return the Axes produced by plotfunc
    """
    # functools.wraps preserves the wrapped function's name and docstring,
    # which the original bare closure lost.
    @functools.wraps(plotfunc)
    def decorate(*args, **kwargs):
        ax = plotfunc(*args)
        # Membership test directly on the dict ('in kwargs') instead of the
        # unidiomatic 'in kwargs.keys()'.
        if 'filename' in kwargs:
            plt.savefig(kwargs['filename'])
        elif 'show' in kwargs:
            plt.show()
        else:
            return ax
    return decorate
@save_show_no
def scatter3d(x, y, z):
    """3D scatter plot of the points (x[i], y[i], z[i])."""
    fig = plt.figure()
    ax = Axes3D(fig)
    ax.scatter(x, y, z)
    return ax
@save_show_no
def scatter3d_with_graph(x, y, z, adj):
    """3D scatter of points plus a red edge for each nonzero entry of adj.

    Only the upper triangle (i < j) of the adjacency matrix is used, so
    each undirected edge is drawn exactly once.
    """
    fig = plt.figure()
    ax = Axes3D(fig)
    ax.scatter(x, y, z, color='b')
    # Draw one red segment per connected pair. The original also built
    # unused 'zs', 'vertices' and 'codes' locals; the 'codes' line did
    # list * (len/2), which raises TypeError under Python 3 -- all three
    # are dead code and have been removed.
    rows, cols = np.nonzero(adj)
    for i, j in zip(rows, cols):
        if i < j:
            ax.plot([x[i], x[j]], [y[i], y[j]], [z[i], z[j]], color='r')
    return ax
@save_show_no
def dual_scatter(x, y, colors, lines):
    """Scatter two 2D point sets on the z=0 and z=1 planes of one 3D axis.

    If *lines* is truthy, a gray connector is drawn for every third index.
    """
    fig = plt.figure()
    ax = Axes3D(fig)
    ax.scatter3D(x[:,0], x[:,1], np.zeros(len(x[:,0])), c=colors)
    ax.scatter3D(y[:,0], y[:,1], np.ones(len(y[:,0])), c=colors)
    if lines:
        for i in range(len(x)):
            if i % 3 == 0:
                # NOTE(review): ax.plot takes (xs, ys, zs); here pt1 mixes the
                # first coordinates of x[i] and y[i], and pt2 the second
                # coordinates -- confirm this pairing is intended rather than
                # (x[i], y[i]) per plane.
                pt1 = (x[i,0], y[i,0])
                pt2 = (x[i,1], y[i,1])
                pt3 = (0.0, 1.0)
                ax.plot(pt1, pt2, pt3, color='gray')
    return ax
@save_show_no
def lvl_scatter(lvls, clvls):
    """Stack several 2D point sets at increasing integer heights z = 0, 1, ...

    lvls is a sequence of (n, 2) point arrays; clvls the matching colors.
    """
    fig = plt.figure()
    ax = Axes3D(fig)
    cnt = 0
    for l, c in zip(lvls, clvls):
        ax.scatter3D(l[:,0], l[:,1], np.zeros(len(l[:,0])) + cnt, c=c)
        cnt += 1
    return ax
@save_show_no
def scatter(x, y):
    """Plain 2D scatter plot of x vs y."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(x, y)
    return ax
@save_show_no
def scatter_with_graph(x, y, adj):
    """2D scatter of points plus a red line for each nonzero entry of adj.

    Only the upper triangle (i < j) of the adjacency matrix is drawn, so
    each undirected edge appears once.
    """
    # start with a normal scatter plot
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(x, y, color='b')
    # now draw edges between pts that are connected; collect the two
    # endpoint indices of every edge in order.
    tmp = np.nonzero(adj)
    indices = []
    for pt in zip(tmp[0], tmp[1]):
        if pt[0] < pt[1]:
            indices.append(pt[0])
            indices.append(pt[1])
    vertices = [[x[i], y[i]] for i in indices]
    # One MOVETO/LINETO pair per edge. Integer division (//) is required:
    # under Python 3, len(indices) / 2 is a float and list * float raises
    # TypeError.
    codes = [Path.MOVETO, Path.LINETO] * (len(indices) // 2)
    path = Path(vertices, codes)
    patch = patches.PathPatch(path, lw=1, color='r')
    ax.add_patch(patch)
    return ax
|
bsd-2-clause
|
vivekmishra1991/scikit-learn
|
examples/cluster/plot_ward_structured_vs_unstructured.py
|
320
|
3369
|
"""
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on their position.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. On the opposite, when opposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)

import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll

###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5

###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)

###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
    # Builtin float instead of np.float: np.float was a deprecated alias of
    # float (removed in NumPy >= 1.24), and the structured plot below already
    # uses float -- this also makes the two loops consistent.
    ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)

###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)

###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
                               linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)

###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
    ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)

plt.show()
|
bsd-3-clause
|
jseabold/scikit-learn
|
examples/cluster/plot_kmeans_digits.py
|
230
|
4524
|
"""
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
# Fixed seed so the k-means runs (and hence the table) are reproducible.
np.random.seed(42)

digits = load_digits()
data = scale(digits.data)

n_samples, n_features = data.shape
# One cluster per distinct digit class.
n_digits = len(np.unique(digits.target))
labels = digits.target

# Subsample size used by the silhouette metric in bench_k_means.
sample_size = 300

print("n_digits: %d, \t n_samples %d, \t n_features %d"
      % (n_digits, n_samples, n_features))

print(79 * '_')
# The two adjacent string literals concatenate into one header line.
print('% 9s' % 'init'
      ' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
    """Fit *estimator* on *data* and print timing plus clustering metrics.

    Relies on the module-level ``labels`` (ground truth) and
    ``sample_size`` (silhouette subsample) globals defined above.
    """
    t0 = time()
    estimator.fit(data)
    print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
          % (name, (time() - t0), estimator.inertia_,
             metrics.homogeneity_score(labels, estimator.labels_),
             metrics.completeness_score(labels, estimator.labels_),
             metrics.v_measure_score(labels, estimator.labels_),
             metrics.adjusted_rand_score(labels, estimator.labels_),
             metrics.adjusted_mutual_info_score(labels, estimator.labels_),
             metrics.silhouette_score(data, estimator.labels_,
                                      metric='euclidean',
                                      sample_size=sample_size)))
# Compare three initialization strategies on the full-dimensional data.
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
              name="k-means++", data=data)

bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
              name="random", data=data)

# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
              name="PCA-based",
              data=data)
print(79 * '_')

###############################################################################
# Visualize the results on PCA-reduced data

reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)

# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02  # point in the mesh [x_min, x_max]x[y_min, y_max].

# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()),
           cmap=plt.cm.Paired,
           aspect='auto', origin='lower')

plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
            marker='x', s=169, linewidths=3,
            color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
          'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
crazy-cat/incubator-mxnet
|
example/speech_recognition/stt_utils.py
|
44
|
5892
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import os.path
import numpy as np
import soundfile
from numpy.lib.stride_tricks import as_strided
logger = logging.getLogger(__name__)
def calc_feat_dim(window, max_freq):
    # window is an FFT window length in milliseconds, so the spectrogram has
    # 0.001 * window * max_freq frequency bins below max_freq, plus one for DC.
    return int(0.001 * window * max_freq) + 1
def conv_output_length(input_length, filter_size, border_mode, stride,
                       dilation=1):
    """ Compute the length of the output sequence after 1D convolution along
    time. Note that this function is in line with the function used in
    Convolution1D class from Keras.
    Params:
        input_length (int): Length of the input sequence.
        filter_size (int): Width of the convolution kernel.
        border_mode (str): Only support `same` or `valid`.
        stride (int): Stride size used in 1D convolution.
        dilation (int)
    """
    if input_length is None:
        return None
    assert border_mode in {'same', 'valid'}
    # Effective kernel width once dilation gaps are counted:
    # (filter_size - 1) * dilation + 1  ==  filter_size + (filter_size - 1)*(dilation - 1)
    effective_size = (filter_size - 1) * dilation + 1
    if border_mode == 'valid':
        length = input_length - effective_size + 1
    else:  # 'same': zero padding keeps the pre-stride length.
        length = input_length
    # Ceiling division by the stride.
    return (length + stride - 1) // stride
def spectrogram(samples, fft_length=256, sample_rate=2, hop_length=128):
    """
    Compute the spectrogram for a real signal.
    The parameters follow the naming convention of
    matplotlib.mlab.specgram
    Args:
        samples (1D array): input audio signal
        fft_length (int): number of elements in fft window
        sample_rate (scalar): sample rate
        hop_length (int): hop length (relative offset between neighboring
            fft windows).
    Returns:
        x (2D array): spectrogram [frequency x time]
        freq (1D array): frequency of each row in x
    Note:
        This is a truncating computation e.g. if fft_length=10,
        hop_length=5 and the signal has 23 elements, then the
        last 3 elements will be truncated.
    """
    assert not np.iscomplexobj(samples), "Must not pass in complex numbers"

    # Column-vector Hann window so it broadcasts over the frame axis below.
    window = np.hanning(fft_length)[:, None]
    window_norm = np.sum(window ** 2)

    # The scaling below follows the convention of
    # matplotlib.mlab.specgram which is the same as
    # matlabs specgram.
    scale = window_norm * sample_rate

    # Drop trailing samples that don't fill a whole hop.
    trunc = (len(samples) - fft_length) % hop_length
    x = samples[:len(samples) - trunc]

    # "stride trick" reshape to include overlap: frames become columns that
    # share memory with the input (no copy).
    nshape = (fft_length, (len(x) - fft_length) // hop_length + 1)
    nstrides = (x.strides[0], x.strides[0] * hop_length)
    x = as_strided(x, shape=nshape, strides=nstrides)

    # window stride sanity check: column 1 must equal the second frame.
    assert np.all(x[:, 1] == samples[hop_length:(hop_length + fft_length)])

    # broadcast window, compute real FFT over columns and square magnitude
    x = np.fft.rfft(x * window, axis=0)
    x = np.absolute(x) ** 2

    # scale, 2.0 for everything except dc and fft_length/2 (one-sided
    # spectrum: interior bins carry both positive and negative frequencies)
    x[1:-1, :] *= (2.0 / scale)
    x[(0, -1), :] /= scale

    freqs = float(sample_rate) / fft_length * np.arange(x.shape[0])

    return x, freqs
def spectrogram_from_file(filename, step=10, window=20, max_freq=None,
                          eps=1e-14, overwrite=False,
                          save_feature_as_csvfile=False):
    """ Calculate the log of linear spectrogram from FFT energy
    Params:
        filename (str): Path to the audio file
        step (int): Step size in milliseconds between windows
        window (int): FFT window size in milliseconds
        max_freq (int): Only FFT bins corresponding to frequencies between
            [0, max_freq] are returned
        eps (float): Small value to ensure numerical stability (for ln(x))
        overwrite (bool): Recompute the feature even if a cached .csv exists
        save_feature_as_csvfile (bool): Persist the computed feature next to
            the audio file for reuse on later calls
    """
    # Features are cached beside the audio file as <name>.csv.
    csvfilename = filename.replace(".wav", ".csv")
    if (os.path.isfile(csvfilename) is False) or overwrite:
        with soundfile.SoundFile(filename) as sound_file:
            audio = sound_file.read(dtype='float32')
            sample_rate = sound_file.samplerate
            # Downmix multi-channel audio to mono.
            if audio.ndim >= 2:
                audio = np.mean(audio, 1)
            if max_freq is None:
                # Default to the Nyquist frequency.
                max_freq = sample_rate / 2
            if max_freq > sample_rate / 2:
                raise ValueError("max_freq must not be greater than half of "
                                 " sample rate")
            if step > window:
                raise ValueError("step size must not be greater than window size")
            # Convert milliseconds to sample counts.
            hop_length = int(0.001 * step * sample_rate)
            fft_length = int(0.001 * window * sample_rate)
            pxx, freqs = spectrogram(
                audio, fft_length=fft_length, sample_rate=sample_rate,
                hop_length=hop_length)
            # Keep bins up to max_freq, then log-compress; transpose so the
            # result is [time x frequency].
            ind = np.where(freqs <= max_freq)[0][-1] + 1
            res = np.transpose(np.log(pxx[:ind, :] + eps))
            if save_feature_as_csvfile:
                np.savetxt(csvfilename, res)
            return res
    else:
        return np.loadtxt(csvfilename)
|
apache-2.0
|
iABC2XYZ/abc
|
Epics/DataAna10.1.py
|
1
|
5064
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 27 15:44:34 2017
@author: p
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
plt.close('all')
def GenWeight(shape):
    """Return a trainable TF weight variable of the given shape, initialized
    from a truncated normal with stddev 1."""
    return tf.Variable(tf.truncated_normal(shape, stddev=1.))
def GenBias(shape):
    """Return a trainable TF bias variable of the given shape, initialized
    to the constant 1.0."""
    return tf.Variable(tf.constant(1., shape=shape))
def getDataRow(exData, sizeRow):
    """Sample a random training batch of *differential* readings.

    Each of exData's rows holds 14 corrector (CHV) values followed by 10
    BPM values. Picks sizeRow random rows plus, for each, an earlier row a
    common random offset (1..4) back, and returns the differences:
    (xBPM: (sizeRow, 5, 2), yCHV: (sizeRow, 7, 2)).
    """
    total = np.shape(exData)[0]
    gap_max = 5
    # Two np.random.randint calls in the same order as the original, so the
    # sampled batches are identical under a fixed seed.
    rows_now = np.random.randint(gap_max, high=total, size=(sizeRow))
    gap = np.random.randint(1, high=gap_max, size=(1))
    rows_prev = rows_now - gap

    def _split(rows):
        # Columns 0:14 -> corrector settings, 14:24 -> BPM readings.
        chv = np.reshape(exData[rows, 0:14], (sizeRow, 7, 2))
        bpm = np.reshape(exData[rows, 14:24], (sizeRow, 5, 2))
        return chv, bpm

    chv_now, bpm_now = _split(rows_now)
    chv_prev, bpm_prev = _split(rows_prev)
    return bpm_now - bpm_prev, chv_now - chv_prev
def conv1d(x, W):
    # 1D convolution, stride 1, zero-padded so the output length matches
    # the input ("SAME").
    return tf.nn.conv1d(x, W, stride=1, padding="SAME",use_cudnn_on_gpu=True)
def conv2d(x, W):
    # 2D convolution, unit strides, zero-padded to preserve spatial size.
    return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding="SAME",use_cudnn_on_gpu=True)
# NOTE(review): this is Python 2 code (print statements, dict.has_key) built
# against TF1's graph/session API. Indentation below was reconstructed from a
# whitespace-mangled source -- verify the nesting of the plotting section
# against the original file.

# Training data: each row = 14 corrector (CHV) settings + 10 BPM readings.
exData=np.loadtxt('/home/node1/Templates/ABC/abc/Epics/Rec.dat')

# Graph inputs: BPM differences (5 monitors x 2 planes) and the corrector
# differences (7 correctors x 2 planes) they should predict.
bpm=tf.placeholder(tf.float32,shape=(None,5,2))
cHV=tf.placeholder(tf.float32,shape=(None,7,2))
xInput=bpm
yInput=cHV
#
# 1D conv lifts the 2-channel BPM readings to nChan1 channels.
nChan1=100
w1= GenWeight([1,2,nChan1])
b1=GenBias([nChan1])
x1=tf.nn.relu(conv1d(xInput, w1)+b1)
#
# Fold the channels into a 2D (5 x n2 x 1) image for the 2D convs below.
nChan2=1
n2=nChan1/nChan2
x2=tf.reshape(x1,(-1,5,n2,nChan2))
#
# Three parallel conv branches with 1x1, 2x2 and 3x3 kernels.
nChan3=5
w3= GenWeight([1,1,nChan2,nChan3])
b3=GenBias([nChan3])
x3=tf.nn.relu(conv2d(x2, w3)+b3)
#
nChan4=5
w4= GenWeight([2,2,nChan2,nChan4])
b4=GenBias([nChan4])
x4=tf.nn.relu(conv2d(x2, w4)+b4)
#
nChan5=5
w5= GenWeight([3,3,nChan2,nChan5])
b5=GenBias([nChan5])
x5=tf.nn.relu(conv2d(x2, w5)+b5)
#
# Concatenate the branches along the channel axis (inception-like fusion).
x6=tf.concat((tf.concat((x3,x4),axis=3),x5),axis=3)
#
nChan7=5
w7= GenWeight([3,3,nChan3+nChan4+nChan5,nChan7])
b7=GenBias([nChan7])
x7=tf.nn.relu(conv2d(x6, w7)+b7)
#
x8=tf.reshape(x7,(-1,5*n2*nChan7))
#
# NOTE(review): this direct linear head (x9) is built but never used; the
# network output below is the two-layer head x10_2.
w9=GenWeight([5*n2*nChan7,14])
b9=GenBias([14])
x9=tf.matmul(x8,w9)+b9
#
n9_2=250
w9_2=GenWeight([5*n2*nChan7,n9_2])
b9_2=GenBias([n9_2])
x9_2=tf.nn.relu(tf.matmul(x8,w9_2)+b9_2)
#
w10_2=GenWeight([n9_2,14])
b10_2=GenBias([14])
x10_2=tf.matmul(x9_2,w10_2)+b10_2
##
xFinal=x10_2
# Mean-squared error between predicted and recorded corrector settings.
xOutput=tf.reshape(xFinal,(-1,14))
yOutput=tf.reshape(yInput,(-1,14))
lossFn=tf.reduce_mean(tf.square(xOutput-yOutput))
trainBPM=tf.train.AdamOptimizer(0.01)
optBPM=trainBPM.minimize(lossFn)
iniBPM=tf.global_variables_initializer()
# Close a session left over from a previous interactive run, if any
# (vars().has_key is Python-2-only).
try:
    if vars().has_key('se'):
        se.close()
except:
    pass
#se= tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
se=tf.Session()
se.run(iniBPM)
nIt=2e7
sizeRow=50
stepLossRec=50
nLossRec=np.int32(nIt/stepLossRec+1)
lossRec=np.zeros((nLossRec))
iRec=0
for i in range(np.int32(nIt)):
    xBPM,yCHV=getDataRow(exData,sizeRow)
    se.run(optBPM,feed_dict={bpm:xBPM,cHV:yCHV})
    # Every stepLossRec iterations: record the loss and refresh the plots.
    if i % stepLossRec==0:
        lossRecTmp=se.run(lossFn,feed_dict={bpm:xBPM,cHV:yCHV})
        lossRec[iRec]=lossRecTmp
        iRec+=1
        print lossRecTmp
        # Rolling loss plot over the last numPlot records.
        plt.figure('8.3-lossRec')
        numPlot=30
        plt.clf()
        if iRec<=numPlot:
            xPlot=np.linspace(0,iRec-1,iRec)
            yPlot=lossRec[0:iRec:]
            yPlotMean=np.cumsum(yPlot)/(xPlot+1)
        else:
            # NOTE(review): this branch shifts yPlotMean in place and assumes
            # it still exists from the previous iteration.
            xPlot=np.linspace(iRec-numPlot,iRec-1,numPlot)
            yPlot=lossRec[iRec-numPlot:iRec:]
            yPlotMean[0:-1:]=yPlotMean[1::]
            yPlotMean[-1]=np.mean(yPlot)
        plt.hold
        plt.plot(xPlot,yPlot,'*b')
        plt.plot(xPlot,yPlotMean,'go')
        plt.grid('on')
        plt.title(i)
        plt.pause(0.05)
        # Spot-check: predicted vs recorded corrector settings for one sample.
        xBPM,yCHV=getDataRow(exData,1)
        yCHV_Cal=se.run(xFinal,feed_dict={bpm:xBPM})
        plt.figure('8.3-2')
        plt.clf()
        plt.hold
        plt.plot(np.reshape(yCHV[0,:],(14)),'bd')
        plt.plot(yCHV_Cal[0,:],'rd')
        plt.title(i)
        plt.pause(0.05)
#se.close()
# Evaluate the trained network on four hand-picked BPM offset patterns.
xBPMReal_1=np.ones((5,2))*0.
xBPMReal_2=np.ones((5,2))*3.
xBPMReal_3=np.ones((5,2))*(-3.)
xBPMReal_4=np.ones((5,2))
xBPMReal_4[:,0]=xBPMReal_4[:,0]*3.
xBPMReal_4[:,1]=xBPMReal_4[:,1]*(-3.)
xBPMReal=np.zeros((4,5,2))
xBPMReal[0,:,:]=xBPMReal_1
xBPMReal[1,:,:]=xBPMReal_2
xBPMReal[2,:,:]=xBPMReal_3
xBPMReal[3,:,:]=xBPMReal_4
yCHV_Cal4Real=se.run(xFinal,feed_dict={bpm:xBPMReal})
yCHV_Cal4Real_1=np.reshape(yCHV_Cal4Real[0,::],(7,2))
yCHV_Cal4Real_2=np.reshape(yCHV_Cal4Real[1,::],(7,2))
yCHV_Cal4Real_3=np.reshape(yCHV_Cal4Real[2,::],(7,2))
yCHV_Cal4Real_4=np.reshape(yCHV_Cal4Real[3,::],(7,2))
print '----------------- yCHV_Cal4Real_1 --------------------------'
print yCHV_Cal4Real_1
print '----------------- yCHV_Cal4Real_2 --------------------------'
print yCHV_Cal4Real_2
print '----------------- yCHV_Cal4Real_3 --------------------------'
print yCHV_Cal4Real_3
print '----------------- yCHV_Cal4Real_4 --------------------------'
print yCHV_Cal4Real_4
|
gpl-3.0
|
xzh86/scikit-learn
|
sklearn/tests/test_base.py
|
216
|
7045
|
# Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
    # Minimal conforming estimator fixture: __init__ stores every argument
    # verbatim under the same name, as the get_params()/clone() contract
    # requires.
    def __init__(self, l1=0, empty=None):
        self.l1 = l1
        self.empty = empty
class K(BaseEstimator):
    # Leaf estimator with two plain parameters; used as a sub-estimator of T
    # in the repr/get_params tests below.
    def __init__(self, c=None, d=None):
        self.c = c
        self.d = d
class T(BaseEstimator):
    # Composite estimator fixture: in the tests both a and b are usually K
    # instances, exercising nested "a__<param>" parameter access.
    def __init__(self, a=None, b=None):
        self.a = a
        self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
    """Fixture whose ``b`` parameter is a deprecated alias for ``a``."""
    def __init__(self, a=None, b=None):
        self.a = a
        if b is not None:
            # Bug fix: the original merely *instantiated* a
            # DeprecationWarning without emitting it, so callers never saw
            # any warning.  Emit it properly via warnings.warn.
            import warnings
            warnings.warn("b is deprecated and renamed 'a'",
                          DeprecationWarning)
            self.a = b
    @property
    @deprecated("Parameter 'b' is deprecated and renamed to 'a'")
    def b(self):
        # NOTE(review): self._b is never assigned anywhere in this class, so
        # reading the property raises AttributeError -- TODO confirm this is
        # intentional (the tests only check that 'b' is absent from
        # get_params, and never read the property).
        return self._b
class Buggy(BaseEstimator):
    " A buggy estimator that does not set its parameters right. "
    # Deliberately ignores the ``a`` argument (always sets self.a = 1) so
    # that clone() detects the broken get_params round-trip in
    # test_clone_buggy.  Do not "fix" this.
    def __init__(self, a=None):
        self.a = 1
class NoEstimator(object):
    # Fixture exposing fit/predict without subclassing BaseEstimator: it has
    # no get_params(), so clone() must reject it with TypeError.
    def __init__(self):
        pass
    def fit(self, X=None, y=None):
        # Fluent no-op: returns the instance itself.
        return self
    def predict(self, X=None):
        # Nothing is ever fitted, so there is nothing to predict.
        return None
class VargEstimator(BaseEstimator):
    """Sklearn estimators shouldn't have vargs."""
    # *vargs makes the constructor signature un-introspectable, so clone()
    # must fail with RuntimeError (exercised by test_clone_buggy).
    def __init__(self, *vargs):
        pass
#############################################################################
# The tests
def test_clone():
    # clone() must produce a distinct object carrying identical constructor
    # parameters (a correct deep copy of the estimator spec).
    from sklearn.feature_selection import SelectFpr, f_classif
    est = SelectFpr(f_classif, alpha=0.1)
    duplicate = clone(est)
    assert_true(duplicate is not est)
    assert_equal(duplicate.get_params(), est.get_params())
    # ndarray-valued parameters must survive cloning as well.
    est = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
    duplicate = clone(est)
    assert_true(duplicate is not est)
def test_clone_2():
    # clone() must NOT copy everything: attributes attached after
    # construction (i.e. not constructor parameters) do not survive cloning.
    from sklearn.feature_selection import SelectFpr, f_classif
    est = SelectFpr(f_classif, alpha=0.1)
    est.own_attribute = "test"
    duplicate = clone(est)
    assert_false(hasattr(duplicate, "own_attribute"))
def test_clone_buggy():
    # clone() must refuse estimators that break the parameter contract.
    bad = Buggy()
    bad.a = 2  # instance state diverges from what get_params() reports
    assert_raises(RuntimeError, clone, bad)
    # Objects without get_params() are rejected with TypeError.
    assert_raises(TypeError, clone, NoEstimator())
    # *vargs in __init__ makes parameters undiscoverable -> RuntimeError.
    assert_raises(RuntimeError, clone, VargEstimator())
def test_clone_empty_array():
    # Regression test: empty-array and sparse-matrix parameters must clone
    # without error and preserve their contents.
    original = MyEstimator(empty=np.array([]))
    duplicate = clone(original)
    assert_array_equal(original.empty, duplicate.empty)
    original = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
    duplicate = clone(original)
    assert_array_equal(original.empty.data, duplicate.empty.data)
def test_clone_nan():
    # Regression test: an np.nan parameter is passed through unchanged
    # (same object identity) rather than deep-copied.
    original = MyEstimator(empty=np.nan)
    duplicate = clone(original)
    assert_true(original.empty is duplicate.empty)
def test_repr():
    # Smoke test the repr of the base estimator.
    repr(MyEstimator())
    # Nested estimators render their sub-estimator parameters inline.
    nested = T(K(), K())
    assert_equal(
        repr(nested),
        "T(a=K(c=None, d=None), b=K(c=None, d=None))"
    )
    # Very long parameter values are ellipsized to a fixed-width repr.
    wide = T(a=["long_params"] * 1000)
    assert_equal(len(repr(wide)), 415)
def test_str():
    # Smoke test: str() of a bare estimator must not raise.
    str(MyEstimator())
def test_get_params():
    nested = T(K(), K())
    # deep=True exposes sub-estimator params under "<name>__<param>" keys.
    assert_true('a__d' in nested.get_params(deep=True))
    assert_true('a__d' not in nested.get_params(deep=False))
    # set_params routes the nested key down to the sub-estimator...
    nested.set_params(a__d=2)
    assert_true(nested.a.d == 2)
    # ...and rejects unknown nested parameter names.
    assert_raises(ValueError, nested.set_params, a__a=2)
def test_get_params_deprecated():
    # A deprecated attribute must never show up among the params, whatever
    # the value of ``deep``; the live parameter 'a' always must.
    est = DeprecatedAttributeEstimator(a=1)
    for params in (est.get_params(),
                   est.get_params(deep=True),
                   est.get_params(deep=False)):
        assert_true('a' in params)
        assert_true('b' not in params)
def test_is_classifier():
    svc = SVC()
    assert_true(is_classifier(svc))
    # Meta-estimators delegate classifier-ness to the wrapped estimator.
    assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
    assert_true(is_classifier(Pipeline([('svc', svc)])))
    nested = Pipeline([('svc_cv', GridSearchCV(svc, {'C': [0.1, 1]}))])
    assert_true(is_classifier(nested))
def test_set_params():
    # Nested parameter setting must validate both the step name and the
    # parameter name.
    pipe = Pipeline([("svc", SVC())])
    # unknown parameter on an existing step
    assert_raises(ValueError, pipe.set_params, svc__stupid_param=True)
    # unknown step name
    assert_raises(ValueError, pipe.set_params, svm__stupid_param=True)
    # we don't currently catch if the things in pipeline are estimators
    # bad_pipeline = Pipeline([("bad", NoEstimator())])
    # assert_raises(AttributeError, bad_pipeline.set_params,
    #               bad__stupid_param=True)
def test_score_sample_weight():
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.tree import DecisionTreeRegressor
    from sklearn import datasets
    rng = np.random.RandomState(0)
    # Cover both the ClassifierMixin and RegressorMixin score() paths.
    estimators = [DecisionTreeClassifier(max_depth=2),
                  DecisionTreeRegressor(max_depth=2)]
    sets = [datasets.load_iris(),
            datasets.load_boston()]
    for est, ds in zip(estimators, sets):
        est.fit(ds.data, ds.target)
        # Random integer weights should perturb the score.
        weights = rng.randint(1, 10, size=len(ds.target))
        unweighted = est.score(ds.data, ds.target)
        weighted = est.score(ds.data, ds.target, sample_weight=weights)
        assert_not_equal(unweighted, weighted,
                         msg="Unweighted and weighted scores "
                             "are unexpectedly equal")
|
bsd-3-clause
|
alisidd/tensorflow
|
tensorflow/examples/learn/wide_n_deep_tutorial.py
|
29
|
8985
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
from six.moves import urllib
import pandas as pd
import tensorflow as tf
COLUMNS = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"]
LABEL_COLUMN = "label"
CATEGORICAL_COLUMNS = ["workclass", "education", "marital_status", "occupation",
"relationship", "race", "gender", "native_country"]
CONTINUOUS_COLUMNS = ["age", "education_num", "capital_gain", "capital_loss",
"hours_per_week"]
def maybe_download(train_data, test_data):
  """Maybe downloads training data and returns train and test file names.

  Paths supplied by the caller are returned unchanged; empty arguments
  trigger a download of the census data into temporary files.
  """
  if train_data:
    train_file_name = train_data
  else:
    tmp = tempfile.NamedTemporaryFile(delete=False)
    urllib.request.urlretrieve("http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data", tmp.name)  # pylint: disable=line-too-long
    train_file_name = tmp.name
    tmp.close()
    print("Training data is downloaded to %s" % train_file_name)
  if test_data:
    test_file_name = test_data
  else:
    tmp = tempfile.NamedTemporaryFile(delete=False)
    urllib.request.urlretrieve("http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test", tmp.name)  # pylint: disable=line-too-long
    test_file_name = tmp.name
    tmp.close()
    print("Test data is downloaded to %s" % test_file_name)
  return train_file_name, test_file_name
def build_estimator(model_dir, model_type):
  """Build an estimator.

  Args:
    model_dir: directory where the estimator writes checkpoints/summaries.
    model_type: "wide" -> LinearClassifier, "deep" -> DNNClassifier,
      anything else -> the combined wide & deep model.

  Returns:
    A tf.contrib.learn classifier matching `model_type`.
  """
  # Sparse base columns.
  gender = tf.contrib.layers.sparse_column_with_keys(column_name="gender",
                                                     keys=["female", "male"])
  education = tf.contrib.layers.sparse_column_with_hash_bucket(
      "education", hash_bucket_size=1000)
  relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
      "relationship", hash_bucket_size=100)
  workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
      "workclass", hash_bucket_size=100)
  occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
      "occupation", hash_bucket_size=1000)
  native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
      "native_country", hash_bucket_size=1000)
  # Continuous base columns.
  age = tf.contrib.layers.real_valued_column("age")
  education_num = tf.contrib.layers.real_valued_column("education_num")
  capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
  capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
  hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
  # Transformations.
  age_buckets = tf.contrib.layers.bucketized_column(age,
                                                    boundaries=[
                                                        18, 25, 30, 35, 40, 45,
                                                        50, 55, 60, 65
                                                    ])
  # Wide columns and deep columns.
  # The wide part memorises feature crosses; the deep part generalises via
  # 8-dimensional embeddings of the sparse columns plus the raw continuous
  # columns.
  wide_columns = [gender, native_country, education, occupation, workclass,
                  relationship, age_buckets,
                  tf.contrib.layers.crossed_column([education, occupation],
                                                   hash_bucket_size=int(1e4)),
                  tf.contrib.layers.crossed_column(
                      [age_buckets, education, occupation],
                      hash_bucket_size=int(1e6)),
                  tf.contrib.layers.crossed_column([native_country, occupation],
                                                   hash_bucket_size=int(1e4))]
  deep_columns = [
      tf.contrib.layers.embedding_column(workclass, dimension=8),
      tf.contrib.layers.embedding_column(education, dimension=8),
      tf.contrib.layers.embedding_column(gender, dimension=8),
      tf.contrib.layers.embedding_column(relationship, dimension=8),
      tf.contrib.layers.embedding_column(native_country,
                                         dimension=8),
      tf.contrib.layers.embedding_column(occupation, dimension=8),
      age,
      education_num,
      capital_gain,
      capital_loss,
      hours_per_week,
  ]
  if model_type == "wide":
    m = tf.contrib.learn.LinearClassifier(model_dir=model_dir,
                                          feature_columns=wide_columns)
  elif model_type == "deep":
    m = tf.contrib.learn.DNNClassifier(model_dir=model_dir,
                                       feature_columns=deep_columns,
                                       hidden_units=[100, 50])
  else:
    # Default: joint wide & deep model.
    m = tf.contrib.learn.DNNLinearCombinedClassifier(
        model_dir=model_dir,
        linear_feature_columns=wide_columns,
        dnn_feature_columns=deep_columns,
        dnn_hidden_units=[100, 50],
        fix_global_step_increment_bug=True)
  return m
def input_fn(df):
  """Input builder function.

  Args:
    df: pandas DataFrame containing every name in CONTINUOUS_COLUMNS,
      CATEGORICAL_COLUMNS and LABEL_COLUMN.

  Returns:
    A (feature_cols, label) pair: dict mapping column name to a Tensor or
    SparseTensor, and a constant Tensor of integer labels.
  """
  # Creates a dictionary mapping from each continuous feature column name (k) to
  # the values of that column stored in a constant Tensor.
  continuous_cols = {k: tf.constant(df[k].values) for k in CONTINUOUS_COLUMNS}
  # Creates a dictionary mapping from each categorical feature column name (k)
  # to the values of that column stored in a tf.SparseTensor.
  # Each categorical column becomes a sparse tensor of dense shape [n, 1]
  # with one entry per row.
  categorical_cols = {
      k: tf.SparseTensor(
          indices=[[i, 0] for i in range(df[k].size)],
          values=df[k].values,
          dense_shape=[df[k].size, 1])
      for k in CATEGORICAL_COLUMNS}
  # Merges the two dictionaries into one.
  feature_cols = dict(continuous_cols)
  feature_cols.update(categorical_cols)
  # Converts the label column into a constant Tensor.
  label = tf.constant(df[LABEL_COLUMN].values)
  # Returns the feature columns and the label.
  return feature_cols, label
def train_and_eval(model_dir, model_type, train_steps, train_data, test_data):
  """Train and evaluate the model.

  Downloads the census data if needed, derives a binary income label,
  fits the chosen estimator and prints the evaluation metrics.
  """
  train_file_name, test_file_name = maybe_download(train_data, test_data)
  df_train = pd.read_csv(
      tf.gfile.Open(train_file_name),
      names=COLUMNS,
      skipinitialspace=True,
      engine="python")
  # The test file carries one header/comment line; skip it.
  df_test = pd.read_csv(
      tf.gfile.Open(test_file_name),
      names=COLUMNS,
      skipinitialspace=True,
      skiprows=1,
      engine="python")
  # remove NaN elements
  df_train = df_train.dropna(how='any', axis=0)
  df_test = df_test.dropna(how='any', axis=0)
  # Binary label: 1 if income bracket contains ">50K" (substring match also
  # covers the ">50K." spelling used in the test file), else 0.
  df_train[LABEL_COLUMN] = (
      df_train["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
  df_test[LABEL_COLUMN] = (
      df_test["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
  # Fall back to a throwaway temp directory when no model_dir was given.
  model_dir = tempfile.mkdtemp() if not model_dir else model_dir
  print("model directory = %s" % model_dir)
  m = build_estimator(model_dir, model_type)
  m.fit(input_fn=lambda: input_fn(df_train), steps=train_steps)
  results = m.evaluate(input_fn=lambda: input_fn(df_test), steps=1)
  for key in sorted(results):
    print("%s: %s" % (key, results[key]))
FLAGS = None  # populated by argparse in the __main__ block below
def main(_):
  # tf.app.run entry point: forward the parsed CLI flags to the trainer.
  train_and_eval(FLAGS.model_dir, FLAGS.model_type, FLAGS.train_steps,
                 FLAGS.train_data, FLAGS.test_data)
if __name__ == "__main__":
  parser = argparse.ArgumentParser()
  # Allow "--flag=True/False" style boolean strings on the command line.
  parser.register("type", "bool", lambda v: v.lower() == "true")
  parser.add_argument(
      "--model_dir",
      type=str,
      default="",
      help="Base directory for output models."
  )
  parser.add_argument(
      "--model_type",
      type=str,
      default="wide_n_deep",
      help="Valid model types: {'wide', 'deep', 'wide_n_deep'}."
  )
  parser.add_argument(
      "--train_steps",
      type=int,
      default=200,
      help="Number of training steps."
  )
  parser.add_argument(
      "--train_data",
      type=str,
      default="",
      help="Path to the training data."
  )
  parser.add_argument(
      "--test_data",
      type=str,
      default="",
      help="Path to the test data."
  )
  # Unrecognised flags are forwarded so TensorFlow's own flag handling
  # still sees them.
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
apache-2.0
|
re-innovation/CSVviewer
|
windrose.py
|
1
|
20019
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
application.py
@author: James Fowkes
Adapted from Lionel Roubeyrie as below.
Generates windrose plot of speed/direction data
"""
__version__ = '1.4'
__author__ = 'Lionel Roubeyrie'
__mail__ = 'lionel.roubeyrie@gmail.com'
__license__ = 'CeCILL-B'
# This module uses a LOT of numpy calls
# pylint's underlying astroid library cannot find numpy functions
# So disable the warning for the WHOLE MODULE <-- somewhat dangerous
# http://stackoverflow.com/questions/20553551/how-do-i-get-pylint-to-recognize-numpy-members
# This suggests that another astroid version can do this, so check in the future.
#pylint: disable=no-member
#
# Also, there are some lines that use numpy's "advanced indexing" feature. pylint assumes
# these are normal indexes and throws a warning. Therefore disable=invalid-sequence-index
# is used for these lines.
import matplotlib
import matplotlib.cm as cm
import numpy as np
from matplotlib.patches import Rectangle
from matplotlib.projections.polar import PolarAxes
from numpy.lib.twodim_base import histogram2d
from pylab import poly_between
RESOLUTION = 100
ZBASE = -1000 #The starting zorder for all drawing, negative to have the grid on
def _colors(cmap, num):
'''
Returns a list of n colors based on the colormap cmap
'''
return [cmap(i) for i in np.linspace(0.0, 1.0, num)]
class WindroseAxes(PolarAxes):
    """
    Create a windrose axes
    """
    # A polar axes specialised for wind roses.  The plotting methods
    # (contour, contourf, bar, box) all funnel through _init_plot(), which
    # fills self._info with the direction/speed histogram they then render.
    def __init__(self, *args, **kwargs):
        """
        See Axes base class for args and kwargs documentation
        """
        #Uncomment to have the possibility to change the resolution directly
        #when the instance is created
        #self.RESOLUTION = kwargs.pop('resolution', 100)
        PolarAxes.__init__(self, *args, **kwargs)
        self.set_aspect('equal', adjustable='box', anchor='C')
        # Angle (degrees) at which the radial tick labels are drawn.
        self.radii_angle = 67.5
        self.legend_ = None
        self.cla()
    def cla(self):
        """
        Clear the current axes
        """
        PolarAxes.cla(self)
        self.theta_angles = np.arange(0, 360, 45)
        self.theta_labels = ['E', 'N-E', 'N', 'N-W', 'W', 'S-W', 'S', 'S-E']
        self.set_thetagrids(angles=self.theta_angles, labels=self.theta_labels)
        # _info is the shared histogram state filled by _init_plot():
        # 'direction' -> sector centre angles, 'bins' -> variable bin edges,
        # 'table' -> (nbins x nsector) frequency table.
        self._info = {'direction' : list(),
                      'bins' : list(),
                      'table' : list()}
        self.patches_list = list()
    def set_radii_angle(self, **kwargs):
        """
        Set the radii labels angle
        """
        _ = kwargs.pop('labels', None)
        angle = kwargs.pop('angle', None)
        if angle is None:
            angle = self.radii_angle
        self.radii_angle = angle
        radii = np.linspace(0.1, self.get_rmax(), 6)
        radii_labels = ["%.1f" %r for r in radii]
        radii_labels[0] = "" #Removing label 0
        _ = self.set_rgrids(radii=radii, labels=radii_labels,
                            angle=self.radii_angle, **kwargs)
    def _update(self):
        """
        Updates the radii information for plotting patches
        """
        # rmax is the largest stacked-bin total over all sectors.
        self.set_rmax(rmax=np.max(np.sum(self._info['table'], axis=0)))
        self.set_radii_angle(angle=self.radii_angle)
    def legend(self, loc='lower left', **kwargs):
        """
        Sets the legend location and her properties.
        The location codes are
          'best'         : 0,
          'upper right'  : 1,
          'upper left'   : 2,
          'lower left'   : 3,
          'lower right'  : 4,
          'right'        : 5,
          'center left'  : 6,
          'center right' : 7,
          'lower center' : 8,
          'upper center' : 9,
          'center'       : 10,
        If none of these are suitable, loc can be a 2-tuple giving x,y
        in axes coords, ie,
          loc = (0, 1) is left top
          loc = (0.5, 0.5) is center, center
        and so on.  The following kwargs are supported:
        isaxes=True           # whether this is an axes legend
        prop = FontProperties(size='smaller')  # the font property
        pad = 0.2             # the fractional whitespace inside the legend border
        shadow                # if True, draw a shadow behind legend
        labelsep = 0.005     # the vertical space between the legend entries
        handlelen = 0.05     # the length of the legend lines
        handletextsep = 0.02 # the space between the legend line and legend text
        axespad = 0.02       # the border between the axes and legend edge
        """
        def get_handles():
            """
            Return a list of rectangles for each patch in that patches colour
            """
            handles = list()
            for patch in self.patches_list:
                if isinstance(patch, matplotlib.patches.Polygon) or \
                isinstance(patch, matplotlib.patches.Rectangle):
                    color = patch.get_facecolor()
                elif isinstance(patch, matplotlib.lines.Line2D):
                    color = patch.get_color()
                else:
                    raise AttributeError("Can't handle patches")
                handles.append(Rectangle((0, 0), 0.2, 0.2,
                    facecolor=color, edgecolor='black'))
            return handles
        def get_labels():
            """ Make label strings from label information in dict """
            # One "[low : high[" label per variable bin.
            labels = np.copy(self._info['bins'])
            labels = ["[%.1f : %0.1f[" %(labels[i], labels[i+1]) \
                      for i in range(len(labels)-1)]
            return labels
        _ = kwargs.pop('labels', None)
        _ = kwargs.pop('handles', None)
        handles = get_handles()
        labels = get_labels()
        self.legend_ = matplotlib.legend.Legend(self, handles, labels,
                                                loc, **kwargs)
        return self.legend_
    def _init_plot(self, direction, var, **kwargs):
        """
        Internal method used by all plotting commands
        """
        # Normalises the shared kwargs (bins, nsector, colors/cmap,
        # normed, blowto) and fills self._info with the histogram.
        #self.cla()
        _ = kwargs.pop('zorder', None)
        #Init of the bins array if not set
        bins = kwargs.pop('bins', None)
        if bins is None:
            bins = np.linspace(np.min(var), np.max(var), 6)
        if isinstance(bins, int):
            bins = np.linspace(np.min(var), np.max(var), bins)
        bins = np.asarray(bins)
        nbins = len(bins)
        #Number of sectors
        nsector = kwargs.pop('nsector', None)
        if nsector is None:
            nsector = 16
        #Sets the colors table based on the colormap or the "colors" argument
        colors = kwargs.pop('colors', None)
        cmap = kwargs.pop('cmap', None)
        if colors is not None:
            if isinstance(colors, str):
                colors = [colors]*nbins
            if isinstance(colors, (tuple, list)):
                if len(colors) != nbins:
                    raise ValueError("colors and bins must have same length")
        else:
            if cmap is None:
                cmap = cm.jet
            colors = _colors(cmap, nbins)
        #Building the angles list
        angles = np.arange(0, -2*np.pi, -2*np.pi/nsector) + np.pi/2
        normed = kwargs.pop('normed', False)
        blowto = kwargs.pop('blowto', False)
        #Set the global information dictionary
        information_dict = histogram(direction, var, bins, nsector, normed, blowto)
        self._info['direction'], self._info['bins'], self._info['table'] = information_dict
        return bins, nbins, nsector, colors, angles, kwargs
    def contour(self, direction, var, **kwargs):
        """
        Plot a windrose in linear mode. For each var bins, a line will be
        draw on the axes, a segment between each sector (center to center).
        Each line can be formated (color, width, ...) like with standard plot
        pylab command.
        Mandatory:
        * direction : 1D array - directions the wind blows from, North centred
        * var : 1D array - values of the variable to compute. Typically the wind
        speeds
        Optional:
        * nsector: integer - number of sectors used to compute the windrose
        table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
        and the resulting computed table will be aligned with the cardinals
        points.
        * bins : 1D array or integer- number of bins, or a sequence of
        bins variable. If not set, bins=6, then
        bins=linspace(min(var), max(var), 6)
        * blowto : bool. If True, the windrose will be pi rotated,
        to show where the wind blow to (usefull for pollutant rose).
        * colors : string or tuple - one string color ('k' or 'black'), in this
        case all bins will be plotted in this color; a tuple of matplotlib
        color args (string, float, rgb, etc), different levels will be plotted
        in different colors in the order specified.
        * cmap : a cm Colormap instance from matplotlib.cm.
          - if cmap == None and colors == None, a default Colormap is used.
        others kwargs : see help(pylab.plot)
        """
        # _ is for bins, which is not required
        _, nbins, nsector, colors, angles, kwargs = self._init_plot(direction, var, **kwargs)
        #closing lines
        angles = np.hstack((angles, angles[-1]-2*np.pi/nsector))
        vals = np.hstack((
            self._info['table'],
            np.reshape(
                self._info['table'][:, 0], #pylint: disable=invalid-sequence-index
                (self._info['table'].shape[0], 1))))
        # Stack each bin's line on top of the previous ones.
        offset = 0
        for i in range(nbins):
            val = vals[i, :] + offset
            offset += vals[i, :]
            zorder = ZBASE + nbins - i
            patch = self.plot(angles, val, color=colors[i], zorder=zorder, **kwargs)
            self.patches_list.extend(patch)
        self._update()
    def contourf(self, direction, var, **kwargs):
        """
        Plot a windrose in filled mode. For each var bins, a line will be
        draw on the axes, a segment between each sector (center to center).
        Each line can be formated (color, width, ...) like with standard plot
        pylab command.
        Mandatory:
        * direction : 1D array - directions the wind blows from, North centred
        * var : 1D array - values of the variable to compute. Typically the wind
        speeds
        Optional:
        * nsector: integer - number of sectors used to compute the windrose
        table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
        and the resulting computed table will be aligned with the cardinals
        points.
        * bins : 1D array or integer- number of bins, or a sequence of
        bins variable. If not set, bins=6, then
        bins=linspace(min(var), max(var), 6)
        * blowto : bool. If True, the windrose will be pi rotated,
        to show where the wind blow to (usefull for pollutant rose).
        * colors : string or tuple - one string color ('k' or 'black'), in this
        case all bins will be plotted in this color; a tuple of matplotlib
        color args (string, float, rgb, etc), different levels will be plotted
        in different colors in the order specified.
        * cmap : a cm Colormap instance from matplotlib.cm.
          - if cmap == None and colors == None, a default Colormap is used.
        others kwargs : see help(pylab.plot)
        """
        # This function does have a lot of local variables and is a candidate for refactoring.
        # In the meantime, disable the warning
        #pylint: disable=too-many-locals
        # _ is for bins, which is not required
        _, nbins, nsector, colors, angles, kwargs = self._init_plot(direction, var, **kwargs)
        _ = kwargs.pop('facecolor', None)
        _ = kwargs.pop('edgecolor', None)
        #closing lines
        angles = np.hstack((angles, angles[-1]-2*np.pi/nsector))
        vals = np.hstack((
            self._info['table'],
            np.reshape(
                self._info['table'][:, 0], #pylint: disable=invalid-sequence-index
                (self._info['table'].shape[0], 1))))
        offset = 0
        for i in range(nbins):
            val = vals[i, :] + offset
            offset += vals[i, :]
            zorder = ZBASE + nbins - i
            # NOTE(review): pylab.poly_between was removed in matplotlib>=3.1;
            # this module requires an old matplotlib -- TODO confirm pin.
            xlocs, ylocs = poly_between(angles, 0, val)
            patch = self.fill(xlocs, ylocs, facecolor=colors[i],
                              edgecolor=colors[i], zorder=zorder, **kwargs)
            self.patches_list.extend(patch)
        # NOTE(review): unlike contour()/bar()/box(), this method never calls
        # self._update(), so the radial grid is not rescaled -- TODO confirm
        # whether that is intentional.
    def bar(self, direction, var, **kwargs):
        """
        Plot a windrose in bar mode. For each var bins and for each sector,
        a colored bar will be draw on the axes.
        Mandatory:
        * direction : 1D array - directions the wind blows from, North centred
        * var : 1D array - values of the variable to compute. Typically the wind
        speeds
        Optional:
        * nsector: integer - number of sectors used to compute the windrose
        table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
        and the resulting computed table will be aligned with the cardinals
        points.
        * bins : 1D array or integer- number of bins, or a sequence of
        bins variable. If not set, bins=6 between min(var) and max(var).
        * blowto : bool. If True, the windrose will be pi rotated,
        to show where the wind blow to (usefull for pollutant rose).
        * colors : string or tuple - one string color ('k' or 'black'), in this
        case all bins will be plotted in this color; a tuple of matplotlib
        color args (string, float, rgb, etc), different levels will be plotted
        in different colors in the order specified.
        * cmap : a cm Colormap instance from matplotlib.cm.
          - if cmap == None and colors == None, a default Colormap is used.
        edgecolor : string - The string color each edge bar will be plotted.
        Default : no edgecolor
        * opening : float - between 0.0 and 1.0, to control the space between
        each sector (1.0 for no space)
        """
        # This function does have a lot of local variables and is a candidate for refactoring.
        # In the meantime, disable the warning
        #pylint: disable=too-many-locals
        # _ is for bins, which is not required
        _, nbins, nsector, colors, angles, kwargs = self._init_plot(direction, var, **kwargs)
        _ = kwargs.pop('facecolor', None)
        edgecolor = kwargs.pop('edgecolor', None)
        if edgecolor is not None:
            if not isinstance(edgecolor, str):
                raise ValueError('edgecolor must be a string color')
        opening = kwargs.pop('opening', None)
        if opening is None:
            opening = 0.8
        dtheta = 2*np.pi/nsector
        # 'opening' scales the angular width of each bar within its sector.
        opening = dtheta*opening
        for j in range(nsector):
            # Bars for the same sector are stacked radially: 'offset' is the
            # cumulated height of the previous bins.
            offset = 0
            for i in range(nbins):
                if i > 0:
                    offset += self._info['table'][i-1, j] #pylint: disable=invalid-sequence-index
                val = self._info['table'][i, j] #pylint: disable=invalid-sequence-index
                zorder = ZBASE + nbins - i
                patch = Rectangle(
                    (angles[j]-opening/2, offset), opening, val,
                    facecolor=colors[i], edgecolor=edgecolor, zorder=zorder,
                    **kwargs)
                self.add_patch(patch)
                if j == 0:
                    # keep one patch per bin for the legend handles
                    self.patches_list.append(patch)
        self._update()
    def box(self, direction, var, **kwargs):
        """
        Plot a windrose in proportional bar mode. For each var bins and for each
        sector, a colored bar will be draw on the axes.
        Mandatory:
        * direction : 1D array - directions the wind blows from, North centred
        * var : 1D array - values of the variable to compute. Typically the wind
        speeds
        Optional:
        * nsector: integer - number of sectors used to compute the windrose
        table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
        and the resulting computed table will be aligned with the cardinals
        points.
        * bins : 1D array or integer- number of bins, or a sequence of
        bins variable. If not set, bins=6 between min(var) and max(var).
        * blowto : bool. If True, the windrose will be pi rotated,
        to show where the wind blow to (usefull for pollutant rose).
        * colors : string or tuple - one string color ('k' or 'black'), in this
        case all bins will be plotted in this color; a tuple of matplotlib
        color args (string, float, rgb, etc), different levels will be plotted
        in different colors in the order specified.
        * cmap : a cm Colormap instance from matplotlib.cm.
          - if cmap == None and colors == None, a default Colormap is used.
        edgecolor : string - The string color each edge bar will be plotted.
        Default : no edgecolor
        """
        # This function does have a lot of local variables and is a candidate for refactoring.
        # In the meantime, disable the warning
        #pylint: disable=too-many-locals
        # _ is for bins, which is not required
        _, nbins, nsector, colors, angles, kwargs = self._init_plot(direction, var, **kwargs)
        _ = kwargs.pop('facecolor', None)
        edgecolor = kwargs.pop('edgecolor', None)
        if edgecolor is not None:
            if not isinstance(edgecolor, str):
                raise ValueError('edgecolor must be a string color')
        # Bar width grows with the bin index ("proportional" mode).
        opening = np.linspace(0.0, np.pi/16, nbins)
        for j in range(nsector):
            offset = 0
            for i in range(nbins):
                if i > 0:
                    offset += self._info['table'][i-1, j] #pylint: disable=invalid-sequence-index
                val = self._info['table'][i, j] #pylint: disable=invalid-sequence-index
                zorder = ZBASE + nbins - i
                patch = Rectangle(
                    (angles[j]-opening[i]/2, offset), opening[i],
                    val, facecolor=colors[i], edgecolor=edgecolor,
                    zorder=zorder, **kwargs)
                self.add_patch(patch)
                if j == 0:
                    self.patches_list.append(patch)
        self._update()
def histogram(direction, var, bins, nsector, normed=False, blowto=False): #pylint: disable=too-many-arguments
    """
    Returns an array where, for each sector of wind
    (centred on the north), we have the number of time the wind comes with a
    particular var (speed, polluant concentration, ...).
    * direction : 1D array - directions the wind blows from, North centred
    * var : 1D array - values of the variable to compute. Typically the wind
    speeds
    * bins : 1D array - var category edges against which the table is computed
    * nsector : integer - number of sectors
    * normed : boolean - The resulting table is normed in percent or not.
    * blowto : boolean - Normaly a windrose is computed with directions
    as wind blows from. If true, the table will be reversed (usefull for
    pollutantrose)

    Returns (dir_edges, var_bins, table) where table has shape
    (len(bins), nsector).
    """
    if len(var) != len(direction):
        raise ValueError("var (%d) and direction (%d) must have same length" % (len(var), len(direction)))
    angle = 360./nsector
    # Sector edges shifted by half a sector so the first sector is centred
    # on North (0 deg).  Fix: np.float was removed in NumPy 1.24; the
    # builtin float is the exact equivalent.
    dir_bins = np.arange(-angle/2, 360.+angle, angle, dtype=float)
    dir_edges = dir_bins.tolist()
    dir_edges.pop(-1)
    dir_edges[0] = dir_edges.pop(-1)
    dir_bins[0] = 0.
    # Open-ended last var bin so every sample falls somewhere.
    var_bins = bins.tolist()
    var_bins.append(np.inf)
    if blowto:
        direction = direction + 180.
        direction[direction >= 360.] = direction[direction >= 360.] - 360
    # Fix: histogram2d's 'normed' keyword was deprecated and then removed in
    # NumPy 1.24; the default already returns raw counts, which is exactly
    # what normed=False requested.  np.histogram2d is the stable public API.
    table = np.histogram2d(x=var, y=direction, bins=[var_bins, dir_bins])[0]
    # add the last value to the first to have the table of North winds
    table[:, 0] = table[:, 0] + table[:, -1]
    # and remove the last col
    table = table[:, :-1]
    if normed:
        table = table*100/table.sum()
    return dir_edges, var_bins, table
|
gpl-2.0
|
gdooper/scipy
|
scipy/signal/waveforms.py
|
11
|
17267
|
# Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly',
'unit_impulse']
def sawtooth(t, width=1):
    """
    Return a periodic sawtooth or triangle waveform.
    The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
    interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
    ``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
    Note that this is not band-limited.  It produces an infinite number
    of harmonics, which are aliased back and forth across the frequency
    spectrum.
    Parameters
    ----------
    t : array_like
        Time.
    width : array_like, optional
        Width of the rising ramp as a proportion of the total cycle.
        Default is 1, producing a rising ramp, while 0 produces a falling
        ramp.  `width` = 0.5 produces a triangle wave.
        If an array, causes wave shape to change over time, and must be the
        same length as t.
    Returns
    -------
    y : ndarray
        Output array containing the sawtooth waveform.
    Examples
    --------
    A 5 Hz waveform sampled at 500 Hz for 1 second:
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(0, 1, 500)
    >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
    """
    t, w = asarray(t), asarray(width)
    # Broadcast t and width against each other: w + (t - t) has the combined
    # broadcast shape while leaving the values unchanged.
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
    # Preserve the input's floating dtype; everything else becomes float64.
    # Bug fix: the original tested ``in ['fFdD']`` (membership in a
    # one-element list), which never matched and silently upcast float32
    # input to float64.
    if t.dtype.char in 'fFdD':
        ytype = t.dtype.char
    else:
        ytype = 'd'
    y = zeros(t.shape, ytype)
    # width must be between 0 and 1 inclusive; out-of-range entries get nan
    mask1 = (w > 1) | (w < 0)
    place(y, mask1, nan)
    # take t modulo 2*pi
    tmod = mod(t, 2 * pi)
    # on the interval 0 to width*2*pi the function is
    #  tmod / (pi*w) - 1
    mask2 = ~mask1 & (tmod < w * 2 * pi)
    tsub = extract(mask2, tmod)
    wsub = extract(mask2, w)
    place(y, mask2, tsub / (pi * wsub) - 1)
    # on the interval width*2*pi to 2*pi the function is
    #  (pi*(w+1)-tmod) / (pi*(1-w))
    mask3 = ~mask1 & ~mask2
    tsub = extract(mask3, tmod)
    wsub = extract(mask3, w)
    place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
    return y
def square(t, duty=0.5):
    """
    Return a periodic square-wave waveform.

    The square wave has a period ``2*pi``, has value +1 from 0 to
    ``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
    the interval [0,1].

    Note that this is not band-limited.  It produces an infinite number
    of harmonics, which are aliased back and forth across the frequency
    spectrum.

    Parameters
    ----------
    t : array_like
        The input time array.
    duty : array_like, optional
        Duty cycle.  Default is 0.5 (50% duty cycle).
        If an array, causes wave shape to change over time, and must be the
        same length as t.

    Returns
    -------
    y : ndarray
        Output array containing the square waveform.

    Examples
    --------
    A 5 Hz waveform sampled at 500 Hz for 1 second:

    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(0, 1, 500, endpoint=False)
    >>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
    >>> plt.ylim(-2, 2)

    A pulse-width modulated sine wave:

    >>> plt.figure()
    >>> sig = np.sin(2 * np.pi * t)
    >>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
    >>> plt.subplot(2, 1, 1)
    >>> plt.plot(t, sig)
    >>> plt.subplot(2, 1, 2)
    >>> plt.plot(t, pwm)
    >>> plt.ylim(-1.5, 1.5)
    """
    t, w = asarray(t), asarray(duty)
    # Broadcast t and w against each other so scalar/array mixes work.
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
    # BUG FIX: the original tested ``t.dtype.char in ['fFdD']``, comparing a
    # single character against the whole string 'fFdD' (always False), so
    # every result was silently promoted to float64.  Membership in the
    # string itself keeps the input precision.
    if t.dtype.char in 'fFdD':
        ytype = t.dtype.char
    else:
        ytype = 'd'
    y = zeros(t.shape, ytype)

    # duty must be between 0 and 1 inclusive; out-of-range duties give nan
    mask1 = (w > 1) | (w < 0)
    place(y, mask1, nan)

    # on the interval 0 to duty*2*pi the function is 1
    tmod = mod(t, 2 * pi)
    mask2 = ~mask1 & (tmod < w * 2 * pi)
    place(y, mask2, 1)

    # on the interval duty*2*pi to 2*pi the function is -1
    mask3 = ~mask1 & ~mask2
    place(y, mask3, -1)
    return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
               retenv=False):
    """
    Return a Gaussian modulated sinusoid:

        ``exp(-a t^2) exp(1j*2*pi*fc*t).``

    If `retquad` is True, then return the real and imaginary parts
    (in-phase and quadrature).
    If `retenv` is True, then return the envelope (unmodulated signal).
    Otherwise, return the real part of the modulated sinusoid.

    Parameters
    ----------
    t : ndarray or the string 'cutoff'
        Input array.
    fc : int, optional
        Center frequency (e.g. Hz).  Default is 1000.
    bw : float, optional
        Fractional bandwidth in frequency domain of pulse (e.g. Hz).
        Default is 0.5.
    bwr : float, optional
        Reference level at which fractional bandwidth is calculated (dB).
        Default is -6.
    tpr : float, optional
        If `t` is 'cutoff', then the function returns the cutoff
        time for when the pulse amplitude falls below `tpr` (in dB).
        Default is -60.
    retquad : bool, optional
        If True, return the quadrature (imaginary) as well as the real part
        of the signal.  Default is False.
    retenv : bool, optional
        If True, return the envelope of the signal.  Default is False.

    Returns
    -------
    yI : ndarray
        Real part of signal.  Always returned.
    yQ : ndarray
        Imaginary part of signal.  Only returned if `retquad` is True.
    yenv : ndarray
        Envelope of signal.  Only returned if `retenv` is True.

    Raises
    ------
    ValueError
        If `fc` is negative, `bw` is non-positive, `bwr`/`tpr` are not
        negative, or `t` is a string other than 'cutoff'.

    See Also
    --------
    scipy.signal.morlet

    Examples
    --------
    Plot real component, imaginary component, and envelope for a 5 Hz pulse,
    sampled at 100 Hz for 2 seconds:

    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
    >>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
    >>> plt.plot(t, i, t, q, t, e, '--')
    """
    if fc < 0:
        raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
    if bw <= 0:
        raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
    if bwr >= 0:
        raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
                         "be < 0 dB" % bwr)

    # exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2)  = g(f)
    ref = pow(10.0, bwr / 20.0)
    # fdel = fc*bw/2:  g(fdel) = ref --- solve this for a
    #
    # pi^2/a * fc^2 * bw^2 /4=-log(ref)
    a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))

    # BUG FIX: the original wrote ``if t == 'cutoff'``, which performs an
    # elementwise (and deprecated) array-vs-string comparison when t is an
    # ndarray.  Test for a string explicitly instead.
    if isinstance(t, str):
        if t != 'cutoff':
            raise ValueError("If `t` is a string, it must be 'cutoff'")
        # compute cut_off point: solve exp(-a tc**2) = tref for tc,
        # where tref = 10^(tpr/20)
        if tpr >= 0:
            raise ValueError("Reference level for time cutoff must be < 0 dB")
        tref = pow(10.0, tpr / 20.0)
        return sqrt(-log(tref) / a)

    yenv = exp(-a * t * t)
    yI = yenv * cos(2 * pi * fc * t)
    yQ = yenv * sin(2 * pi * fc * t)
    if not retquad and not retenv:
        return yI
    if not retquad and retenv:
        return yI, yenv
    if retquad and not retenv:
        return yI, yQ
    return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
    """Frequency-swept cosine generator.

    'Hz' below really means 'cycles per unit': nothing requires the unit
    to be one second, and `t` could just as well measure space.  What
    matters is that rotation is counted in cycles, not radians.

    Parameters
    ----------
    t : array_like
        Times at which to evaluate the waveform.
    f0 : float
        Frequency (e.g. Hz) at time t=0.
    t1 : float
        Time at which `f1` is specified.
    f1 : float
        Frequency (e.g. Hz) of the waveform at time `t1`.
    method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
        Kind of frequency sweep; the shorter aliases 'lin'/'li',
        'quad'/'q', 'log'/'lo' and 'hyp' are also accepted.  Default is
        'linear': ``f(t) = f0 + (f1 - f0) * t / t1``.  'quadratic' sweeps
        along a parabola through (0, f0) and (t1, f1); 'logarithmic' uses
        ``f(t) = f0 * (f1/f0)**(t/t1)`` (f0, f1 nonzero, same sign);
        'hyperbolic' uses ``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
        (f0, f1 nonzero).
    phi : float, optional
        Phase offset, in degrees.  Default is 0.
    vertex_zero : bool, optional
        Only used when `method` is 'quadratic': selects whether the
        vertex of the frequency parabola lies at t=0 (True, the default)
        or at t=t1 (False).

    Returns
    -------
    y : ndarray
        ``cos(phase + (pi/180)*phi)`` where `phase` is the integral from
        0 to `t` of ``2*pi*f(t)``, with ``f(t)`` determined by `method`.

    See Also
    --------
    sweep_poly
    """
    # The phase computation (and all method validation) lives in
    # _chirp_phase, which keeps this wrapper trivially testable.
    swept_phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
    offset_radians = phi * (pi / 180)
    return cos(swept_phase + offset_radians)
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
def sweep_poly(t, poly, phi=0):
    """
    Frequency-swept cosine generator, with a time-dependent frequency.

    The instantaneous frequency at time `t` is given by the polynomial
    `poly`.

    Parameters
    ----------
    t : ndarray
        Times at which to evaluate the waveform.
    poly : 1-D array_like or instance of numpy.poly1d
        The desired frequency expressed as a polynomial.  A list or
        ndarray of length n is interpreted as the coefficients of
        ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``;
        a ``numpy.poly1d`` instance is evaluated directly as
        ``f(t) = poly(t)``.
    phi : float, optional
        Phase offset, in degrees.  Default: 0.

    Returns
    -------
    sweep_poly : ndarray
        ``cos(phase + (pi/180)*phi)``, where `phase` is the integral from
        0 to `t` of ``2 * pi * f(t)`` with ``f(t)`` as defined above.

    See Also
    --------
    chirp

    Notes
    -----
    .. versionadded:: 0.8.0
    """
    # The phase integration lives in _sweep_poly_phase, which keeps this
    # wrapper trivially testable.
    integrated_phase = _sweep_poly_phase(t, poly)
    offset_radians = phi * (pi / 180)
    return cos(integrated_phase + offset_radians)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
def unit_impulse(shape, idx=None, dtype=float):
    r"""
    Unit impulse signal (discrete delta function) or unit basis vector.

    Parameters
    ----------
    shape : int or tuple of int
        Number of samples in the output (1-D), or a tuple that represents the
        shape of the output (N-D).
    idx : None or int or tuple of int or 'mid', optional
        Index at which the value is 1.  If None, defaults to the 0th element.
        If ``idx='mid'``, the impulse will be centered at ``shape // 2`` in
        all dimensions.  If an int, the impulse will be at `idx` in all
        dimensions.
    dtype : data-type, optional
        The desired data-type for the array, e.g., `numpy.int8`.  Default is
        `numpy.float64`.

    Returns
    -------
    y : ndarray
        Output array containing an impulse signal.

    Notes
    -----
    The 1D case is also known as the Kronecker delta.

    .. versionadded:: 0.19.0

    Examples
    --------
    An impulse at the 0th element (:math:`\delta[n]`):

    >>> from scipy import signal
    >>> signal.unit_impulse(8)
    array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.])

    Impulse offset by 2 samples (:math:`\delta[n-2]`):

    >>> signal.unit_impulse(7, 2)
    array([ 0.,  0.,  1.,  0.,  0.,  0.,  0.])

    2-dimensional impulse, centered:

    >>> signal.unit_impulse((3, 3), 'mid')
    array([[ 0.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  0.]])

    Impulse at (2, 2), using broadcasting:

    >>> signal.unit_impulse((4, 4), 2)
    array([[ 0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  0.]])
    """
    # NOTE: the docstring is now a raw string -- the original contained
    # bare ``\d`` escapes which are deprecated escape sequences.
    out = zeros(shape, dtype)

    # BUG FIX: the original called ``np.atleast_1d(shape)`` here, but this
    # module never imports numpy as ``np`` (only individual names), so every
    # call raised NameError.  Normalize `shape` with plain Python instead.
    if not hasattr(shape, '__iter__'):
        shape = (shape,)
    else:
        shape = tuple(shape)

    if idx is None:
        idx = (0,) * len(shape)
    elif idx == 'mid':
        # Center of each dimension (floor division, matching ``shape // 2``).
        idx = tuple(dim // 2 for dim in shape)
    elif not hasattr(idx, "__iter__"):
        idx = (idx,) * len(shape)

    out[idx] = 1
    return out
|
bsd-3-clause
|
kyleabeauchamp/HMCNotes
|
code/old/test_ghmc_respa.py
|
1
|
1175
|
# Benchmark script: pre-equilibrate the explicit-solvent DHFR test system
# and run a GHMC-RESPA integrator on the CUDA platform.
import lb_loader
import numpy as np
import pandas as pd
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems
pd.set_option('display.width', 1000)
# Single-precision CUDA platform.
platform = mm.Platform_getPlatformByName("CUDA")
platform_properties = dict(CudaPrecision="single")
n_steps = 2500
temperature = 300. * u.kelvin
collision_rate = 1.0 / u.picoseconds
cutoff = 1.0 * u.nanometers
# Repartitioned (heavier) hydrogen mass -- presumably to allow the 1.5 fs
# timestep below; confirm against the integrator's stability requirements.
hydrogenMass = 3.0 * u.amu
testsystem = testsystems.DHFRExplicit(hydrogenMass=hydrogenMass, cutoff=cutoff)
system, positions = testsystem.system, testsystem.positions
positions = lb_loader.pre_equil(system, positions, temperature)
steps_per_hmc = 17
timestep = 1.5 * u.femtoseconds
# Nonbonded and FFT forces go to group 1, everything else to group 0.
hmc_integrators.guess_force_groups(system, nonbonded=1, fft=1, others=0)
factor = 1  # NOTE(review): unused in this script
# (group, substeps) pairs for the RESPA splitting -- TODO confirm the
# tuple semantics against GHMCRESPA's signature.
groups = [(0, 2), (1, 1)]
# Repeat the whole run three times with fresh contexts.
for i in range(3):
    integrator = hmc_integrators.GHMCRESPA(temperature, steps_per_hmc, timestep, collision_rate, groups)
    context = mm.Context(system, integrator, platform, platform_properties)
    context.setPositions(positions)
    context.setVelocitiesToTemperature(temperature)
    integrator.step(1)
    integrator.step(n_steps)
    integrator.vstep(5)
|
gpl-2.0
|
accpy/accpy
|
accpy/gui/simulate.py
|
2
|
14528
|
# -*- coding: utf-8 -*-
''' accpy.gui.simulate
author: felix.kramer(at)physik.hu-berlin.de
'''
from __future__ import division
try:
from Tkinter import N, E, S, W, LabelFrame, _setit, BOTH
from tkFileDialog import askopenfilename
from tkMessageBox import showerror
except:
from tkinter import N, E, S, W, LabelFrame, _setit, BOTH
from tkinter.filedialog import askopenfilename
from tkinter.messagebox import showerror
from time import time
from threading import Thread
from multiprocessing import cpu_count
from matplotlib import use
use('TkAgg')
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg,
NavigationToolbar2TkAgg)
from matplotlib.pyplot import close
from numpy import zeros, where, sort, concatenate, array, loadtxt
from .layout import (cs_tabbar, cs_label, cs_Intentry, cs_Dblentry, cs_button,
cs_dropd, cs_Strentry)
from ..lattices.reader import lattlist, latt2py
from ..visualize.stringformat import time2str, uc
from ..simulate.lsd import lsd
from ..simulate.ramp import simulate_ramp
from ..simulate.quadscan import simulate_quadscan
def showfigs(t0, status, figs, tabs):
    """Embed one matplotlib figure per tab and report the elapsed time.

    Clears every target tab, attaches a figure canvas plus navigation
    toolbar to each, then writes the time elapsed since ``t0`` into the
    ``status`` variable.
    """
    close('all')
    for figure_obj, tab in zip(figs, tabs):
        # Drop whatever widgets a previous run left behind in this tab.
        for child in tab.winfo_children():
            child.destroy()
        fig_canvas = FigureCanvasTkAgg(figure_obj, master=tab)
        nav_toolbar = NavigationToolbar2TkAgg(fig_canvas, tab)
        fig_canvas.get_tk_widget().pack()
        nav_toolbar.pack()
        fig_canvas.draw()
    elapsed = time2str(time() - t0)
    status.set('Status: finished, elapsed time: ' + elapsed)
def runthread(status, tabs, f_simulate, argstuple):
    """Run ``f_simulate(*argstuple)`` in a daemon thread and display results.

    Parameters
    ----------
    status : object with a ``set(str)`` method (tkinter StringVar-like)
        Receives progress messages.
    tabs : sequence
        Tab frames; ``tabs[1:]`` receive the resulting figures.
    f_simulate : callable
        Simulation entry point returning a sequence of figures.
    argstuple : tuple
        Positional arguments forwarded to ``f_simulate``.
    """
    def run(*argstuple):
        t0 = time()
        status.set('Status: running...')
        figs = f_simulate(*argstuple)
        showfigs(t0, status, figs, tabs[1:])
    # data plotting in new thread to keep gui (main thread&loop) responsive
    t_run = Thread(target=run, args=argstuple)
    # Daemon thread dies automatically with the main thread, so no global
    # stop flag is required.  BUG FIX: Thread.setDaemon() is deprecated
    # since Python 3.10 -- assign the ``daemon`` attribute instead.
    t_run.daemon = True
    # start thread
    t_run.start()
def gui_twisstrack(frame, w, h, status, start):
    # Build the twiss-tracking GUI page: a tab bar with a menu tab holding
    # the lattice selection and slice count, wiring the shared `start`
    # button to launch the simulation in a background thread.
    def _start():
        # Callback for the start button: validate the lattice choice and
        # hand the run off to `runthread`.
        latt = latticemenu.get()
        if latt in closedlatts:
            closed = True
        elif latt in openlatts:
            closed = False
        else:
            showerror(title='ERROR', message='Please choose a lattice')
            return
        slic = int(entry_slice.get())
        mode = 'trackbeta'
        # Twiss tracking uses a single particle for one round.
        particles = 1
        rounds = 1
        runthread(status, tabs, lsd,
                  (closed, latt, slic, mode, particles, rounds))
    start.configure(command=_start)
    tabs = cs_tabbar(frame, w, h, ['Menu', 'Radial', 'Axial', 'Dispersion',
                                   'Overview', 'Parameters', 'Beam extents'])
    cs_label(tabs[0], 1, 1, 'Lattice')
    cs_label(tabs[0], 1, 2, 'Nr. of slices')
    closedlatts, openlatts = lattlist()
    latticemenu = cs_dropd(tabs[0], 2, 1, closedlatts + openlatts)
    entry_slice = cs_Intentry(tabs[0], 2, 2, 1e3)
    return
def gui_parttrack(frame, w, h, status, start):
    # Build the particle-tracking GUI page.  `_start` launches the run;
    # `_check` adapts the menu when the lattice selection changes.
    def _start():
        latt = latticemenu.get()
        slic = int(entry_slice.get())
        if latt in closedlatts:
            closed = True
            rnds = int(entry_round.get())
        elif latt in openlatts:
            # Open lattices are traversed once -- the rounds entry does
            # not apply (it is hidden by `_check` below).
            closed = False
            rnds = int(1)
        else:
            showerror(title='ERROR', message='Please choose a lattice')
            return
        mode = 'trackpart'
        prts = int(entry_parts.get())
        runthread(status, tabs, lsd,
                  (closed, latt, slic, mode, prts, rnds))
    def _check(*args):
        # Dropdown callback: hide the "Nr. of rounds" entry for open
        # lattices, show it again for closed ones.
        lattice = latticemenu.get()
        if lattice in openlatts:
            roundslabel.set('')
            entry_round.grid_remove()
        else:
            roundslabel.set('Nr. of rounds')
            entry_round.grid(row=2, column=4)
    start.configure(command=_start)
    tabs = cs_tabbar(frame, w, h, [' Menu ', ' X ', ' X\' ', ' Y ', ' Y\' ',
                                   ' Z ', ' Z\' ', ' Overview ',
                                   ' Transverse phase space '])
    cs_label(tabs[0], 1, 1, 'Lattice')
    cs_label(tabs[0], 1, 2, 'Nr. of slices')
    cs_label(tabs[0], 1, 3, 'Nr. of particles (parallelized)')
    roundslabel = cs_label(tabs[0], 1, 4, 'Nr. of rounds')
    closedlatts, openlatts = lattlist()
    latticemenu = cs_dropd(tabs[0], 2, 1, closedlatts+openlatts, action=_check)
    entry_slice = cs_Intentry(tabs[0], 2, 2, 100)
    entry_parts = cs_Intentry(tabs[0], 2, 3, cpu_count())
    entry_round = cs_Intentry(tabs[0], 2, 4, 100)
    return
def gui_ramp(frame, w, h, status, start):
    # Build the energy-ramp simulation GUI page: timing, energy, cavity and
    # emittance inputs, with results spread over the remaining tabs.
    def _start():
        # Collect all entries, converting to SI-style base units
        # (labels are in MeV/MHz/kV, code multiplies accordingly).
        points = int(entry_pnts.get())
        T_per = float(entry_Tper.get())
        t_inj = float(entry_tinj.get())
        t_ext = float(entry_text.get())
        text2 = float(entry_tex2.get())
        E_inj = float(entry_Einj.get())*1e6
        E_ext = float(entry_Eext.get())*1e6
        latt = lattice.get()
        if latt not in closedlatts:
            showerror(title='ERROR', message='Please choose a lattice')
            return
        f_HF = float(entry_f_HF.get())*1e6
        # Space-separated entry fields allow several values per quantity.
        V_HFs = [float(x)*1e3 for x in entry_V_HF.get().split()]
        emitxs = [float(x)*1e-9 for x in entry_emitx.get().split()]
        emitys = [float(x)*1e-9 for x in entry_emity.get().split()]
        emitss = [float(x)*1e-3 for x in entry_emits.get().split()]
        runthread(status, tabs, simulate_ramp,
                  (T_per, t_inj, t_ext, text2, E_inj, E_ext, latt, points,
                   f_HF, V_HFs, emitxs, emitys, emitss))
    start.configure(command=_start)
    tabs = cs_tabbar(frame, w, h, ['Menu', 'Energy', 'Magnetic Flux',
                                   'Energy loss', 'Acceleration voltage',
                                   'Synchronous phase',
                                   'Synchrotron frequency', 'Bunch length',
                                   'Radial Emittance', 'Axial Emittance',
                                   'Longitudinal Emittance'])
    # Row 1/2: ramp timing inputs.
    cs_label(tabs[0], 1, 1, 'Lattice')
    cs_label(tabs[0], 1, 2, 'Acceleration Period / s')
    cs_label(tabs[0], 1, 3, 'Injection time / s')
    cs_label(tabs[0], 1, 4, 'Extraction time 1 / s')
    cs_label(tabs[0], 1, 5, 'Extraction time 2 / s')
    # Ramps only make sense for closed (ring) lattices.
    closedlatts, _ = lattlist()
    lattice = cs_dropd(tabs[0], 2, 1, closedlatts)
    entry_Tper = cs_Dblentry(tabs[0], 2, 2, 1e-1)
    entry_tinj = cs_Dblentry(tabs[0], 2, 3, 5518.944e-6)
    entry_text = cs_Dblentry(tabs[0], 2, 4, 0.03839)  # 38377.114e-6
    entry_tex2 = cs_Dblentry(tabs[0], 2, 5, 0.05696)  # 57076.1e-6
    # Row 3/4: resolution, cavity and energy inputs.
    cs_label(tabs[0], 3, 1, 'Calculation points')
    cs_label(tabs[0], 3, 2, 'Cavity frequency / MHz')
    cs_label(tabs[0], 3, 3, 'Injection energy / MeV')
    cs_label(tabs[0], 3, 4, 'Extraction energy / MeV')
    entry_pnts = cs_Intentry(tabs[0], 4, 1, 1e3)
    entry_f_HF = cs_Dblentry(tabs[0], 4, 2, 499.667)
    entry_Einj = cs_Dblentry(tabs[0], 4, 3, 52.3)
    entry_Eext = cs_Dblentry(tabs[0], 4, 4, 1720)
    # Rows 5-8: cavity voltage list and injection emittances.
    cs_label(tabs[0], 5, 1, 'Cavity peak Voltages / kV')
    cs_label(tabs[0], 5, 3, 'Emittance @ injection')
    cs_label(tabs[0], 6, 2, 'Radial')
    cs_label(tabs[0], 7, 2, 'Axial')
    cs_label(tabs[0], 8, 2, 'Longitudinal')
    cs_label(tabs[0], 6, 4, 'nm '+uc.pi+' rad')
    cs_label(tabs[0], 7, 4, 'nm '+uc.pi+' rad')
    cs_label(tabs[0], 8, 4, uc.ppt)
    entry_V_HF = cs_Strentry(tabs[0], 6, 1, '720 20000')
    entry_emitx = cs_Strentry(tabs[0], 6, 3, '100 200 300')
    entry_emity = cs_Strentry(tabs[0], 7, 3, '100 200 300')
    entry_emits = cs_Strentry(tabs[0], 8, 3, '1 2 3')
    return
def gui_quadscansim(frame, w, h, status, start):
    # Build the quadrupole-scan simulation GUI page: upstream beam
    # parameters, transport selection (plain drift or a lattice quad/screen
    # pair), scan range, and optional measured data to compare against.
    def _start():
        lattice = latticemenu.get()
        if lattice == 'drift':
            # Pure drift transport: a one-column unit-cell array with only
            # the drift length set.
            qL = float(entry_qL.get())
            driftlength = float(entry_dlen.get())
            UC = zeros([6, 1])
            UC[1] = driftlength
        else:
            quad = int(quadmenuval.get())
            screen = screenmenuval.get()
            _, _, _, UC, diagnostics, _, _, _, _, _, _, _ = latt2py(lattice, False)
            elements = UC[0, :]
            # index after selected quad (quad itself not required)
            # (element types 3 and 4 are the two quad orientations --
            # TODO confirm against the lattice reader's encoding)
            i = sort(concatenate((where(elements==3), where(elements==4)), 1))[0][quad-1]+1
            # index before selected fom (next would be fom itself)
            fom = where(array(diagnostics) == screen)[0][0]
            f = where(elements==7)[0][fom]
            if i > f:
                showerror(title='ERROR', message='Please choose a quad before chosen screen')
                return
            qL = UC[1, i-1]
            # Transport matrix spans from just after the quad to just
            # before the chosen screen.
            UC = UC[:, i:f]
        ki = float(entry_ki.get())
        kf = float(entry_kf.get())
        points = int(entry_points.get())
        # Entries are in nm rad / per-mille / MeV; convert to base units.
        epsx = float(entry_epsx.get())/1e9
        betx = float(entry_betx.get())
        alpx = float(entry_alpx.get())
        epsy = float(entry_epsy.get())/1e9
        bety = float(entry_bety.get())
        alpy = float(entry_alpy.get())
        epss = (float(entry_epss.get())/1e3)**2
        Dx = float(entry_Dx.get())
        Dpx = float(entry_Dpx.get())
        energy = float(entry_energy.get())*1e6
        particle = 'electron'
        if filestr.get() != '':
            data = loadtxt(filestr.get())
        else:
            data = None
        if betx < 0 or bety < 0:
            showerror('ERROR', 'beta function must be positive')
            return
        runthread(status, [1, lf_OUT], simulate_quadscan,
                  (ki, kf, qL, UC, points, epsx, betx, alpx,
                   epsy, bety, alpy, epss, Dx, Dpx, energy, particle, data))
    def _load():
        # File-picker callback for measured quadscan data.
        # NOTE(review): `global filename` is unused elsewhere, and both
        # branches below are identical -- the hdf5 extension check is
        # currently a no-op.
        global filename
        filename = askopenfilename(initialdir='accpy/exampledata/')
        if filename[-5::] != '.hdf5':
            #filestr.set('error: {} is not hdf5 file-type'.format(filename))
            #showerror('ERROR', 'THIS IS NOT A HDF5 FILE')
            filestr.set(filename)
        else:
            filestr.set(filename)
    def _check(*args):
        # Dropdown callback: for 'drift' show the manual length entries,
        # otherwise populate quad/screen menus from the chosen lattice.
        lattice = latticemenu.get()
        if lattice == 'drift':
            quadN.grid_remove()
            quadmenu.grid_remove()
            screenN.grid_remove()
            screenmenu.grid_remove()
            quadlab.grid()
            quadent.grid()
            driftlab.grid()
            driftent.grid()
        else:
            _, _, _, UC, diagnostics, _, _, _, _, _, _, _ = latt2py(lattice, False)
            elements = list(UC[0,:])
            quads = range(1, (elements.count(3)+elements.count(4)+1))
            quadlab.grid_remove()
            quadent.grid_remove()
            driftlab.grid_remove()
            driftent.grid_remove()
            # Rebuild both option menus for the newly selected lattice.
            screenmenu['menu'].delete(0, 'end')
            quadmenu['menu'].delete(0, 'end')
            [screenmenu['menu'].add_command(label=fom, command=_setit(screenmenuval, fom)) for fom in diagnostics]
            [quadmenu['menu'].add_command(label=quad, command=_setit(quadmenuval, quad)) for quad in quads]
            screenN.grid()
            quadN.grid()
            screenmenu.grid()
            quadmenu.grid()
    start.configure(command=_start)
    frame.pack(fill=BOTH, expand=1)
    # Four labelled input sections plus one results section.
    lf_upbeam = LabelFrame(frame, text="Upstream beam parameters", padx=5, pady=5)
    lf_upbeam.grid(row=1, column=0, sticky=W+E+N+S, padx=10, pady=10)
    lf_transfer = LabelFrame(frame, text="Transport matrix", padx=5, pady=5)
    lf_transfer.grid(row=2, column=0, sticky=W+E+N+S, padx=10, pady=10)
    lf_quadrupole = LabelFrame(frame, text="Quadrupole range", padx=5, pady=5)
    lf_quadrupole.grid(row=1, column=1, sticky=W+E+N+S, padx=10, pady=10)
    lf_data = LabelFrame(frame, text="Data comparison", padx=5, pady=5)
    lf_data.grid(row=2, column=1, sticky=W+E+N+S, padx=10, pady=10)
    lf_OUT = LabelFrame(frame, text="Results", padx=5, pady=5)
    lf_OUT.grid(row=3, column=0, columnspan=2, sticky=W+E+N+S, padx=10, pady=10)
    cs_label(lf_upbeam, 1, 2, uc.epsilon+' / nm rad')
    cs_label(lf_upbeam, 1, 3, uc.beta+' / m')
    cs_label(lf_upbeam, 1, 4, uc.alpha+'/ rad')
    cs_label(lf_upbeam, 2, 1, 'Radial')
    entry_epsx = cs_Dblentry(lf_upbeam, 2, 2, 202)
    entry_betx = cs_Dblentry(lf_upbeam, 2, 3, 6.37)
    entry_alpx = cs_Dblentry(lf_upbeam, 2, 4, -0.13)
    cs_label(lf_upbeam, 3, 1, 'Axial')
    entry_epsy = cs_Dblentry(lf_upbeam, 3, 2, 144)
    entry_bety = cs_Dblentry(lf_upbeam, 3, 3, 4.1)
    entry_alpy = cs_Dblentry(lf_upbeam, 3, 4, -0.51)
    cs_label(lf_upbeam, 4, 2, uc.delta+' / '+uc.ppt)
    cs_label(lf_upbeam, 4, 3, 'D / m')
    cs_label(lf_upbeam, 4, 4, 'D\' / rad')
    cs_label(lf_upbeam, 5, 1, 'Longitudinal')
    entry_epss = cs_Dblentry(lf_upbeam, 5, 2, 2.4)
    entry_Dx = cs_Dblentry(lf_upbeam, 5, 3, 0.)
    entry_Dpx = cs_Dblentry(lf_upbeam, 5, 4, 0.)
    cs_label(lf_transfer, 0, 0, 'Optic', sticky=W)
    _, openlatts = lattlist()
    latticemenu = cs_dropd(lf_transfer, 1, 0, ['drift'] + openlatts, action=_check)
    # Two alternative widget sets share this frame; `_check` toggles
    # between the drift entries and the lattice quad/screen menus.
    quadlab = cs_label(lf_transfer, 0, 1, 'Quadrupole length / m', retlab=True)[1]
    quadent = entry_qL = cs_Dblentry(lf_transfer, 0, 2, .119)
    driftlab = cs_label(lf_transfer, 1, 1, 'Driftlength / m', retlab=True)[1]
    driftent = entry_dlen = cs_Dblentry(lf_transfer, 1, 2, 1.8)
    quadN = cs_label(lf_transfer, 0, 1, 'Which quadrupole', retlab=True)[1]
    quadmenuval, quadmenu = cs_dropd(lf_transfer, 1, 1, [''], retbut=True)
    screenN = cs_label(lf_transfer, 2, 1, 'Which screen', retlab=True)[1]
    screenmenuval, screenmenu = cs_dropd(lf_transfer, 3, 1, openlatts, retbut=True)
    # Hide everything until a lattice (or 'drift') is chosen.
    quadN.grid_remove()
    quadmenu.grid_remove()
    screenN.grid_remove()
    screenmenu.grid_remove()
    quadlab.grid_remove()
    quadent.grid_remove()
    driftlab.grid_remove()
    driftent.grid_remove()
    cs_button(lf_data, 0, 0, 'Load', _load)
    filestr = cs_label(lf_data, 1, 0, '')
    cs_label(lf_quadrupole, 0, 0, 'Beam energy / MeV')
    entry_energy = cs_Dblentry(lf_quadrupole, 0, 1, 52)
    cs_label(lf_quadrupole, 1, 1, 'Quadrupole strength k / (1/m'+uc.squared+')')
    cs_label(lf_quadrupole, 2, 0, 'Initial (k<0 = axial focus)')
    entry_ki = cs_Dblentry(lf_quadrupole, 2, 1, -8)
    cs_label(lf_quadrupole, 3, 0, 'Final')
    entry_kf = cs_Dblentry(lf_quadrupole, 3, 1, 8)
    cs_label(lf_quadrupole, 4, 0, 'steps')
    entry_points = cs_Intentry(lf_quadrupole, 4, 1, 1000)
    return
|
gpl-3.0
|
miti0/mosquito
|
market_stats.py
|
1
|
15543
|
"""
import configargparse
from stats.stats import Stats
def main():
stats = Stats()
stats.run()
if __name__ == "__main__":
arg_parser = configargparse.get_argument_parser()
arg_parser.add('-c', '--config', is_config_file=True, help='config file path', default='mosquito.ini')
arg_parser.add("--live", help="REAL trading mode", action='store_true')
arg_parser.add('-v', '--verbosity', help='Verbosity', action='store_true')
args = arg_parser.parse_known_args()[0]
main()
"""
# Works on python3 / requires: pandas, numpy, pymongo, bokeh
# BTC: 1A7K4kgXLSSzvDRjvoGwomvhrNU4CKezEp
# LTC: LWShTeRrZpYS4aJhb6JdP3R9tNFMnZiDo2
import logging
from operator import itemgetter
from math import pi
from time import time
from pymongo import MongoClient
import pandas as pd
import numpy as np
from bokeh.plotting import figure, show
from bokeh.models import NumeralTickFormatter
from bokeh.models import LinearAxis, Range1d
logger = logging.getLogger(__name__)
def rsi(df, window, targetcol='weightedAverage', colname='rsi'):
    """ Calculates the Relative Strength Index (RSI) from a pandas dataframe
    http://stackoverflow.com/a/32346692/3389859

    Adds the result as column `colname` and returns the (mutated) frame.
    Rows before `window` have no RSI and are filled with the column mean.
    """
    series = df[targetcol]
    delta = series.diff().dropna()
    u = delta * 0  # upward moves (zeros template)
    d = u.copy()   # downward moves
    u[delta > 0] = delta[delta > 0]
    d[delta < 0] = -delta[delta < 0]
    # first value is sum of avg gains
    u[u.index[window - 1]] = np.mean(u[:window])
    u = u.drop(u.index[:(window - 1)])
    # first value is sum of avg losses
    d[d.index[window - 1]] = np.mean(d[:window])
    d = d.drop(d.index[:(window - 1)])
    rs = u.ewm(com=window - 1,
               ignore_na=False,
               min_periods=0,
               adjust=False).mean() / d.ewm(com=window - 1,
                                            ignore_na=False,
                                            min_periods=0,
                                            adjust=False).mean()
    df[colname] = 100 - 100 / (1 + rs)
    # BUG FIX: chained ``df[col].fillna(..., inplace=True)`` operates on an
    # intermediate object -- it is deprecated and becomes a silent no-op
    # under pandas copy-on-write.  Assign the filled column back instead.
    df[colname] = df[colname].fillna(df[colname].mean())
    return df
def sma(df, window, targetcol='close', colname='sma'):
    """Add a Simple Moving Average of `targetcol` to `df` as `colname`.

    Leading windows shorter than `window` are averaged over however many
    rows are available (min_periods=1).  Returns the mutated frame.
    """
    rolling_mean = df[targetcol].rolling(window=window,
                                         min_periods=1,
                                         center=False).mean()
    df[colname] = rolling_mean
    return df
def ema(df, window, targetcol='close', colname='ema', **kwargs):
    """ Calculates Exponential Moving Average on a 'targetcol' in a pandas
    dataframe.

    Extra keyword arguments (`min_periods`, `adjust`, `ignore_na`) are
    forwarded to `Series.ewm`.  Returns the mutated frame.
    """
    df[colname] = df[targetcol].ewm(
        span=window,
        min_periods=kwargs.get('min_periods', 1),
        adjust=kwargs.get('adjust', True),
        ignore_na=kwargs.get('ignore_na', False)
    ).mean()
    # BUG FIX: chained ``df[col].fillna(..., inplace=True)`` is deprecated
    # and a no-op under pandas copy-on-write; assign the result back.
    df[colname] = df[colname].fillna(df[colname].mean())
    return df
def macd(df, fastcol='emafast', slowcol='sma', colname='macd'):
    """Store `fastcol` minus `slowcol` (the MACD line) in `df[colname]`.

    Returns the mutated frame.
    """
    difference = df[fastcol] - df[slowcol]
    df[colname] = difference
    return df
def bbands(df, window, targetcol='close', stddev=2.0):
    """ Calculates Bollinger Bands for 'targetcol' of a pandas dataframe.

    Adds 'bbtop', 'bbbottom', 'bbrange' and 'bbpercent' columns (and 'sma'
    if not already present) and returns the frame.  NaNs from short leading
    windows are filled with each column's mean.
    """
    # Idiom fix: ``not 'sma' in df`` -> ``'sma' not in df``.
    if 'sma' not in df:
        df = sma(df, window, targetcol)
    # BUG FIX (three occurrences below): chained
    # ``df[col].fillna(..., inplace=True)`` is deprecated and a silent
    # no-op under pandas copy-on-write; assign the filled column back.
    df['sma'] = df['sma'].fillna(df['sma'].mean())
    # The rolling standard deviation feeds both bands; compute it once
    # instead of twice as the original did.
    rolling_std = df[targetcol].rolling(
        min_periods=1,
        window=window,
        center=False).std()
    df['bbtop'] = df['sma'] + stddev * rolling_std
    df['bbtop'] = df['bbtop'].fillna(df['bbtop'].mean())
    df['bbbottom'] = df['sma'] - stddev * rolling_std
    df['bbbottom'] = df['bbbottom'].fillna(df['bbbottom'].mean())
    df['bbrange'] = df['bbtop'] - df['bbbottom']
    df['bbpercent'] = ((df[targetcol] - df['bbbottom']) / df['bbrange']) - 0.5
    return df
def plotRSI(p, df, plotwidth=800, upcolor='green', downcolor='red'):
    # Overlay an RSI pane (0-100 scale) on bokeh figure `p` using a
    # secondary y axis, with bars colored by which side of 50 they fall on.
    # create y axis for rsi
    p.extra_y_ranges = {"rsi": Range1d(start=0, end=100)}
    p.add_layout(LinearAxis(y_range_name="rsi"), 'right')
    # create rsi 'zone' (30-70)
    p.patch(np.append(df['date'].values, df['date'].values[::-1]),
            np.append([30 for i in df['rsi'].values],
                      [70 for i in df['rsi'].values[::-1]]),
            color='olive',
            fill_alpha=0.2,
            legend="rsi",
            y_range_name="rsi")
    # Bar width derived from the spacing of the first two candles --
    # assumes a uniformly sampled date column (TODO confirm).
    candleWidth = (df.iloc[2]['date'].timestamp() -
                   df.iloc[1]['date'].timestamp()) * plotwidth
    # plot green bars (RSI at or above the 50 midline)
    inc = df.rsi >= 50
    p.vbar(x=df.date[inc],
           width=candleWidth,
           top=df.rsi[inc],
           bottom=50,
           fill_color=upcolor,
           line_color=upcolor,
           alpha=0.5,
           y_range_name="rsi")
    # Plot red bars (RSI at or below 50)
    dec = df.rsi <= 50
    p.vbar(x=df.date[dec],
           width=candleWidth,
           top=50,
           bottom=df.rsi[dec],
           fill_color=downcolor,
           line_color=downcolor,
           alpha=0.5,
           y_range_name="rsi")
def plotMACD(p, df, color='blue'):
    """Draw the MACD line from `df` onto bokeh figure `p`."""
    p.line(df['date'], df['macd'],
           line_width=4, color=color, alpha=0.8, legend="macd")
    # Eight-decimal tick labels (crypto-style price precision).
    p.yaxis[0].formatter = NumeralTickFormatter(format='0.00000000')
def plotCandlesticks(p, df, plotwidth=750, upcolor='green', downcolor='red'):
    # Draw OHLC candlesticks on bokeh figure `p`: black high-low wicks,
    # green bodies for up-candles, red for down-candles.
    # Candle width derived from the spacing of the first two rows --
    # assumes a uniformly sampled date column (TODO confirm).
    candleWidth = (df.iloc[2]['date'].timestamp() -
                   df.iloc[1]['date'].timestamp()) * plotwidth
    # Plot candle 'shadows'/wicks
    p.segment(x0=df.date,
              y0=df.high,
              x1=df.date,
              y1=df.low,
              color="black",
              line_width=2)
    # Plot green candles (close above open)
    inc = df.close > df.open
    p.vbar(x=df.date[inc],
           width=candleWidth,
           top=df.open[inc],
           bottom=df.close[inc],
           fill_color=upcolor,
           line_width=0.5,
           line_color='black')
    # Plot red candles (open above close)
    dec = df.open > df.close
    p.vbar(x=df.date[dec],
           width=candleWidth,
           top=df.open[dec],
           bottom=df.close[dec],
           fill_color=downcolor,
           line_width=0.5,
           line_color='black')
    # format price labels
    p.yaxis[0].formatter = NumeralTickFormatter(format='0.00000000')
def plotVolume(p, df, plotwidth=800, upcolor='green', downcolor='red'):
    # Draw per-candle volume bars on a secondary y axis of figure `p`,
    # colored by candle direction and kept faint (alpha=0.1).
    candleWidth = (df.iloc[2]['date'].timestamp() -
                   df.iloc[1]['date'].timestamp()) * plotwidth
    # create new y axis for volume, spanning the observed volume range
    p.extra_y_ranges = {"volume": Range1d(start=min(df['volume'].values),
                                          end=max(df['volume'].values))}
    p.add_layout(LinearAxis(y_range_name="volume"), 'right')
    # Plot green candles (close above open)
    inc = df.close > df.open
    p.vbar(x=df.date[inc],
           width=candleWidth,
           top=df.volume[inc],
           bottom=0,
           alpha=0.1,
           fill_color=upcolor,
           line_color=upcolor,
           y_range_name="volume")
    # Plot red candles (open above close)
    dec = df.open > df.close
    p.vbar(x=df.date[dec],
           width=candleWidth,
           top=df.volume[dec],
           bottom=0,
           alpha=0.1,
           fill_color=downcolor,
           line_color=downcolor,
           y_range_name="volume")
def plotBBands(p, df, color='navy'):
    """Shade the Bollinger band region and draw the SMA midline on `p`."""
    # Polygon outline: dates forward along the bottom band, then backward
    # along the top band.
    dates_forward = df['date'].values
    band_x = np.append(dates_forward, dates_forward[::-1])
    band_y = np.append(df['bbbottom'].values, df['bbtop'].values[::-1])
    p.patch(band_x, band_y, color=color, fill_alpha=0.1, legend="bband")
    p.line(df['date'], df['sma'], color=color, alpha=0.9, legend="sma")
def plotMovingAverages(p, df):
    """Draw the slow and fast EMA lines on bokeh figure ``p``."""
    for column, line_color in (('emaslow', 'orange'), ('emafast', 'red')):
        p.line(df['date'], df[column],
               color=line_color, alpha=0.9, legend=column)
class Charter(object):
    """ Retrieves 5min candlestick data for a market and saves it in a mongo
    db collection. Can display data in a dataframe or bokeh plot.

    Candles live in the 'poloniex' database, collection '<pair>chart',
    with each candle's epoch timestamp stored as the document '_id'.
    """

    def __init__(self, api):
        """
        api = poloniex api object
        """
        self.api = api

    def __call__(self, pair, frame=False):
        """ returns raw chart data from the mongo database, updates/fills the
        data if needed, the date column is the '_id' of each candle entry, and
        the date column has been removed. Use 'frame' to restrict the amount
        of data returned.
        Example: 'frame=api.YEAR' will return last years data
        """
        # use last pair and period if not specified
        if not frame:
            frame = self.api.YEAR * 10
        dbcolName = pair + 'chart'
        # get db connection
        db = MongoClient()['poloniex'][dbcolName]
        # get last candle: the newest entry within the last 20 minutes
        try:
            last = sorted(
                list(db.find({"_id": {"$gt": time() - 60 * 20}})),
                key=itemgetter('_id'))[-1]
        # FIX: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt. An empty result (IndexError) or a db error
        # both mean "no recent candle" -- fall through to a (re)fill below.
        except Exception:
            last = False
        # no entrys found, get all 5min data from poloniex
        if not last:
            logger.warning('%s collection is empty!', dbcolName)
            new = self.api.returnChartData(pair,
                                           period=60 * 5,
                                           start=time() - self.api.YEAR * 13)
        else:
            new = self.api.returnChartData(pair,
                                           period=60 * 5,
                                           start=int(last['_id']))
        # add new candles
        updateSize = len(new)
        logger.info('Updating %s with %s new entrys!',
                    dbcolName, str(updateSize))
        # show the progess
        for i in range(updateSize):
            print("\r%s/%s" % (str(i + 1), str(updateSize)), end=" complete ")
            date = new[i]['date']
            del new[i]['date']
            # upsert keyed on the candle date so reruns are idempotent
            db.update_one({'_id': date}, {"$set": new[i]}, upsert=True)
        print('')
        logger.debug('Getting chart data from db')
        # return data from db (sorted just in case...)
        return sorted(
            list(db.find({"_id": {"$gt": time() - frame}})),
            key=itemgetter('_id'))

    def dataFrame(self, pair, frame=False, zoom=False, window=120):
        """ returns pandas DataFrame from raw db data with indicators.
        zoom = passed as the resample(rule) argument to 'merge' candles into a
        different timeframe
        window = number of candles to use when calculating indicators
        """
        data = self.__call__(pair, frame)
        # make dataframe
        df = pd.DataFrame(data)
        # set date column
        df['date'] = pd.to_datetime(df["_id"], unit='s')
        if zoom:
            # aggregate the 5min candles into the requested timeframe
            df.set_index('date', inplace=True)
            df = df.resample(rule=zoom,
                             closed='left',
                             label='left').apply({'open': 'first',
                                                  'high': 'max',
                                                  'low': 'min',
                                                  'close': 'last',
                                                  'quoteVolume': 'sum',
                                                  'volume': 'sum',
                                                  'weightedAverage': 'mean'})
            df.reset_index(inplace=True)
        # calculate/add sma and bbands
        df = bbands(df, window)
        # add slow ema
        df = ema(df, window, colname='emaslow')
        # add fast ema
        df = ema(df, int(window // 3.5), colname='emafast')
        # add macd
        df = macd(df)
        # add rsi
        df = rsi(df, window // 5)
        # add candle body and shadow size
        df['bodysize'] = df['close'] - df['open']
        df['shadowsize'] = df['high'] - df['low']
        # percent change between consecutive closes
        df['percentChange'] = df['close'].pct_change()
        # indicators leave NaNs in their warm-up window; drop those rows
        df.dropna(inplace=True)
        return df

    def graph(self, pair, frame=False, zoom=False,
              window=120, plot_width=1000, min_y_border=40,
              border_color="whitesmoke", background_color="white",
              background_alpha=0.4, legend_location="top_left",
              tools="pan,wheel_zoom,reset"):
        """
        Plots market data using bokeh and returns a 2D array for gridplot
        (one row per sub-plot) along with the indicator DataFrame.
        """
        df = self.dataFrame(pair, frame, zoom, window)
        #
        # Start Candlestick Plot -------------------------------------------
        # create figure; y padded 20% beyond the data, x starts zoomed in
        # on the last tenth of the candles
        candlePlot = figure(
            x_axis_type=None,
            y_range=(min(df['low'].values) - (min(df['low'].values) * 0.2),
                     max(df['high'].values) * 1.2),
            x_range=(df.tail(int(len(df) // 10)).date.min().timestamp() * 1000,
                     df.date.max().timestamp() * 1000),
            tools=tools,
            title=pair,
            plot_width=plot_width,
            plot_height=int(plot_width // 2.7),
            toolbar_location="above")
        # add plots
        # plot volume
        plotVolume(candlePlot, df)
        # plot candlesticks
        plotCandlesticks(candlePlot, df)
        # plot bbands
        plotBBands(candlePlot, df)
        # plot moving aves
        plotMovingAverages(candlePlot, df)
        # set legend location
        candlePlot.legend.location = legend_location
        # set background color
        candlePlot.background_fill_color = background_color
        candlePlot.background_fill_alpha = background_alpha
        # set border color and size
        candlePlot.border_fill_color = border_color
        candlePlot.min_border_left = min_y_border
        candlePlot.min_border_right = candlePlot.min_border_left
        #
        # Start RSI/MACD Plot -------------------------------------------
        # create a new plot and share x range with candlestick plot
        rsiPlot = figure(plot_height=int(candlePlot.plot_height // 2.5),
                         x_axis_type="datetime",
                         y_range=(-(max(df['macd'].values) * 2),
                                  max(df['macd'].values) * 2),
                         x_range=candlePlot.x_range,
                         plot_width=candlePlot.plot_width,
                         title=None,
                         toolbar_location=None)
        # plot macd
        plotMACD(rsiPlot, df)
        # plot rsi
        plotRSI(rsiPlot, df)
        # set background color
        rsiPlot.background_fill_color = candlePlot.background_fill_color
        rsiPlot.background_fill_alpha = candlePlot.background_fill_alpha
        # set border color and size
        rsiPlot.border_fill_color = candlePlot.border_fill_color
        rsiPlot.min_border_left = candlePlot.min_border_left
        rsiPlot.min_border_right = candlePlot.min_border_right
        rsiPlot.min_border_bottom = 20
        # orient x labels
        rsiPlot.xaxis.major_label_orientation = pi / 4
        # set legend
        rsiPlot.legend.location = legend_location
        # set dataframe 'date' as index
        df.set_index('date', inplace=True)
        # return layout and df
        return [[candlePlot], [rsiPlot]], df
if __name__ == '__main__':
    # Manual smoke test: pull ~12 years of USDT_BTC candles, resample to
    # weekly bars, and render the chart in a browser with bokeh.
    from poloniex import Poloniex
    from bokeh.layouts import gridplot
    # verbose output for this module; quiet the chatty dependencies
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger("poloniex").setLevel(logging.INFO)
    logging.getLogger('requests').setLevel(logging.ERROR)
    # jsonNums=float: presumably makes the API parse numbers as floats --
    # confirm against the poloniex wrapper's documentation
    api = Poloniex(jsonNums=float)
    layout, df = Charter(api).graph('USDT_BTC', window=90,
                                    frame=api.YEAR * 12, zoom='1W')
    print(df.tail())
    p = gridplot(layout)
    show(p)
|
gpl-3.0
|
scienceopen/madrigalgps
|
dev/full2.py
|
1
|
8980
|
from __future__ import division
from datetime import datetime
from ephem import readtle,Observer
import numpy as np
from pandas import date_range, DataFrame,Panel
from pandas.io.pytables import read_hdf
from re import search
import h5py
import matplotlib.pyplot as plt
from time import time
from glob import glob
import math
from warnings import warn
from os import remove
def dateCalc(ow, os):
    """Convert a GPS week number ``ow`` and seconds-into-week ``os`` into a
    datetime, counting forward from the GPS epoch (1980-01-06).

    NOTE(review): the hour counter starts at 5, which looks like a fixed
    timezone offset baked into the original algorithm -- confirm before
    reuse. Leap seconds are not handled, and the leap-year test is the
    simple year%4 rule (fine for 1980-2099 only).
    """
    # normalize a negative seconds-of-week by borrowing one week
    if os < 0:
        ow -= 1
        os += 7 * 24 * 60 * 60
    # days per month; February is patched for leap years inside the loop
    dic = dict([(6, 30), (7, 31), (8, 31), (9, 30), (10, 31), (11, 30),
                (12, 31), (1, 31), (2, 28), (3, 31), (4, 30), (5, 31)])
    # FIX: use floor division. The module imports true division from
    # __future__, so '/' produced floats here and broke both range(odays)
    # and the datetime(...) constructor below.
    odays = os // (60 * 60 * 24) + ow * 7
    oseconds = os % (60 * 60 * 24)
    seconds = 0
    minute = 1
    hour = 5
    month = 1
    year = 1980
    day = 6
    # walk forward one calendar day at a time from the epoch date
    for _ in range(odays):
        dic[2] = 29 if year % 4 == 0 else 28
        if day == dic[month]:
            if month == 12:
                month = 1
                year += 1
            else:
                month += 1
            day = 1
        else:
            day += 1
    hour += oseconds // (60 * 60)
    if hour > 24:
        # NOTE(review): this day increment does not re-check month/year
        # rollover, and the leftover-seconds line below uses the already
        # wrapped hour -- both look fragile near midnight; confirm.
        day += 1
        hour -= 24
    # strip off the whole hours (relative to the hour-5 baseline)
    oseconds -= 60 * 60 * (hour - 5)
    minute = oseconds // 60
    seconds = oseconds % 60
    return datetime(year, month, day, hour, minute, seconds)
def loopsat(tlefn, dates, obslla):
    """Compute satellite positions for every time in ``dates`` as seen from
    the observer location ``obslla`` (lat, lon, elevation)."""
    observer = setupobs(obslla)
    data, _ = compsat(tlefn, observer, dates)
    return data
def setupobs(lla):
    """Build an ephem Observer at ``lla`` = (lat, lon, elevation).

    If the values cannot be converted, the Observer keeps ephem's
    defaults and a warning is emitted.
    """
    obs = Observer()
    try:
        obs.lat = str(lla[0])
        obs.lon = str(lla[1])
        obs.elevation = float(lla[2])
    except ValueError:
        warn('observation location not specified. defaults to lat=0, lon=0')
    return obs
def compsat(tlefn,obs,dates):
    """Compute az/el/lat/lon/alt/slant-range for every satellite in TLE file
    ``tlefn`` at every time in ``dates``, as seen by ephem Observer ``obs``.

    Returns (data, belowhoriz):
      data       -- pandas Panel: items=dates, major_axis=PRN, minor_axis=cols
      belowhoriz -- boolean mask (el < 0) for the LAST processed date only

    NOTE(review): if ``dates`` is empty, ``belowhoriz`` is never assigned and
    the return raises NameError -- confirm callers always pass dates.
    NOTE(review): ``df.ix`` and ``Panel`` are removed in modern pandas.
    """
    cols = ['az','el','lat','lon','alt','srange']
    sats,satnum = loadTLE(tlefn)
    # one 2-D slice (satellites x columns) per requested time
    data = Panel(items=dates, major_axis=satnum, minor_axis=cols)
    for j,d in enumerate(dates):
        obs.date = d
        df = DataFrame(index=satnum,columns=cols)
        for i,s in enumerate(sats):
            si = satnum[i]
            # propagate this satellite to obs.date with pyephem's SGP4
            s.compute(obs)
            if np.isfinite(s.sublat): #if sublat is nan, that means SGP4 couldn't solve for position
                df.at[si,['lat','lon','alt']] = np.degrees(s.sublat), np.degrees(s.sublong), s.elevation
                df.at[si,['az','el','srange']] = np.degrees(s.az), np.degrees(s.alt), s.range
        #FIXME: add dropna for times that sublat is NaN
        # blank out look angles for satellites below the horizon
        belowhoriz = df['el']<0
        df.ix[belowhoriz,['az','el','srange']] = np.nan
        data[d] = df
    return data,belowhoriz
def loadTLE(filename):
    """ Loads a TLE file and creates a list of satellites.
    Returns (satlist, prn): the pyephem bodies and their PRN numbers,
    the latter parsed out of each satellite's name line.
    http://blog.thetelegraphic.com/2012/gps-sattelite-tracking-in-python-using-pyephem/
    """
    satlist = []
    prn = []
    with open(filename, 'r') as f:
        # TLE records are three lines each: name, line 1, line 2
        while True:
            name_line = f.readline()
            if not name_line:
                break
            sat = readtle(name_line, f.readline(), f.readline())
            satlist.append(sat)
            prn.append(int(search(r'(?<=PRN)\s*\d\d', sat.name).group()))
    return satlist, prn
def makeDates(sy, smo, sd, freq=None):
    """Return datetimes covering 00:00 (inclusive) to 12:00 (exclusive) of
    the given date, evenly spaced by ``freq``.

    freq : pandas offset alias, e.g. '1T' for one minute. Defaults to the
        module-level ``satfreq`` setting for backward compatibility.
    """
    # 75x faster than for loop
    if freq is None:
        freq = satfreq  # module-level sampling-period setting
    start = '{}-{}-{}T00:00:00'.format(sy, smo, sd)
    end = '{}-{}-{}T12:00:00'.format(sy, smo, sd)
    try:
        # pandas >= 1.4 spells the bound-inclusion argument 'inclusive'
        rng = date_range(start=start, end=end, freq=freq, inclusive='left')
    except TypeError:
        # older pandas (as used by this script) only accepts 'closed'
        rng = date_range(start=start, end=end, freq=freq, closed='left')
    return rng.to_pydatetime().tolist()
def findIntersection(satdata,beamisr,dates,beamfn,maxdist):
    """
    iterate over time: for each time, was there a beam intersection for any satellite?
    There are 477 beams and 32 satellites.
    Would possibly be more efficient to use k-dimensional tree.
    In lieu of that, generally chose to loop over the variable with fewer elements for greater speed.
    Note: there are a lot of NaN satellite entries, making the satellite looping even faster
    """
    # wrap negative azimuths into [0, 360) so angular distances compare cleanly
    beamisr.loc[beamisr['AZM']<0,'AZM'] += 360
    #make a column (minor_axis) to store beam intersection ID for each sat at each time
    satdata.loc[:,:,'intersect'] = np.NaN
    for t,df in satdata.iteritems(): # for each time...
        #throw away satellites below horizon (majority are discarded for any time)
        df.dropna(axis=0,how='any',thresh=4,inplace=True)
        for svnum,d in df.iterrows(): # for each sat at this time...
            # az/el separation (degrees) between this satellite and every beam
            dist = np.hypot(d['az']-beamisr['AZM'], d['el']-beamisr['ELM'])
            if dist.min() < maxdist:
                # record the BEAMID of the closest beam for this sat/time
                satdata.loc[t,svnum,'intersect'] = beamisr.loc[dist.argmin(),'BEAMID']
    nIntersect = satdata.loc[:,:,'intersect'].count().sum()
    print('{} intersections found across all times and satellites.'.format(nIntersect))
    if nIntersect==0:
        # FIX: was a Python 2 'print' statement, a SyntaxError under
        # Python 3 (and inconsistent with the call form used above);
        # the function form is valid in both with a single argument.
        print('no intersections found')
    return satdata
def checkFile(fn,satdata,beamisr,maxdtsec):
    """
    we need to find matching ISR beam IDs very near the time the satellite
    passes through the ISR beam.
    for speed, use Unix epoch time (seconds since Jan 1, 1970) for comparisons
    Note: the Madrigal HDF5 data is read in as a Numpy structured array
    Algorithm (not optimized):
    1) knowing what satellites will eventually intersect beams, are any of those beamids in this file?
    2) knowing what times intersections will occur, do those times exist in this file for those beams?
    3) For the beams that meet conditions 1 and 2, compute TEC by numerical integration of NE
    output:
    tecisr: 2-D DataFrame, beamid x time
    """
    h5p = '/Data/Table Layout'
    #rows: satellite. cols: time
    intersections = satdata.loc[:,:,'intersect']
    intersections.dropna(axis=1,how='all',inplace=True)
    beamlist = beamisr['BEAMID'].values # have to make a copy to sort
    beamlist.sort()
    tecisr = DataFrame(index=beamlist, columns=intersections.columns)
    try:
        with h5py.File(fn,'r',libver='latest') as f:
            for t in intersections: #for each time...
                #mask for matching beam ids (not necessarily matching in time yet...)
                intmask = np.in1d(f[h5p]['beamid'].astype(int),intersections[t].dropna().astype(int))
                if not intmask.any(): #no overlap, no point in evaluating times
                    continue
                #mask for matching times (not necessarily matching beamids)
                timemask = np.absolute(f[h5p]['ut1_unix'] - (t.to_pydatetime()-datetime(1970,1,1)).total_seconds()) < maxdtsec
                #mask for where beamid and times "match"
                inttimemask = intmask & timemask
                #retrieve "good" rows of HDF5 that are the correct Beam ID(s) and time(s)
                intdata = f[h5p][inttimemask]
                #TODO not tested past this point
                #TODO account for the case where there are two times and one beam that overlap with the satellite.
                """
                intdata will have numerous rows corresponding to each matching time & beam id
                each row is a range cell. These rows will be numerically integrated over Ne.
                """
                uniqbeamid = np.unique(intdata['beamid']).astype(int)
                for b in uniqbeamid:
                    mask = np.isclose(intdata['beamid'],b) #this is one beam's rows, all range bins
                    # NOTE(review): the RHS below has fewer elements than
                    # 'mask' (it is already masked), so this in-place &=
                    # looks shape-inconsistent -- untested per TODO above.
                    mask &= np.isfinite(intdata['nel'][mask]) #dropna
                    # integrate 10**nel over range ('nel' is presumably
                    # log10 electron density -- confirm with Madrigal docs)
                    tecisr.loc[b,t] = np.trapz(10**intdata['nel'][mask], intdata['range'][mask])
    except Exception as e:
        # FIX: was a bare 'except:' that silently deleted the file on ANY
        # failure (even KeyboardInterrupt). Keep the best-effort "discard a
        # file we could not read" behavior, but only for real errors, and
        # report what happened instead of hiding it.
        warn('problem reading {}; removing it ({})'.format(fn, e))
        remove(fn)
    tecisr.dropna(axis=1,how='all',inplace=True) #only retain times with TEC data (vast majority don't have)
    return tecisr
# globals (run configuration)
tlefn = 'gps-ops2013.txt'  # GPS satellite TLE set
obslla = [65,-148,0]  # observer lat, lon, elevation -- presumably near PFISR; confirm
beamfn = "PFISRbeammap.h5"  # ISR beam map (az/el per beam id)
satfreq='1T' #T means minutes
datadir='files'
maxangdist=5 #degrees
maxdtsec = 60  # max allowed sat/ISR time mismatch, seconds
beamisr = read_hdf(beamfn,'data')
flist = glob('files/*.h5')
year = 0
month = 0
day = 0
data = []
for f in flist:
    tic = time()
    # parse yymmdd from fixed character positions of the filename
    # NOTE(review): assumes names like 'files/xxYYMMDD....h5' -- confirm
    if year!=int(f[9:11])+2000 or month!=int(f[11:13]) or day!=int(f[13:15]):
        year = int(f[9:11])+2000
        month = int(f[11:13])
        day = int(f[13:15])
        print 'new date:',month,day,year
        # recompute satellite ephemerides only when the date changes
        dates = makeDates(year,month,day)
        satdata = loopsat(tlefn,dates,obslla)
        satdata = findIntersection(satdata,beamisr,dates,beamfn,maxangdist)
    # per-file TEC, restricted to the beams/times that intersect a satellite
    tecisr = checkFile(f,satdata,beamisr,maxdtsec)
    for t in tecisr:
        for tec in tecisr[t]:
            if not math.isnan(tec):
                for bid in satdata[t,:,'intersect']:
                    if not math.isnan(bid):
                        # (time, beam id, PRN, TEC) record
                        current = (t,bid,satdata[t,:,'intersect'][satdata[t,:,'intersect']==bid].index[0],tec)
                        print current
                        data.append(current)
    print('{:.1f} sec. to compute TEC for {} times in {}'.format(time()-tic,tecisr.shape[1],f))
# NOTE(review): backslash in this path is a literal '\i' (not an escape) --
# works but is Windows-specific and warns on modern Python
gpsdata = np.loadtxt('gpsData\ionio_dataout_2013_349_0402.log')
data=np.array(data)
|
gpl-3.0
|
Srisai85/scikit-learn
|
sklearn/cluster/birch.py
|
207
|
22706
|
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, insted of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
    """The node has to be split if there is no place for a new subcluster
    in the node.
    1. Two empty nodes and two empty subclusters are initialized.
    2. The pair of distant subclusters are found.
    3. The properties of the empty subclusters and nodes are updated
       according to the nearest distance between the subclusters to the
       pair of distant subclusters.
    4. The two nodes are set as children to the two subclusters.
    """
    new_subcluster1 = _CFSubcluster()
    new_subcluster2 = _CFSubcluster()
    new_node1 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_node2 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_subcluster1.child_ = new_node1
    new_subcluster2.child_ = new_node2
    # splice the two new nodes into the doubly-linked leaf list in place
    # of the node being split
    if node.is_leaf:
        if node.prev_leaf_ is not None:
            node.prev_leaf_.next_leaf_ = new_node1
        new_node1.prev_leaf_ = node.prev_leaf_
        new_node1.next_leaf_ = new_node2
        new_node2.prev_leaf_ = new_node1
        new_node2.next_leaf_ = node.next_leaf_
        if node.next_leaf_ is not None:
            node.next_leaf_.prev_leaf_ = new_node2
    dist = euclidean_distances(
        node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
    n_clusters = dist.shape[0]
    # the two most mutually distant subclusters seed the two new nodes
    farthest_idx = np.unravel_index(
        dist.argmax(), (n_clusters, n_clusters))
    # FIX: index with a tuple. 'dist[[farthest_idx]]' relied on NumPy's
    # legacy non-tuple-sequence indexing (deprecated in NumPy 1.15, an
    # error in later releases); 'dist[(farthest_idx,)]' fancy-indexes the
    # two seed rows, i.e. each seed's distances to every subcluster.
    node1_dist, node2_dist = dist[(farthest_idx,)]
    node1_closer = node1_dist < node2_dist
    # assign every existing subcluster to whichever seed it is closer to
    for idx, subcluster in enumerate(node.subclusters_):
        if node1_closer[idx]:
            new_node1.append_subcluster(subcluster)
            new_subcluster1.update(subcluster)
        else:
            new_node2.append_subcluster(subcluster)
            new_subcluster2.update(subcluster)
    return new_subcluster1, new_subcluster2
class _CFNode(object):
    """Each node in a CFTree is called a CFNode.

    The CFNode can have a maximum of branching_factor
    number of CFSubclusters.

    Parameters
    ----------
    threshold : float
        Threshold needed for a new subcluster to enter a CFSubcluster.
    branching_factor : int
        Maximum number of CF subclusters in each node.
    is_leaf : bool
        We need to know if the CFNode is a leaf or not, in order to
        retrieve the final subclusters.
    n_features : int
        The number of features.

    Attributes
    ----------
    subclusters_ : array-like
        List of subclusters for a particular CFNode.
    prev_leaf_ : _CFNode
        Previous leaf. Useful only if is_leaf is True.
    next_leaf_ : _CFNode
        Next leaf. Useful only if is_leaf is True.
    init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
        Manipulate ``init_centroids_`` throughout rather than ``centroids_``
        since the centroids are just a view of the ``init_centroids_``.
    init_sq_norm_ : ndarray, shape (branching_factor + 1,)
        Manipulate ``init_sq_norm_`` throughout. Similar to
        ``init_centroids_``.
    centroids_ : ndarray
        View of ``init_centroids_``.
    squared_norm_ : ndarray
        View of ``init_sq_norm_``.
    """
    def __init__(self, threshold, branching_factor, is_leaf, n_features):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.is_leaf = is_leaf
        self.n_features = n_features
        # The list of subclusters, centroids and squared norms
        # to manipulate throughout.
        self.subclusters_ = []
        # one spare slot (branching_factor + 1) lets a node temporarily
        # hold the subcluster that triggers a split
        self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
        self.init_sq_norm_ = np.zeros((branching_factor + 1))
        self.squared_norm_ = []
        self.prev_leaf_ = None
        self.next_leaf_ = None
    def append_subcluster(self, subcluster):
        """Store ``subcluster`` in the next free slot of this node."""
        n_samples = len(self.subclusters_)
        self.subclusters_.append(subcluster)
        self.init_centroids_[n_samples] = subcluster.centroid_
        self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids and squared norm as views. In this way
        # if we change init_centroids and init_sq_norm_, it is
        # sufficient,
        self.centroids_ = self.init_centroids_[:n_samples + 1, :]
        self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
    def update_split_subclusters(self, subcluster,
                                 new_subcluster1, new_subcluster2):
        """Remove a subcluster from a node and update it with the
        split subclusters.
        """
        ind = self.subclusters_.index(subcluster)
        # first half of the split re-uses the old slot...
        self.subclusters_[ind] = new_subcluster1
        self.init_centroids_[ind] = new_subcluster1.centroid_
        self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
        # ...the second half goes into a fresh slot
        self.append_subcluster(new_subcluster2)
    def insert_cf_subcluster(self, subcluster):
        """Insert a new subcluster into the node.

        Returns True when this node overflowed and must itself be split
        by the caller.
        """
        if not self.subclusters_:
            self.append_subcluster(subcluster)
            return False
        threshold = self.threshold
        branching_factor = self.branching_factor
        # We need to find the closest subcluster among all the
        # subclusters so that we can insert our new subcluster.
        # (||c||^2 - 2 c.x ranks candidates identically to the full
        # squared euclidean distance, the ||x||^2 term being constant)
        dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
        dist_matrix *= -2.
        dist_matrix += self.squared_norm_
        closest_index = np.argmin(dist_matrix)
        closest_subcluster = self.subclusters_[closest_index]
        # If the subcluster has a child, we need a recursive strategy.
        if closest_subcluster.child_ is not None:
            split_child = closest_subcluster.child_.insert_cf_subcluster(
                subcluster)
            if not split_child:
                # If it is determined that the child need not be split, we
                # can just update the closest_subcluster
                closest_subcluster.update(subcluster)
                self.init_centroids_[closest_index] = \
                    self.subclusters_[closest_index].centroid_
                self.init_sq_norm_[closest_index] = \
                    self.subclusters_[closest_index].sq_norm_
                return False
            # things not too good. we need to redistribute the subclusters in
            # our child node, and add a new subcluster in the parent
            # subcluster to accomodate the new child.
            else:
                new_subcluster1, new_subcluster2 = _split_node(
                    closest_subcluster.child_, threshold, branching_factor)
                self.update_split_subclusters(
                    closest_subcluster, new_subcluster1, new_subcluster2)
                if len(self.subclusters_) > self.branching_factor:
                    return True
                return False
        # good to go!
        else:
            merged = closest_subcluster.merge_subcluster(
                subcluster, self.threshold)
            if merged:
                # merge succeeded in place; refresh the cached slot values
                self.init_centroids_[closest_index] = \
                    closest_subcluster.centroid_
                self.init_sq_norm_[closest_index] = \
                    closest_subcluster.sq_norm_
                return False
            # not close to any other subclusters, and we still
            # have space, so add.
            elif len(self.subclusters_) < self.branching_factor:
                self.append_subcluster(subcluster)
                return False
            # We do not have enough space nor is it closer to an
            # other subcluster. We need to split.
            else:
                self.append_subcluster(subcluster)
                return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode has its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
    """Implements the Birch clustering algorithm.

    Every new sample is inserted into the root of the Clustering Feature
    Tree. It is then clubbed together with the subcluster that has the
    centroid closest to the new sample. This is done recursively till it
    ends up at the subcluster of the leaf of the tree that has the closest
    centroid.

    Read more in the :ref:`User Guide <birch>`.

    Parameters
    ----------
    threshold : float, default 0.5
        The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be lesser than the threshold. Otherwise a new
        subcluster is started.
    branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor then
        the node has to be split. The corresponding parent also has to be
        split and if the number of subclusters in the parent is greater than
        the branching factor, then it has to be split recursively.
    n_clusters : int, instance of sklearn.cluster model, default None
        Number of clusters after the final clustering step, which treats the
        subclusters from the leaves as new samples. By default, this final
        clustering step is not performed and the subclusters are returned
        as they are. If a model is provided, the model is fit treating
        the subclusters as new samples and the initial data is mapped to the
        label of the closest subcluster. If an int is provided, the model
        fit is AgglomerativeClustering with n_clusters set to the int.
    compute_labels : bool, default True
        Whether or not to compute labels for each fit.
    copy : bool, default True
        Whether or not to make a copy of the given data. If set to False,
        the initial data will be overwritten.

    Attributes
    ----------
    root_ : _CFNode
        Root of the CFTree.
    dummy_leaf_ : _CFNode
        Start pointer to all the leaves.
    subcluster_centers_ : ndarray
        Centroids of all subclusters read directly from the leaves.
    subcluster_labels_ : ndarray
        Labels assigned to the centroids of the subclusters after
        they are clustered globally.
    labels_ : ndarray, shape (n_samples,)
        Array of labels assigned to the input data.
        If partial_fit is used instead of fit, they are assigned to the
        last batch of data.

    Examples
    --------
    >>> from sklearn.cluster import Birch
    >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
    >>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
    ... compute_labels=True)
    >>> brc.fit(X)
    Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
       threshold=0.5)
    >>> brc.predict(X)
    array([0, 0, 0, 1, 1, 1])

    References
    ----------
    * Tian Zhang, Raghu Ramakrishnan, Maron Livny
      BIRCH: An efficient data clustering method for large databases.
      http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
    * Roberto Perdisci
      JBirch - Java implementation of BIRCH clustering algorithm
      https://code.google.com/p/jbirch/
    """
    def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
                 compute_labels=True, copy=True):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.n_clusters = n_clusters
        self.compute_labels = compute_labels
        self.copy = copy
    def fit(self, X, y=None):
        """
        Build a CF Tree for the input data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.
        """
        # flag that a fresh tree must be built in _fit
        self.fit_, self.partial_fit_ = True, False
        return self._fit(X)
    def _fit(self, X):
        # Shared implementation behind fit() and partial_fit().
        X = check_array(X, accept_sparse='csr', copy=self.copy)
        threshold = self.threshold
        branching_factor = self.branching_factor
        if branching_factor <= 1:
            raise ValueError("Branching_factor should be greater than one.")
        n_samples, n_features = X.shape
        # If partial_fit is called for the first time or fit is called, we
        # start a new tree.
        partial_fit = getattr(self, 'partial_fit_')
        has_root = getattr(self, 'root_', None)
        if getattr(self, 'fit_') or (partial_fit and not has_root):
            # The first root is the leaf. Manipulate this object throughout.
            self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
                                 n_features=n_features)
            # To enable getting back subclusters.
            self.dummy_leaf_ = _CFNode(threshold, branching_factor,
                                       is_leaf=True, n_features=n_features)
            self.dummy_leaf_.next_leaf_ = self.root_
            self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize. Enough to convince to use cython.
        if not sparse.issparse(X):
            iter_func = iter
        else:
            iter_func = _iterate_sparse_X
        # insert samples one by one; split the root whenever it overflows
        for sample in iter_func(X):
            subcluster = _CFSubcluster(linear_sum=sample)
            split = self.root_.insert_cf_subcluster(subcluster)
            if split:
                new_subcluster1, new_subcluster2 = _split_node(
                    self.root_, threshold, branching_factor)
                del self.root_
                # the tree grows one level: new (non-leaf) root holding
                # the two halves of the old root
                self.root_ = _CFNode(threshold, branching_factor,
                                     is_leaf=False,
                                     n_features=n_features)
                self.root_.append_subcluster(new_subcluster1)
                self.root_.append_subcluster(new_subcluster2)
        centroids = np.concatenate([
            leaf.centroids_ for leaf in self._get_leaves()])
        self.subcluster_centers_ = centroids
        self._global_clustering(X)
        return self
    def _get_leaves(self):
        """
        Retrieve the leaves of the CF Node.

        Returns
        -------
        leaves: array-like
            List of the leaf nodes.
        """
        # walk the singly-forward direction of the leaf linked list,
        # starting just past the sentinel dummy leaf
        leaf_ptr = self.dummy_leaf_.next_leaf_
        leaves = []
        while leaf_ptr is not None:
            leaves.append(leaf_ptr)
            leaf_ptr = leaf_ptr.next_leaf_
        return leaves
    def partial_fit(self, X=None, y=None):
        """
        Online learning. Prevents rebuilding of CFTree from scratch.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features), None
            Input data. If X is not provided, only the global clustering
            step is done.
        """
        self.partial_fit_, self.fit_ = True, False
        if X is None:
            # Perform just the final global clustering step.
            self._global_clustering()
            return self
        else:
            self._check_fit(X)
            return self._fit(X)
    def _check_fit(self, X):
        # Validate that predicting/partial-fitting is consistent with any
        # previous fit (feature count must match the stored centroids).
        is_fitted = hasattr(self, 'subcluster_centers_')
        # Called by partial_fit, before fitting.
        has_partial_fit = hasattr(self, 'partial_fit_')
        # Should raise an error if one does not fit before predicting.
        if not (is_fitted or has_partial_fit):
            raise NotFittedError("Fit training data before predicting")
        if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
            raise ValueError(
                "Training data and predicted data do "
                "not have same number of features.")
    def predict(self, X):
        """
        Predict data using the ``centroids_`` of subclusters.

        Avoid computation of the row norms of X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        labels: ndarray, shape(n_samples)
            Labelled data.
        """
        X = check_array(X, accept_sparse='csr')
        self._check_fit(X)
        # ||c||^2 - 2 X.c ranks centroids identically to the full squared
        # distance (the per-row ||x||^2 term is constant per sample)
        reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
        reduced_distance *= -2
        reduced_distance += self._subcluster_norms
        return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
    def transform(self, X, y=None):
        """
        Transform X into subcluster centroids dimension.

        Each dimension represents the distance from the sample point to each
        cluster centroid.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
            Transformed data.
        """
        check_is_fitted(self, 'subcluster_centers_')
        return euclidean_distances(X, self.subcluster_centers_)
    def _global_clustering(self, X=None):
        """
        Global clustering for the subclusters obtained after fitting
        """
        clusterer = self.n_clusters
        centroids = self.subcluster_centers_
        compute_labels = (X is not None) and self.compute_labels
        # Preprocessing for the global clustering.
        not_enough_centroids = False
        if isinstance(clusterer, int):
            clusterer = AgglomerativeClustering(
                n_clusters=self.n_clusters)
            # There is no need to perform the global clustering step.
            if len(centroids) < self.n_clusters:
                not_enough_centroids = True
        elif (clusterer is not None and not
              hasattr(clusterer, 'fit_predict')):
            raise ValueError("n_clusters should be an instance of "
                             "ClusterMixin or an int")
        # To use in predict to avoid recalculation.
        self._subcluster_norms = row_norms(
            self.subcluster_centers_, squared=True)
        if clusterer is None or not_enough_centroids:
            # no global step: every subcluster keeps its own label
            self.subcluster_labels_ = np.arange(len(centroids))
            if not_enough_centroids:
                warnings.warn(
                    "Number of subclusters found (%d) by Birch is less "
                    "than (%d). Decrease the threshold."
                    % (len(centroids), self.n_clusters))
        else:
            # The global clustering step that clusters the subclusters of
            # the leaves. It assumes the centroids of the subclusters as
            # samples and finds the final centroids.
            self.subcluster_labels_ = clusterer.fit_predict(
                self.subcluster_centers_)
        if compute_labels:
            self.labels_ = self.predict(X)
|
bsd-3-clause
|
qifeigit/scikit-learn
|
examples/model_selection/plot_validation_curve.py
|
229
|
1823
|
"""
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)

import matplotlib.pyplot as plt
import numpy as np

from sklearn.datasets import load_digits
from sklearn.svm import SVC
# NOTE(review): sklearn.learning_curve was deprecated in 0.18 and removed
# in 0.20 -- on modern scikit-learn import validation_curve from
# sklearn.model_selection instead.
from sklearn.learning_curve import validation_curve

digits = load_digits()
X, y = digits.data, digits.target

# sweep the RBF kernel width over five orders of magnitude
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
    SVC(), X, y, param_name="gamma", param_range=param_range,
    cv=10, scoring="accuracy", n_jobs=1)

# mean +/- std over the 10 CV folds, per gamma value
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)

plt.title("Validation Curve with SVM")
# FIX: raw string -- "\g" is an invalid escape sequence (SyntaxWarning on
# modern Python, an error in the future); mathtext needs the literal
# backslash in "$\gamma$"
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
# shaded band = one standard deviation around the mean
plt.fill_between(param_range, train_scores_mean - train_scores_std,
                 train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
             color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
                 test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
|
bsd-3-clause
|
russel1237/scikit-learn
|
examples/tree/plot_tree_regression_multioutput.py
|
206
|
1800
|
"""
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
|
bsd-3-clause
|
0asa/scikit-learn
|
examples/ensemble/plot_gradient_boosting_regularization.py
|
355
|
2843
|
"""
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
|
bsd-3-clause
|
karvenka/sp17-i524
|
project/S17-IO-3012/code/bin/benchmark_replicas_import.py
|
19
|
5474
|
import matplotlib.pyplot as plt
import sys
import pandas as pd
def get_parm():
    """Return the first command-line argument (the input file name).

    Prints a message and exits the program when no argument was given.
    """
    try:
        return sys.argv[1]
    except IndexError:
        # Only a missing argument is expected here; the previous bare
        # ``except`` also swallowed SystemExit/KeyboardInterrupt.
        print ('Must enter file name as parameter')
        exit()
def read_file(filename):
    """Read ``filename`` into a pandas DataFrame.

    Prints a message and exits the program when the file cannot be
    read or parsed.
    """
    try:
        return pd.read_csv(filename)
    except Exception:
        # Narrowed from a bare ``except`` so SystemExit and
        # KeyboardInterrupt still propagate.
        print ('Error retrieving file')
        exit()
def select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica):
    """Filter benchmark rows, average them per configuration, and return
    the result sorted by shard replica count.

    A parameter value of 'X' means "do not filter on this column".
    """
    # Fixed filters: only MongoDB 3.4 runs on the "large" test size.
    selected = benchmark_df[benchmark_df.mongo_version == 34]
    selected = selected[selected.test_size == "large"]
    # Optional filters, applied only when a concrete value was given.
    for column, value in [('cloud', cloud),
                          ('config_replicas', config_replicas),
                          ('mongos_instances', mongos_instances),
                          ('shard_replicas', shard_replicas),
                          ('shards_per_replica', shards_per_replica)]:
        if value != 'X':
            selected = selected[selected[column] == value]
    # http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
    # Average every metric per unique configuration, keeping the group keys
    # as regular columns.  numeric_only=True preserves the behavior of old
    # pandas, which silently dropped non-numeric columns; pandas >= 2.0
    # raises on them otherwise.
    selected = selected.groupby(
        ['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica'],
        as_index=False).mean(numeric_only=True)
    return selected.sort_values(by='shard_replicas', ascending=True)
def make_figure(import_seconds_kilo, replicas_kilo, import_seconds_chameleon, replicas_chameleon, import_seconds_jetstream, replicas_jetstream):
    """Format, create and save a line chart of import runtime vs replication.

    @param1: import_seconds_kilo Array with import_seconds from kilo
    @type: numpy array
    @param2: replicas_kilo Array with replicas from kilo
    @type: numpy array
    @param3: import_seconds_chameleon Array with import_seconds from chameleon
    @type: numpy array
    @param4: replicas_chameleon Array with replicas from chameleon
    @type: numpy array
    @param5: import_seconds_jetstream Array with import_seconds from jetstream
    @type: numpy array
    @param6: replicas_jetstream Array with replicas from jetstream
    @type: numpy array
    """
    fig = plt.figure()
    #plt.title('Average Mongoimport Runtime by Shard Replication Factor')
    plt.ylabel('Runtime in Seconds')
    plt.xlabel('Degree of Replication Per Set')
    # One line per cloud provider.
    plt.plot(replicas_kilo, import_seconds_kilo, label='Kilo Cloud')
    plt.plot(replicas_chameleon, import_seconds_chameleon, label='Chameleon Cloud')
    plt.plot(replicas_jetstream, import_seconds_jetstream, label='Jetstream Cloud')
    # Pin the lower y limit at zero, let the top autoscale.
    # BUG FIX: the ``ymin=`` keyword was removed from pyplot.ylim in
    # Matplotlib 3.x; ``bottom=`` is the supported spelling.
    plt.ylim(bottom=0)
    plt.legend(loc='best')
    # Show the chart (for testing)
    # plt.show()
    # Save the chart
    fig.savefig('../report/replica_import.png')
# Run the program by calling the functions
# Driver: for each of the three clouds, select the matching benchmark rows
# and pull out (import_seconds, replicas) column pairs, then chart them.
if __name__ == "__main__":
    filename = get_parm()
    benchmark_df = read_file(filename)
    cloud = 'kilo'
    config_replicas = 1
    mongos_instances = 1
    shard_replicas = 1
    shards_per_replica = 'X'  # 'X' = do not filter on this column
    select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
    # http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
    # percentage death=\
    # NOTE(review): DataFrame.as_matrix was removed in pandas 1.0 — this
    # script requires an older pandas (or porting to .to_numpy()).
    # Columns 6 and 4 are assumed to be import_seconds and shard_replicas
    # after the groupby in select_data — verify against the CSV schema.
    import_seconds_kilo = select_df.as_matrix(columns=[select_df.columns[6]])
    replicas_kilo = select_df.as_matrix(columns=[select_df.columns[4]])
    # http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
    cloud = 'chameleon'
    config_replicas = 1
    mongos_instances = 1
    shard_replicas = 1
    shards_per_replica = 'X'
    select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
    # http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
    # percentage death=\
    import_seconds_chameleon = select_df.as_matrix(columns=[select_df.columns[6]])
    replicas_chameleon = select_df.as_matrix(columns=[select_df.columns[4]])
    # http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
    cloud = 'jetstream'
    config_replicas = 1
    mongos_instances = 1
    shard_replicas = 1
    shards_per_replica = 'X'
    select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
    # http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
    # percentage death=\
    import_seconds_jetstream = select_df.as_matrix(columns=[select_df.columns[6]])
    replicas_jetstream = select_df.as_matrix(columns=[select_df.columns[4]])
    # http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
    make_figure(import_seconds_kilo, replicas_kilo, import_seconds_chameleon, replicas_chameleon, import_seconds_jetstream, replicas_jetstream)
|
apache-2.0
|
ToniRV/Learning-to-navigate-without-a-map
|
rlvision/exps/pg_16_exp.py
|
1
|
7091
|
"""Policy Gradient for Grid 16x16.
It's Keras 2!
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Flatten, InputLayer
from keras.layers import Conv2D, AveragePooling2D
from keras.layers import Activation
from keras.regularizers import l2
import rlvision
from rlvision import grid
# load data
data, value, start_tot, traj_tot, goal_tot, imsize = grid.load_train_grid16()
data = np.asarray(data, dtype="float32")
value = np.asarray(value, dtype="float32")
print ("[MESSAGE] Data Loaded.")
# training 4000 samples, testing 1000 samples
# NOTE(review): num_train is 1 although the comment above says 4000 — the
# loop below therefore trains on a single grid; confirm this is intended.
num_train = 1
num_test = 1000
# script parameters
input_dim = imsize[0]*imsize[1]
gamma = 0.99          # reward discount factor
update_freq = 1       # train the model every ``update_freq`` episodes
learning_rate = 0.001
resume = False        # resume from a previously saved checkpoint
network_type = "conv" # "conv" or anything else for the dense network
data_format = "channels_first"
num_output = 8        # one logit per grid action
model_file = "pg16_model.h5"
model_path = os.path.join(rlvision.RLVISION_MODEL, model_file)
def discount_rewards(r, discount_factor=0.99):
    """Compute discounted cumulative rewards.

    Parameters
    ----------
    r : np.ndarray
        Per-step rewards. A non-zero entry marks the end of an episode
        segment and resets the running sum.
    discount_factor : float, optional
        Per-step discount. Defaults to 0.99, the value of the
        module-level ``gamma`` this function previously read implicitly;
        existing one-argument callers are unaffected.

    Returns
    -------
    np.ndarray
        Array of the same shape holding the discounted returns.
    """
    discounted_r = np.zeros_like(r)
    running_add = 0
    # ``range`` instead of the Python-2-only ``xrange`` keeps the function
    # usable on both interpreters.
    for t in reversed(range(0, r.size)):
        if r[t] != 0:
            # Reset the accumulator at segment boundaries.
            running_add = 0
        running_add = running_add * discount_factor + r[t]
        discounted_r[t] = running_add
    return discounted_r
# define model
model = Sequential()
if network_type == "conv":
    # Convolutional policy head: four 32-filter conv blocks, one pooling
    # stage, then a softmax over the 8 actions.
    model.add(Conv2D(32, (7, 7), padding="same",
                     input_shape=(2, imsize[0], imsize[1]),
                     kernel_regularizer=l2(0.0001),
                     data_format=data_format))
    model.add(Activation("relu"))
    model.add(Conv2D(32, (5, 5), padding="same",
                     kernel_regularizer=l2(0.0001),
                     data_format=data_format))
    model.add(Activation("relu"))
    model.add(Conv2D(32, (5, 5), padding="same",
                     kernel_regularizer=l2(0.0001),
                     data_format=data_format))
    model.add(Activation("relu"))
    model.add(Conv2D(32, (5, 5), padding="same",
                     kernel_regularizer=l2(0.0001),
                     data_format=data_format))
    model.add(Activation("relu"))
    model.add(AveragePooling2D(2, 2))
    model.add(Conv2D(32, (3, 3), padding="same",
                     kernel_regularizer=l2(0.0001),
                     data_format=data_format))
    model.add(Activation("relu"))
    model.add(Flatten())
    model.add(Dense(num_output, activation="softmax"))
else:
    # Fully-connected fallback policy.
    model.add(InputLayer(input_shape=(2, imsize[0], imsize[1])))
    model.add(Flatten())
    model.add(Dense(500))
    model.add(Activation("relu"))
    model.add(Dense(300))
    model.add(Activation("relu"))
    model.add(Dense(300))
    model.add(Activation("relu"))
    model.add(Dense(num_output, activation="softmax"))
# print model
model.summary()
model.compile(loss="categorical_crossentropy", optimizer="adam")
if resume is True:
    model.load_weights(model_path)
print ("[MESSAGE] Model built.")
# training schedule
reward_sum = 0
running_reward = None
episode_number = 0
xs, dlogps, drs, probs = [], [], [], []
train_X, train_Y = [], []
num_victory = 0
# go through entire game space
# NOTE(review): ``xrange`` below is Python-2-only; the script imports
# print_function but still targets Python 2.
while True:
    for game_idx in xrange(num_train):
        for start_pos in [start_tot[game_idx][0]]:
            game = grid.Grid(data[game_idx], value[game_idx], imsize,
                             start_pos, is_po=False)
            # until the game is failed
            while True:
                # game_state = game.get_state()
                # plt.subplot(1, 3, 1)
                # plt.imshow(game_state[0, 0], cmap="gray")
                # plt.subplot(1, 3, 2)
                # plt.imshow(game_state[0, 1], cmap="gray")
                # plt.subplot(1, 3, 3)
                # plt.imshow(game_state[0, 2], cmap="gray")
                # plt.show()
                # print (game_state[0, 0])
                # compute probability
                aprob = model.predict(game.get_state()).flatten()
                # sample feature
                xs.append(game.get_state())
                probs.append(model.predict(game.get_state()).flatten())
                # sample decision
                # Renormalize and sample an action from the policy.
                aprob = aprob/np.sum(aprob)
                action = np.random.choice(num_output, 1, p=aprob)[0]
                action_flag = game.is_pos_valid(game.action2pos(action))
                y = np.zeros((num_output,))
                if action_flag is True:
                    y[action] = 1
                    # update game and get feedback
                    game.update_state_from_action(action)
                    # if the game finished then train the model
                    reward, state = game.get_state_reward()
                # halt game if the action is hit the obstacle
                elif action_flag is False:
                    reward = -1.
                    state = -1
                # Policy-gradient signal: (one-hot action - policy probs).
                dlogps.append(np.array(y).astype("float32")-aprob)
                reward_sum += reward
                drs.append(reward)
                # state 1 = victory, -1 = defeat: episode finished.
                if state in [1, -1]:
                    episode_number += 1
                    # NOTE(review): ``exp`` is computed but never used.
                    exp = np.vstack(xs)
                    epdlogp = np.vstack(dlogps)
                    epr = np.vstack(drs)
                    # Normalize discounted returns before scaling gradients.
                    discounted_epr = discount_rewards(epr)
                    discounted_epr -= np.mean(discounted_epr)
                    discounted_epr /= np.std(discounted_epr)
                    epdlogp *= discounted_epr
                    # prepare training batch
                    train_X.append(xs)
                    train_Y.append(epdlogp)
                    xs, dlogps, drs = [], [], []
                    if episode_number % update_freq == 0:
                        # Nudge the recorded policy outputs along the
                        # advantage-weighted gradient and fit to them.
                        y_train = probs + learning_rate*np.squeeze(
                            np.vstack(train_Y))
                        train_X = np.squeeze(np.vstack(train_X))
                        if train_X.ndim < 4:
                            train_X = np.expand_dims(train_X, axis=0)
                        model.train_on_batch(train_X,
                                             y_train)
                        train_X, train_Y, probs = [], [], []
                        # Replace the checkpoint on disk.
                        os.remove(model_path) \
                            if os.path.exists(model_path) else None
                        model.save_weights(model_path)
                    running_reward = reward_sum if running_reward is None \
                        else running_reward*0.99+reward_sum*0.01
                    print ("Environment reset imminent. Total Episode "
                           "Reward: %f. Running Mean: %f"
                           % (reward_sum, running_reward))
                    reward_sum = 0
                    num_victory = num_victory+1 if state == 1 else num_victory
                    print ("Episode %d Result: " % (episode_number) +
                           ("Defeat!" if state == -1 else "Victory!"))
                    print ("Successful rate: %d" %
                           (num_victory))
                    # to next game
                    break
|
mit
|
SchadkoAO/FDTD_Solver
|
postprocessing/calculate_max.py
|
2
|
3747
|
import numpy as np
import matplotlib.pyplot as plt
import argparse
import traceback
import tarfile
import os
import re
import time
def read_tarinfo(fname):
    """Open the archive at *fname*.

    Returns a tuple ``(members, tar)`` where *members* is the list of
    archive entries and *tar* is the still-open TarFile handle (the
    caller reads entries through it and owns closing it).
    """
    archive = tarfile.open(fname)
    members = list(archive.getmembers())
    return members, archive
def read(file, tar_info):
    """Extract one entry from the open tar archive *file*.

    Returns ``(content, error_flag)``: the raw bytes and ``False`` on
    success, or ``(None, True)`` when the entry has no file data
    (e.g. it is a directory member).
    """
    handle = file.extractfile(tar_info)
    if handle is None:
        return None, True
    return handle.read(), False
def should_parse_file(tar_file, t_start, t_end):
    """Return True when *tar_file* names a dump like ``i_<type>_<t>.txt``
    whose timestamp ``t`` lies in the inclusive window [t_start, t_end].
    """
    regex = re.compile(r"i_(\w+)_(\d+)\.txt")
    try:
        m = regex.search(tar_file.name)
        t = int(m.group(2))
        return t_start <= t <= t_end
    except AttributeError:
        # No ``.name`` on the member, or the name did not match the
        # pattern (``m`` is None). The previous ``except BaseException``
        # also hid SystemExit/KeyboardInterrupt and real bugs.
        return False
def output_spot(name, content, dir_name):
    """Parse one dump of ``x y value`` lines, take the maximum value over
    the central 50% window of the grid, and append it to file *name*.

    @param1: name Output file; the maximum is appended as one line
    @type: string
    @param2: content Raw dump, one "x y value" triple per line
    @type: bytes
    @param3: dir_name Unused here (kept for caller compatibility)
    @type: string
    """
    # Single pass: collect the triples and track the grid bounds at the
    # same time (the original decoded and scanned the text twice).
    points = []
    first_x = float('inf')
    first_y = float('inf')
    last_x = 0
    last_y = 0
    for line in content.decode("utf-8").split("\n"):
        line_splited = line.split(" ")
        if len(line_splited) != 3:
            # Malformed/empty line: report it and keep going, as before.
            print(line_splited)
            continue
        y = int(line_splited[1])
        x = int(line_splited[0])
        val = float(line_splited[2])
        points.append((x, y, val))
        first_x = min(first_x, x)
        last_x = max(last_x, x)
        first_y = min(first_y, y)
        last_y = max(last_y, y)
    size_x = last_x - first_x
    size_y = last_y - first_y
    data = np.zeros((size_y + 1, size_x + 1))
    print("File {name} obtain size_x={size_x} size_y={size_y}".format(
        name=name,
        size_x=size_x,
        size_y=size_y
    ))
    # Fill the grid, shifting coordinates so the minimum maps to index 0.
    for x, y, val in points:
        data[y - first_y, x - first_x] = val
    # Maximum over the central half of the grid (25%..75% in each axis).
    max_val = np.max(data[int(0.25 * size_y):int(0.75 * size_y),
                          int(0.25 * size_x):int(0.75 * size_x)])
    # ``with`` already closes the file; the original's extra close() call
    # inside the block was redundant.
    with open(name, 'a') as out_file:
        out_file.write(str(max_val) + "\n")
def main(tar_name, t_start, t_end, output_name):
    """Walk the archive *tar_name* and run output_spot on every dump whose
    timestamp falls inside [t_start, t_end].

    ``output_name`` is accepted for interface compatibility but not used
    by this routine.
    """
    members, archive = read_tarinfo(tar_name)
    dir_name = "max_{}".format(tar_name.replace(".tar.gz", ""))
    print(members)
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)
    for member in members:
        try:
            if not should_parse_file(member, t_start, t_end):
                print("Skipping: {}".format(member.name))
                continue
            print("Processing {}".format(member.name))
            content, failed = read(archive, member)
            if failed:
                print("Error during file processing: {}".format(member.name))
                continue
            output_spot("max_" + member.name, content, dir_name)
        except BaseException:
            # Best-effort batch job: log the traceback and keep going
            # with the remaining archive members.
            traceback.print_exc()
            continue
# Command-line entry point: parse arguments, then process the archive.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--out_name", help="Path to file in which image will be saved")
    parser.add_argument("--tar_file", help="Path to archive")
    parser.add_argument("--t_start", help="Time from to start read files")
    # NOTE(review): "ba analized" typo is user-visible help text; left
    # unchanged here as it is runtime output.
    parser.add_argument("--t_end", help="Time until dumps should ba analized")
    args = parser.parse_args()
    out_name = str(args.out_name)
    tar_file = str(args.tar_file)
    t_start = int(args.t_start)
    t_end = int(args.t_end)
    print("Start processing max: tar_file={tar_file}\n out_name={out_name}\n t_start={t_start}\n t_end={t_end}\n".format(
        tar_file=tar_file,
        out_name=out_name,
        t_start=t_start,
        t_end=t_end,
    ))
    main(
        tar_name=tar_file,
        output_name=out_name,
        t_start=t_start,
        t_end=t_end,
    )
|
mit
|
beepee14/scikit-learn
|
examples/classification/plot_digits_classification.py
|
289
|
2397
|
"""
================================
Recognizing hand-written digits
================================
An example showing how the scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples / 2:]
predicted = classifier.predict(data[n_samples / 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
|
bsd-3-clause
|
saketkc/statsmodels
|
statsmodels/sandbox/examples/thirdparty/findow_0.py
|
33
|
2147
|
# -*- coding: utf-8 -*-
"""A quick look at volatility of stock returns for 2009
Just an exercise to find my way around the pandas methods.
Shows the daily rate of return, the square of it (volatility) and
a 5 day moving average of the volatility.
No guarantee for correctness.
Assumes no missing values.
colors of lines in graphs are not great
uses DataFrame and WidePanel to hold data downloaded from yahoo using matplotlib.
I haven't figured out storage, so the download happens at each run
of the script.
getquotes is from pandas\examples\finance.py
Created on Sat Jan 30 16:30:18 2010
Author: josef-pktd
"""
from statsmodels.compat.python import lzip
import numpy as np
# NOTE(review): matplotlib.finance was removed in matplotlib 2.2+, and
# pandas.WidePanel no longer exists in modern pandas — this script only
# runs against the old library versions it was written for.
import matplotlib.finance as fin
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pa
def getquotes(symbol, start, end):
    # Download OHLCV history from Yahoo and wrap it in a date-indexed
    # DataFrame (one row per trading day).
    quotes = fin.quotes_historical_yahoo(symbol, start, end)
    dates, open, close, high, low, volume = lzip(*quotes)
    data = {
        'open' : open,
        'close' : close,
        'high' : high,
        'low' : low,
        'volume' : volume
    }
    # Quote dates arrive as ordinal floats; convert to a datetime index.
    dates = pa.Index([dt.datetime.fromordinal(int(d)) for d in dates])
    return pa.DataFrame(data, index=dates)
start_date = dt.datetime(2009, 1, 1)
end_date = dt.datetime(2010, 1, 1)
mysym = ['msft', 'ibm', 'goog']
indexsym = ['gspc', 'dji']
# download data
dmall = {}
for sy in mysym:
    dmall[sy] = getquotes(sy, start_date, end_date)
# combine into WidePanel
pawp = pa.WidePanel.fromDict(dmall)
print(pawp.values.shape)
# select closing prices
paclose = pawp.getMinorXS('close')
# take log and first difference over time
paclose_ratereturn = paclose.apply(np.log).diff()
plt.figure()
paclose_ratereturn.plot()
plt.title('daily rate of return')
# square the returns
paclose_ratereturn_vol = paclose_ratereturn.apply(lambda x:np.power(x,2))
plt.figure()
# NOTE(review): title string below has an unbalanced parenthesis; left
# as-is (runtime output).
plt.title('volatility (with 5 day moving average')
paclose_ratereturn_vol.plot()
# use convolution to get moving average
paclose_ratereturn_vol_mov = paclose_ratereturn_vol.apply(
    lambda x:np.convolve(x,np.ones(5)/5.,'same'))
paclose_ratereturn_vol_mov.plot()
#plt.show()
|
bsd-3-clause
|
TomAugspurger/pandas
|
pandas/tests/frame/methods/test_at_time.py
|
1
|
3150
|
from datetime import time
import numpy as np
import pytest
import pytz
from pandas import DataFrame, date_range
import pandas._testing as tm
class TestAtTime:
    """Tests for DataFrame.at_time: selection of rows (or columns) whose
    timestamps match a given wall-clock time."""
    def test_at_time(self):
        # at_time with a Timestamp selects all rows at that wall-clock time.
        rng = date_range("1/1/2000", "1/5/2000", freq="5min")
        ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
        rs = ts.at_time(rng[1])
        assert (rs.index.hour == rng[1].hour).all()
        assert (rs.index.minute == rng[1].minute).all()
        assert (rs.index.second == rng[1].second).all()
        # String spelling is equivalent to a datetime.time argument.
        result = ts.at_time("9:30")
        expected = ts.at_time(time(9, 30))
        tm.assert_frame_equal(result, expected)
        # .loc with a time object behaves like a boolean hour/minute mask.
        result = ts.loc[time(9, 30)]
        expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)]
        tm.assert_frame_equal(result, expected)
        # midnight, everything
        rng = date_range("1/1/2000", "1/31/2000")
        ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
        result = ts.at_time(time(0, 0))
        tm.assert_frame_equal(result, ts)
        # time doesn't exist
        rng = date_range("1/1/2012", freq="23Min", periods=384)
        ts = DataFrame(np.random.randn(len(rng), 2), rng)
        rs = ts.at_time("16:00")
        assert len(rs) == 0
    @pytest.mark.parametrize(
        "hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=pytz.UTC)]
    )
    def test_at_time_errors(self, hour):
        # GH#24043
        # tz-aware time against a naive index must raise; naive inputs work.
        dti = date_range("2018", periods=3, freq="H")
        df = DataFrame(list(range(len(dti))), index=dti)
        if getattr(hour, "tzinfo", None) is None:
            result = df.at_time(hour)
            expected = df.iloc[1:2]
            tm.assert_frame_equal(result, expected)
        else:
            with pytest.raises(ValueError, match="Index must be timezone"):
                df.at_time(hour)
    def test_at_time_tz(self):
        # GH#24043
        # 4am US/Eastern == 1am US/Pacific, i.e. the second row.
        dti = date_range("2018", periods=3, freq="H", tz="US/Pacific")
        df = DataFrame(list(range(len(dti))), index=dti)
        result = df.at_time(time(4, tzinfo=pytz.timezone("US/Eastern")))
        expected = df.iloc[1:2]
        tm.assert_frame_equal(result, expected)
    def test_at_time_raises(self):
        # GH#20725
        df = DataFrame([[1, 2, 3], [4, 5, 6]])
        with pytest.raises(TypeError):  # index is not a DatetimeIndex
            df.at_time("00:00")
    @pytest.mark.parametrize("axis", ["index", "columns", 0, 1])
    def test_at_time_axis(self, axis):
        # issue 8839
        # at_time must honor the axis argument (rows vs columns).
        rng = date_range("1/1/2000", "1/5/2000", freq="5min")
        ts = DataFrame(np.random.randn(len(rng), len(rng)))
        ts.index, ts.columns = rng, rng
        indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]
        if axis in ["index", 0]:
            expected = ts.loc[indices, :]
        elif axis in ["columns", 1]:
            expected = ts.loc[:, indices]
        result = ts.at_time("9:30", axis=axis)
        # Without clearing freq, result has freq 1440T and expected 5T
        result.index = result.index._with_freq(None)
        expected.index = expected.index._with_freq(None)
        tm.assert_frame_equal(result, expected)
|
bsd-3-clause
|
zuku1985/scikit-learn
|
sklearn/utils/tests/test_sparsefuncs.py
|
78
|
17611
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import (assert_array_almost_equal,
assert_array_equal,
assert_equal)
from numpy.random import RandomState
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
incr_mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import (assign_rows_csr,
inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
    # Per-column (axis=0) mean/variance of CSR/CSC matrices must match the
    # dense NumPy result, preserve the expected output dtypes, and reject
    # LIL input with TypeError.
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_lil = sp.lil_matrix(X)
    X_lil[1, 0] = 0
    X[1, 0] = 0
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
    X_csr = sp.csr_matrix(X_lil)
    X_csc = sp.csc_matrix(X_lil)
    # (input dtype, expected output dtype): integer inputs promote to float64.
    expected_dtypes = [(np.float32, np.float32),
                       (np.float64, np.float64),
                       (np.int32, np.float64),
                       (np.int64, np.float64)]
    for input_dtype, output_dtype in expected_dtypes:
        X_test = X.astype(input_dtype)
        for X_sparse in (X_csr, X_csc):
            X_sparse = X_sparse.astype(input_dtype)
            X_means, X_vars = mean_variance_axis(X_sparse, axis=0)
            assert_equal(X_means.dtype, output_dtype)
            assert_equal(X_vars.dtype, output_dtype)
            assert_array_almost_equal(X_means, np.mean(X_test, axis=0))
            assert_array_almost_equal(X_vars, np.var(X_test, axis=0))
def test_mean_variance_axis1():
    # Per-row (axis=1) mean/variance of CSR/CSC matrices must match the
    # dense NumPy result, preserve the expected output dtypes, and reject
    # LIL input with TypeError.
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_lil = sp.lil_matrix(X)
    X_lil[1, 0] = 0
    X[1, 0] = 0
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
    X_csr = sp.csr_matrix(X_lil)
    X_csc = sp.csc_matrix(X_lil)
    # (input dtype, expected output dtype): integer inputs promote to float64.
    expected_dtypes = [(np.float32, np.float32),
                       (np.float64, np.float64),
                       (np.int32, np.float64),
                       (np.int64, np.float64)]
    for input_dtype, output_dtype in expected_dtypes:
        X_test = X.astype(input_dtype)
        for X_sparse in (X_csr, X_csc):
            X_sparse = X_sparse.astype(input_dtype)
            # BUG FIX: this loop previously passed axis=0 (copy-paste from
            # test_mean_variance_axis0), leaving axis=1 entirely untested.
            X_means, X_vars = mean_variance_axis(X_sparse, axis=1)
            assert_equal(X_means.dtype, output_dtype)
            assert_equal(X_vars.dtype, output_dtype)
            assert_array_almost_equal(X_means, np.mean(X_test, axis=1))
            assert_array_almost_equal(X_vars, np.var(X_test, axis=1))
def test_incr_mean_variance_axis():
    # Incremental mean/variance must agree with the one-shot computation,
    # for both axes, single-row and full-data inputs, and all dtypes.
    for axis in [0, 1]:
        rng = np.random.RandomState(0)
        n_features = 50
        n_samples = 10
        data_chunks = [rng.randint(0, 2, size=n_features)
                       for i in range(n_samples)]
        # default params for incr_mean_variance
        last_mean = np.zeros(n_features)
        last_var = np.zeros_like(last_mean)
        last_n = 0
        # Test errors
        X = np.array(data_chunks[0])
        X = np.atleast_2d(X)
        X_lil = sp.lil_matrix(X)
        X_csr = sp.csr_matrix(X_lil)
        # NOTE(review): the first two assert_raises calls below are
        # identical and omit the data matrix argument — presumably at
        # least one was meant to pass X; verify the intended signatures.
        assert_raises(TypeError, incr_mean_variance_axis, axis,
                      last_mean, last_var, last_n)
        assert_raises(TypeError, incr_mean_variance_axis, axis,
                      last_mean, last_var, last_n)
        assert_raises(TypeError, incr_mean_variance_axis, X_lil, axis,
                      last_mean, last_var, last_n)
        # Test _incr_mean_and_var with a 1 row input
        X_means, X_vars = mean_variance_axis(X_csr, axis)
        X_means_incr, X_vars_incr, n_incr = \
            incr_mean_variance_axis(X_csr, axis, last_mean, last_var, last_n)
        assert_array_almost_equal(X_means, X_means_incr)
        assert_array_almost_equal(X_vars, X_vars_incr)
        assert_equal(X.shape[axis], n_incr)  # X.shape[axis] picks # samples
        X_csc = sp.csc_matrix(X_lil)
        X_means, X_vars = mean_variance_axis(X_csc, axis)
        assert_array_almost_equal(X_means, X_means_incr)
        assert_array_almost_equal(X_vars, X_vars_incr)
        assert_equal(X.shape[axis], n_incr)
        # Test _incremental_mean_and_var with whole data
        X = np.vstack(data_chunks)
        X_lil = sp.lil_matrix(X)
        X_csr = sp.csr_matrix(X_lil)
        X_csc = sp.csc_matrix(X_lil)
        # (input dtype, expected output dtype): ints promote to float64.
        expected_dtypes = [(np.float32, np.float32),
                           (np.float64, np.float64),
                           (np.int32, np.float64),
                           (np.int64, np.float64)]
        for input_dtype, output_dtype in expected_dtypes:
            for X_sparse in (X_csr, X_csc):
                X_sparse = X_sparse.astype(input_dtype)
                X_means, X_vars = mean_variance_axis(X_sparse, axis)
                X_means_incr, X_vars_incr, n_incr = \
                    incr_mean_variance_axis(X_sparse, axis, last_mean,
                                            last_var, last_n)
                assert_equal(X_means_incr.dtype, output_dtype)
                assert_equal(X_vars_incr.dtype, output_dtype)
                assert_array_almost_equal(X_means, X_means_incr)
                assert_array_almost_equal(X_vars, X_vars_incr)
                assert_equal(X.shape[axis], n_incr)
def test_mean_variance_illegal_axis():
    # Any axis other than 0 or 1 must raise ValueError for both the
    # one-shot and the incremental variants.
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_csr = sp.csr_matrix(X)
    assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
    assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
    assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
    assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=-3,
                  last_mean=None, last_var=None, last_n=None)
    assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=2,
                  last_mean=None, last_var=None, last_n=None)
    assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=-1,
                  last_mean=None, last_var=None, last_n=None)
def test_densify_rows():
    # assign_rows_csr must copy the selected CSR rows into the given rows
    # of a dense output array, leaving all other output rows untouched.
    for dtype in (np.float32, np.float64):
        X = sp.csr_matrix([[0, 3, 0],
                           [2, 4, 0],
                           [0, 0, 0],
                           [9, 8, 7],
                           [4, 0, 5]], dtype=dtype)
        X_rows = np.array([0, 2, 3], dtype=np.intp)
        out = np.ones((6, X.shape[1]), dtype=dtype)
        out_rows = np.array([1, 3, 4], dtype=np.intp)
        # Expected: source rows 0, 2, 3 land in output rows 1, 3, 4.
        expect = np.ones_like(out)
        expect[out_rows] = X[X_rows, :].toarray()
        assign_rows_csr(X, X_rows, out_rows, out)
        assert_array_equal(out, expect)
def test_inplace_column_scale():
    """inplace_column_scale on CSR/CSC must match dense column scaling
    and reject unsupported sparse formats, for both float dtypes.

    The original test duplicated the whole body for float64 and
    float32; the two passes are identical up to dtype, so loop instead.
    """
    rng = np.random.RandomState(0)
    X_base = sp.rand(100, 200, 0.05)
    scale_base = rng.rand(200)
    for dtype in (np.float64, np.float32):
        X = X_base.astype(dtype)
        scale = scale_base.astype(dtype)
        Xr = X.tocsr()
        Xc = X.tocsc()
        XA = X.toarray()
        XA *= scale
        inplace_column_scale(Xc, scale)
        inplace_column_scale(Xr, scale)
        assert_array_almost_equal(Xr.toarray(), Xc.toarray())
        assert_array_almost_equal(XA, Xc.toarray())
        assert_array_almost_equal(XA, Xr.toarray())
        # LIL matrices cannot be scaled in place.
        assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
    """inplace_row_scale on CSR/CSC must match dense row scaling and
    reject unsupported sparse formats, for both float dtypes.

    BUG FIX: the original asserted ``TypeError`` on
    ``inplace_column_scale`` here (copy-paste from the column test), so
    the LIL rejection path of ``inplace_row_scale`` was never
    exercised. The duplicated float64/float32 bodies are also folded
    into one dtype loop.
    """
    rng = np.random.RandomState(0)
    X_base = sp.rand(100, 200, 0.05)
    scale_base = rng.rand(100)
    for dtype in (np.float64, np.float32):
        X = X_base.astype(dtype)
        scale = scale_base.astype(dtype)
        Xr = X.tocsr()
        Xc = X.tocsc()
        XA = X.toarray()
        # Row scaling: broadcast the scale down the rows.
        XA *= scale.reshape(-1, 1)
        inplace_row_scale(Xc, scale)
        inplace_row_scale(Xr, scale)
        assert_array_almost_equal(Xr.toarray(), Xc.toarray())
        assert_array_almost_equal(XA, Xc.toarray())
        assert_array_almost_equal(XA, Xr.toarray())
        # LIL matrices cannot be scaled in place (was inplace_column_scale).
        assert_raises(TypeError, inplace_row_scale, X.tolil(), scale)
def test_inplace_swap_row():
    """inplace_swap_row on CSR/CSC must match BLAS row swaps on the
    dense array, for both float dtypes.

    The original duplicated the whole body per dtype; the two passes
    only differ in dtype, so loop instead.
    """
    base = [[0, 3, 0],
            [2, 4, 0],
            [0, 0, 0],
            [9, 8, 7],
            [4, 0, 5]]
    for dtype in (np.float64, np.float32):
        X = np.array(base, dtype=dtype)
        X_csr = sp.csr_matrix(X)
        X_csc = sp.csc_matrix(X)
        # Use the BLAS swap as the dense reference implementation.
        swap = linalg.get_blas_funcs(('swap',), (X,))[0]
        for i, j in ((0, -1), (2, 3)):
            X[i], X[j] = swap(X[i], X[j])
            inplace_swap_row(X_csr, i, j)
            inplace_swap_row(X_csc, i, j)
            assert_array_equal(X_csr.toarray(), X_csc.toarray())
            assert_array_equal(X, X_csc.toarray())
            assert_array_equal(X, X_csr.toarray())
        # Unsupported format is rejected.
        assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
    """inplace_swap_column on CSR/CSC must match BLAS column swaps on
    the dense array, for both float dtypes.

    The original duplicated the whole body per dtype; the two passes
    only differ in dtype, so loop instead.
    """
    base = [[0, 3, 0],
            [2, 4, 0],
            [0, 0, 0],
            [9, 8, 7],
            [4, 0, 5]]
    for dtype in (np.float64, np.float32):
        X = np.array(base, dtype=dtype)
        X_csr = sp.csr_matrix(X)
        X_csc = sp.csc_matrix(X)
        # Use the BLAS swap as the dense reference implementation.
        swap = linalg.get_blas_funcs(('swap',), (X,))[0]
        for i, j in ((0, -1), (0, 1)):
            X[:, i], X[:, j] = swap(X[:, i], X[:, j])
            inplace_swap_column(X_csr, i, j)
            inplace_swap_column(X_csc, i, j)
            assert_array_equal(X_csr.toarray(), X_csc.toarray())
            assert_array_equal(X, X_csc.toarray())
            assert_array_equal(X, X_csr.toarray())
        # Unsupported format is rejected.
        assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
    """min_max_axis(axis=0) must agree with dense min/max for CSR and
    CSC inputs in both float dtypes.

    Folds the original's duplicated float64/float32 and CSR/CSC blocks
    into loops.
    """
    X = np.array([[0, 3, 0],
                  [2, -1, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    for dtype in (np.float64, np.float32):
        Xt = X.astype(dtype)
        for X_sparse in (sp.csr_matrix(Xt), sp.csc_matrix(Xt)):
            mins, maxs = min_max_axis(X_sparse, axis=0)
            assert_array_equal(mins, Xt.min(axis=0))
            assert_array_equal(maxs, Xt.max(axis=0))
def test_min_max_axis1():
    """min_max_axis(axis=1) must agree with dense min/max for CSR and
    CSC inputs in both float dtypes.

    Folds the original's duplicated float64/float32 and CSR/CSC blocks
    into loops.
    """
    X = np.array([[0, 3, 0],
                  [2, -1, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    for dtype in (np.float64, np.float32):
        Xt = X.astype(dtype)
        for X_sparse in (sp.csr_matrix(Xt), sp.csc_matrix(Xt)):
            mins, maxs = min_max_axis(X_sparse, axis=1)
            assert_array_equal(mins, Xt.min(axis=1))
            assert_array_equal(maxs, Xt.max(axis=1))
def test_min_max_axis_errors():
    """min_max_axis rejects non-CSR/CSC inputs and out-of-range axes."""
    dense = np.array([[0, 3, 0],
                      [2, -1, 0],
                      [0, 0, 0],
                      [9, 8, 7],
                      [4, 0, 5]], dtype=np.float64)
    X_csr = sp.csr_matrix(dense)
    X_csc = sp.csc_matrix(dense)
    # Unsupported sparse format.
    assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
    # Axes outside the valid range.
    assert_raises(ValueError, min_max_axis, X_csr, axis=2)
    assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
    """count_nonzero must match the dense nonzero mask, with and
    without sample weights, over every supported axis."""
    X = np.array([[0, 3, 0],
                  [2, -1, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)
    nonzero_mask = X != 0
    sample_weight = [.5, .2, .3, .1, .1]
    weighted_mask = nonzero_mask * np.array(sample_weight)[:, None]
    for axis in (0, 1, -1, -2, None):
        assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
                                  nonzero_mask.sum(axis=axis))
        assert_array_almost_equal(
            count_nonzero(X_csr, axis=axis, sample_weight=sample_weight),
            weighted_mask.sum(axis=axis))
    # Only CSR input and the axes above are supported.
    assert_raises(TypeError, count_nonzero, X_csc)
    assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
    """csc_median_axis_0 must agree with np.median along axis 0."""
    rng = np.random.RandomState(0)
    # Dense random data.
    dense = rng.rand(100, 50)
    assert_array_equal(csc_median_axis_0(sp.csc_matrix(dense)),
                       np.median(dense, axis=0))
    # Genuinely sparse data with a mix of signs (same RNG call order as
    # before so the fixture is unchanged).
    sparse = rng.rand(51, 100)
    sparse[sparse < 0.7] = 0.0
    ind = rng.randint(0, 50, 10)
    sparse[ind] = -sparse[ind]
    assert_array_equal(csc_median_axis_0(sp.csc_matrix(sparse)),
                       np.median(sparse, axis=0))
    # Hand-checked toy cases: even and odd numbers of rows.
    even = [[0, -2], [-1, -1], [1, 0], [2, 1]]
    assert_array_equal(csc_median_axis_0(sp.csc_matrix(even)),
                       np.array([0.5, -0.5]))
    odd = [[0, -2], [-1, -5], [1, -3]]
    assert_array_equal(csc_median_axis_0(sp.csc_matrix(odd)),
                       np.array([0., -3]))
    # Non-CSC input is rejected.
    assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(odd))
def test_inplace_normalize():
    """L1 and L2 in-place row normalization must preserve dtype and
    produce rows whose (squared, for L2) absolute values sum to one."""
    ones = np.ones((10, 1))
    rs = RandomState(10)
    for normalizer in (inplace_csr_row_normalize_l1,
                       inplace_csr_row_normalize_l2):
        for dtype in (np.float64, np.float32):
            X_csr = sp.csr_matrix(rs.randn(10, 5).astype(dtype))
            normalizer(X_csr)
            assert_equal(X_csr.dtype, dtype)
            if normalizer is inplace_csr_row_normalize_l2:
                # Square so that L2-normalized rows also sum to one.
                X_csr.data **= 2
            assert_array_almost_equal(np.abs(X_csr).sum(axis=1), ones)
|
bsd-3-clause
|
mxjl620/scikit-learn
|
sklearn/feature_selection/rfe.py
|
64
|
17509
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Vincent Michel <vincent.michel@inria.fr>
# Gilles Louppe <g.louppe@gmail.com>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
    """Feature ranking with recursive feature elimination.

    Given an external estimator that assigns weights to features (e.g., the
    coefficients of a linear model), the goal of recursive feature elimination
    (RFE) is to select features by recursively considering smaller and smaller
    sets of features. First, the estimator is trained on the initial set of
    features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set features.
    That procedure is recursively repeated on the pruned set until the desired
    number of features to select is eventually reached.

    Read more in the :ref:`User Guide <rfe>`.

    Parameters
    ----------
    estimator : object
        A supervised learning estimator with a `fit` method that updates a
        `coef_` attribute that holds the fitted parameters. Important features
        must correspond to high absolute values in the `coef_` array.
        For instance, this is the case for most supervised learning
        algorithms such as Support Vector Classifiers and Generalized
        Linear Models from the `svm` and `linear_model` modules.

    n_features_to_select : int or None (default=None)
        The number of features to select. If `None`, half of the features
        are selected.

    step : int or float, optional (default=1)
        If greater than or equal to 1, then `step` corresponds to the (integer)
        number of features to remove at each iteration.
        If within (0.0, 1.0), then `step` corresponds to the percentage
        (rounded down) of features to remove at each iteration.

    estimator_params : dict
        Parameters for the external estimator.
        This attribute is deprecated as of version 0.16 and will be removed in
        0.18. Use estimator initialisation or set_params method instead.

    verbose : int, default=0
        Controls verbosity of output.

    Attributes
    ----------
    n_features_ : int
        The number of selected features.

    support_ : array of shape [n_features]
        The mask of selected features.

    ranking_ : array of shape [n_features]
        The feature ranking, such that ``ranking_[i]`` corresponds to the
        ranking position of the i-th feature. Selected (i.e., estimated
        best) features are assigned rank 1.

    estimator_ : object
        The external estimator fit on the reduced dataset.

    Examples
    --------
    The following example shows how to retrieve the 5 right informative
    features in the Friedman #1 dataset.

    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.feature_selection import RFE
    >>> from sklearn.svm import SVR
    >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
    >>> estimator = SVR(kernel="linear")
    >>> selector = RFE(estimator, 5, step=1)
    >>> selector = selector.fit(X, y)
    >>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
    array([ True,  True,  True,  True,  True,
            False, False, False, False, False], dtype=bool)
    >>> selector.ranking_
    array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])

    References
    ----------
    .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
           for cancer classification using support vector machines",
           Mach. Learn., 46(1-3), 389--422, 2002.
    """
    def __init__(self, estimator, n_features_to_select=None, step=1,
                 estimator_params=None, verbose=0):
        self.estimator = estimator
        self.n_features_to_select = n_features_to_select
        self.step = step
        self.estimator_params = estimator_params
        self.verbose = verbose

    @property
    def _estimator_type(self):
        # Expose the wrapped estimator's type ("classifier"/"regressor")
        # so scoring and CV utilities treat the RFE wrapper like it.
        return self.estimator._estimator_type

    def fit(self, X, y):
        """Fit the RFE model and then the underlying estimator on the selected
        features.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            The training input samples.

        y : array-like, shape = [n_samples]
            The target values.
        """
        return self._fit(X, y)

    def _fit(self, X, y, step_score=None):
        # step_score(estimator, features) is only supplied by RFECV; when
        # given, a per-iteration score is recorded in self.scores_.
        X, y = check_X_y(X, y, "csc")
        # Initialization
        n_features = X.shape[1]
        if self.n_features_to_select is None:
            # BUG FIX: use floor division. Under Python 3, `/` yields a
            # float here, making n_features_to_select (and then
            # n_features_) a float instead of an int.
            n_features_to_select = n_features // 2
        else:
            n_features_to_select = self.n_features_to_select

        if 0.0 < self.step < 1.0:
            # Fractional step: remove that share of the features per
            # iteration, but always at least one.
            step = int(max(1, self.step * n_features))
        else:
            step = int(self.step)
        if step <= 0:
            raise ValueError("Step must be >0")

        if self.estimator_params is not None:
            warnings.warn("The parameter 'estimator_params' is deprecated as "
                          "of version 0.16 and will be removed in 0.18. The "
                          "parameter is no longer necessary because the value "
                          "is set via the estimator initialisation or "
                          "set_params method.", DeprecationWarning)

        # Builtin dtypes: the np.bool/np.int aliases are deprecated (and
        # removed in NumPy >= 1.24); bool/int are equivalent.
        support_ = np.ones(n_features, dtype=bool)
        ranking_ = np.ones(n_features, dtype=int)
        if step_score:
            self.scores_ = []

        # Elimination
        while np.sum(support_) > n_features_to_select:
            # Remaining features
            features = np.arange(n_features)[support_]

            # Rank the remaining features
            estimator = clone(self.estimator)
            if self.estimator_params:
                estimator.set_params(**self.estimator_params)
            if self.verbose > 0:
                print("Fitting estimator with %d features." % np.sum(support_))

            estimator.fit(X[:, features], y)

            # Get coefs
            if hasattr(estimator, 'coef_'):
                coefs = estimator.coef_
            elif hasattr(estimator, 'feature_importances_'):
                coefs = estimator.feature_importances_
            else:
                raise RuntimeError('The classifier does not expose '
                                   '"coef_" or "feature_importances_" '
                                   'attributes')

            # Get ranks: for multi-output coefs, rank by the summed
            # squared weight across outputs.
            if coefs.ndim > 1:
                ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
            else:
                ranks = np.argsort(safe_sqr(coefs))

            # for sparse case ranks is matrix
            ranks = np.ravel(ranks)

            # Eliminate the worse features; never drop below the target
            # number of features.
            threshold = min(step, np.sum(support_) - n_features_to_select)

            # Compute step score on the previous selection iteration
            # because 'estimator' must use features
            # that have not been eliminated yet
            if step_score:
                self.scores_.append(step_score(estimator, features))
            support_[features[ranks][:threshold]] = False
            ranking_[np.logical_not(support_)] += 1

        # Set final attributes
        features = np.arange(n_features)[support_]
        self.estimator_ = clone(self.estimator)
        if self.estimator_params:
            self.estimator_.set_params(**self.estimator_params)
        self.estimator_.fit(X[:, features], y)

        # Compute step score when only n_features_to_select features left
        if step_score:
            self.scores_.append(step_score(self.estimator_, features))
        self.n_features_ = support_.sum()
        self.support_ = support_
        self.ranking_ = ranking_
        return self

    @if_delegate_has_method(delegate='estimator')
    def predict(self, X):
        """Reduce X to the selected features and then predict using the
        underlying estimator.

        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape [n_samples]
            The predicted target values.
        """
        return self.estimator_.predict(self.transform(X))

    @if_delegate_has_method(delegate='estimator')
    def score(self, X, y):
        """Reduce X to the selected features and then return the score of the
        underlying estimator.

        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            The input samples.

        y : array of shape [n_samples]
            The target values.
        """
        return self.estimator_.score(self.transform(X), y)

    def _get_support_mask(self):
        # SelectorMixin hook: boolean mask of kept features.
        return self.support_

    @if_delegate_has_method(delegate='estimator')
    def decision_function(self, X):
        return self.estimator_.decision_function(self.transform(X))

    @if_delegate_has_method(delegate='estimator')
    def predict_proba(self, X):
        return self.estimator_.predict_proba(self.transform(X))

    @if_delegate_has_method(delegate='estimator')
    def predict_log_proba(self, X):
        return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
    """Feature ranking with recursive feature elimination and cross-validated
    selection of the best number of features.
    Read more in the :ref:`User Guide <rfe>`.
    Parameters
    ----------
    estimator : object
        A supervised learning estimator with a `fit` method that updates a
        `coef_` attribute that holds the fitted parameters. Important features
        must correspond to high absolute values in the `coef_` array.
        For instance, this is the case for most supervised learning
        algorithms such as Support Vector Classifiers and Generalized
        Linear Models from the `svm` and `linear_model` modules.
    step : int or float, optional (default=1)
        If greater than or equal to 1, then `step` corresponds to the (integer)
        number of features to remove at each iteration.
        If within (0.0, 1.0), then `step` corresponds to the percentage
        (rounded down) of features to remove at each iteration.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` used. If the estimator is a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    estimator_params : dict
        Parameters for the external estimator.
        This attribute is deprecated as of version 0.16 and will be removed in
        0.18. Use estimator initialisation or set_params method instead.
    verbose : int, default=0
        Controls verbosity of output.
    Attributes
    ----------
    n_features_ : int
        The number of selected features with cross-validation.
    support_ : array of shape [n_features]
        The mask of selected features.
    ranking_ : array of shape [n_features]
        The feature ranking, such that `ranking_[i]`
        corresponds to the ranking
        position of the i-th feature.
        Selected (i.e., estimated best)
        features are assigned rank 1.
    grid_scores_ : array of shape [n_subsets_of_features]
        The cross-validation scores such that
        ``grid_scores_[i]`` corresponds to
        the CV score of the i-th subset of features.
    estimator_ : object
        The external estimator fit on the reduced dataset.
    Notes
    -----
    The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
    where step is the number of features removed at each iteration.
    Examples
    --------
    The following example shows how to retrieve the a-priori not known 5
    informative features in the Friedman #1 dataset.
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.feature_selection import RFECV
    >>> from sklearn.svm import SVR
    >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
    >>> estimator = SVR(kernel="linear")
    >>> selector = RFECV(estimator, step=1, cv=5)
    >>> selector = selector.fit(X, y)
    >>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
    array([ True,  True,  True,  True,  True,
            False, False, False, False, False], dtype=bool)
    >>> selector.ranking_
    array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
    References
    ----------
    .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
           for cancer classification using support vector machines",
           Mach. Learn., 46(1-3), 389--422, 2002.
    """
    def __init__(self, estimator, step=1, cv=None, scoring=None,
                 estimator_params=None, verbose=0):
        self.estimator = estimator
        self.step = step
        self.cv = cv
        self.scoring = scoring
        self.estimator_params = estimator_params
        self.verbose = verbose
    def fit(self, X, y):
        """Fit the RFE model and automatically tune the number of selected
        features.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where `n_samples` is the number of samples and
            `n_features` is the total number of features.
        y : array-like, shape = [n_samples]
            Target values (integers for classification, real numbers for
            regression).
        """
        X, y = check_X_y(X, y, "csr")
        if self.estimator_params is not None:
            warnings.warn("The parameter 'estimator_params' is deprecated as "
                          "of version 0.16 and will be removed in 0.18. "
                          "The parameter is no longer necessary because the "
                          "value is set via the estimator initialisation or "
                          "set_params method.", DeprecationWarning)
        # Initialization
        cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
        scorer = check_scoring(self.estimator, scoring=self.scoring)
        n_features = X.shape[1]
        # Each fold runs a full elimination down to a single feature; the
        # per-step scores are then summed across folds per subset size.
        n_features_to_select = 1
        # Determine the number of subsets of features
        scores = []
        # Cross-validation
        for n, (train, test) in enumerate(cv):
            X_train, y_train = _safe_split(self.estimator, X, y, train)
            X_test, y_test = _safe_split(self.estimator, X, y, test, train)
            rfe = RFE(estimator=self.estimator,
                      n_features_to_select=n_features_to_select,
                      step=self.step, estimator_params=self.estimator_params,
                      verbose=self.verbose - 1)
            # step_score evaluates each intermediate feature subset on the
            # held-out fold.
            rfe._fit(X_train, y_train, lambda estimator, features:
                     _score(estimator, X_test[:, features], y_test, scorer))
            # rfe.scores_ runs from the largest subset to the smallest;
            # reversed so that index 0 is the smallest subset.
            scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
        scores = np.sum(np.concatenate(scores, 0), 0)
        # The index in 'scores' when 'n_features' features are selected
        n_feature_index = np.ceil((n_features - n_features_to_select) /
                                  float(self.step))
        # Map the best-scoring index back to a feature count.
        # NOTE(review): np.ceil makes this arithmetic float-valued, so
        # n_features_to_select can end up a NumPy float; RFE tolerates it
        # but the resulting n_features_ type should be confirmed.
        n_features_to_select = max(n_features_to_select,
                                   n_features - ((n_feature_index -
                                                  np.argmax(scores)) *
                                                 self.step))
        # Re-execute an elimination with best_k over the whole set
        rfe = RFE(estimator=self.estimator,
                  n_features_to_select=n_features_to_select,
                  step=self.step, estimator_params=self.estimator_params)
        rfe.fit(X, y)
        # Set final attributes
        self.support_ = rfe.support_
        self.n_features_ = rfe.n_features_
        self.ranking_ = rfe.ranking_
        self.estimator_ = clone(self.estimator)
        if self.estimator_params:
            self.estimator_.set_params(**self.estimator_params)
        self.estimator_.fit(self.transform(X), y)
        # Fixing a normalization error, n is equal to len(cv) - 1
        # here, the scores are normalized by len(cv)
        self.grid_scores_ = scores / len(cv)
        return self
|
bsd-3-clause
|
elisamussumeci/InfoDenguePredict
|
infodenguepredict/models/deeplearning/mlp.py
|
2
|
2833
|
"""
Created on 27/01/17
by fccoelho
license: GPL V3 or Later
adapted from this example:
http://machinelearningmastery.com/time-series-prediction-with-deep-learning-in-python-with-keras/
"""
import numpy as np
import pandas as pd
from time import time
from matplotlib import pyplot as P
from sklearn import preprocessing
from sklearn.metrics import mean_squared_error, classification_report
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.utils.visualize_util import plot
from infodenguepredict.models.deeplearning.preprocessing import split_data, normalize_data
from infodenguepredict.data.infodengue import get_alerta_table, get_temperature_data, get_tweet_data, \
build_multicity_dataset
def build_model(hidden, features, look_back=10, batch_size=1,
                prediction_window=2):
    """
    Builds and returns the MLP model with the parameters given.

    BUG FIX: ``prediction_window`` used to be read from a global that is
    only defined under ``__main__``, so importing this module and calling
    ``build_model`` raised ``NameError``. It is now a parameter whose
    default (2) matches the value the script used.

    :param hidden: number of hidden nodes
    :param features: number of variables in the example table
    :param look_back: Number of time-steps to look back before predicting
    :param batch_size: batch size for batch training (currently unused by
        the architecture itself; kept for interface compatibility)
    :param prediction_window: number of time-steps predicted ahead
    :return: compiled Keras Sequential model
    """
    model = Sequential()
    model.add(Dense(hidden, input_shape=(look_back, features)))
    # model.add(Dropout(0.2))
    model.add(Dense((prediction_window), activation='relu'))  # multi-step ahead prediction
    start = time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("Compilation Time : ", time() - start)
    # Write a diagram of the architecture next to the script.
    plot(model, to_file='model.png')
    return model
def train(model, X_train, Y_train, batch_size=1, epochs=20, overwrite=True):
    """Fit *model* on the training data and persist the learned weights.

    A 5% validation split is held out during fitting; the Keras History
    object is returned so callers can inspect the loss curves.
    """
    history = model.fit(
        X_train,
        Y_train,
        batch_size=batch_size,
        nb_epoch=epochs,
        validation_split=0.05,
        verbose=1,
    )
    model.save_weights('trained_lstm_model.h5', overwrite=overwrite)
    return history
if __name__ == "__main__":
    # Hyperparameters for the MLP run.
    HIDDEN = 256
    TIME_WINDOW = 12
    BATCH_SIZE = 1
    prediction_window = 2 # weeks
    # data = get_example_table(3304557) #Nova Iguaçu: 3303500
    # data = get_complete_table(3304557)
    # Fetch the multi-city dataset for Rio de Janeiro state.
    data = build_multicity_dataset('RJ')
    print(data.shape)
    # Column to predict: estimated cases for city 3303500.
    target_col = list(data.columns).index('casos_est_3303500')
    time_index = data.index
    norm_data = normalize_data(data)
    print(norm_data.columns, norm_data.shape)
    # norm_data.casos_est.plot()
    # P.show()
    # 70/30 train/test split over sliding windows of TIME_WINDOW steps,
    # predicting prediction_window steps ahead.
    X_train, Y_train, X_test, Y_test = split_data(norm_data,
                                                  look_back=TIME_WINDOW, ratio=.7,
                                                  predict_n=prediction_window, Y_column=target_col)
    print(X_train.shape, Y_train.shape, X_test.shape, Y_test.shape)
    model = build_model(HIDDEN, X_train.shape[2], TIME_WINDOW, BATCH_SIZE)
    history = train(model, X_train, Y_train, batch_size=1, epochs=30)
    model.save('mlp_model')
|
gpl-3.0
|
plotly/dash-core-components
|
tests/integration/graph/test_graph_basics.py
|
1
|
4985
|
import pytest
import pandas as pd
from multiprocessing import Value, Lock
import numpy as np
from time import sleep
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import dash.testing.wait as wait
@pytest.mark.parametrize("is_eager", [True, False])
def test_grbs001_graph_without_ids(dash_dcc, is_eager):
    """Graphs created without an explicit id must not get one injected."""
    app = dash.Dash(__name__, eager_loading=is_eager)
    app.layout = html.Div(
        [dcc.Graph(className="graph-no-id-1"), dcc.Graph(className="graph-no-id-2")]
    )
    dash_dcc.start_server(app)
    for selector in (".graph-no-id-1", ".graph-no-id-2"):
        element = dash_dcc.wait_for_element(selector)
        assert not element.get_attribute(
            "id"
        ), "the graph should contain no more auto-generated id"
@pytest.mark.DCC608
@pytest.mark.parametrize("is_eager", [True, False])
def test_grbs002_wrapped_graph_has_no_infinite_loop(dash_dcc, is_eager):
    """A relayoutData -> figure callback on a graph inside dcc.Loading must
    settle instead of retriggering itself forever (regression for DCC608)."""
    df = pd.DataFrame(np.random.randn(50, 50))
    # scaleanchor forces a relayout after render, which is what used to
    # retrigger the callback indefinitely.
    figure = {
        "data": [{"x": df.columns, "y": df.index, "z": df.values, "type": "heatmap"}],
        "layout": {"xaxis": {"scaleanchor": "y"}},
    }
    app = dash.Dash(__name__, eager_loading=is_eager)
    app.layout = html.Div(
        style={
            "backgroundColor": "red",
            "height": "100vmin",
            "width": "100vmin",
            "overflow": "hidden",
            "position": "relative",
        },
        children=[
            dcc.Loading(
                children=[
                    dcc.Graph(
                        id="graph",
                        figure=figure,
                        style={
                            "position": "absolute",
                            "top": 0,
                            "left": 0,
                            "backgroundColor": "blue",
                            "width": "100%",
                            "height": "100%",
                            "overflow": "hidden",
                        },
                    )
                ]
            )
        ],
    )
    # Shared counter so the callback invocations can be counted from the
    # test process.
    call_count = Value("i", 0)
    @app.callback(Output("graph", "figure"), [Input("graph", "relayoutData")])
    def selected_df_figure(selection):
        call_count.value += 1
        figure["data"][0]["x"] = df.columns
        figure["data"][0]["y"] = df.index
        figure["data"][0]["z"] = df.values
        return figure
    dash_dcc.start_server(app)
    wait.until(lambda: dash_dcc.driver.title == "Dash", timeout=2)
    # Give any runaway loop a chance to fire extra callbacks before counting.
    sleep(1)
    # TODO: not sure 2 calls actually makes sense here, shouldn't it be 1?
    # but that's what we had as of the 608 fix, PR 621, so let's lock that
    # in for now.
    assert call_count.value == 2
@pytest.mark.DCC672
def test_grbs003_graph_wrapped_in_loading_component_does_not_fail(dash_dcc):
    """A graph rendered inside dcc.Loading via a callback-built layout must
    render without console errors (regression for DCC672)."""
    app = dash.Dash(__name__, suppress_callback_exceptions=True)
    app.layout = html.Div(
        [
            html.H1("subplot issue"),
            dcc.Location(id="url", refresh=False),
            dcc.Loading(id="page-content"),
        ]
    )
    # Page content (dropdown + graph) is produced dynamically, so the graph
    # only exists after this callback fires.
    @app.callback(Output("page-content", "children"), [Input("url", "pathname")])
    def render_page(url):
        return [
            dcc.Dropdown(
                id="my-dropdown",
                options=[
                    {"label": "option 1", "value": "1"},
                    {"label": "option 2", "value": "2"},
                ],
                value="1",
            ),
            dcc.Graph(id="my-graph"),
        ]
    @app.callback(Output("my-graph", "figure"), [Input("my-dropdown", "value")])
    def update_graph(value):
        values = [1, 2, 3]
        ranges = [1, 2, 3]
        return {
            "data": [{"x": ranges, "y": values, "line": {"shape": "spline"}}],
        }
    dash_dcc.start_server(app)
    # The plot SVG appearing means the graph mounted and drew successfully.
    dash_dcc.wait_for_element("#my-graph .main-svg")
    assert not dash_dcc.get_logs()
@pytest.mark.DCC837
def test_grbs004_graph_loading_state_updates(dash_dcc):
    """The graph's data-dash-is-loading attribute must toggle on while its
    figure callback is running and clear afterwards (regression for DCC837)."""
    # The lock keeps the callback from returning, so the loading state stays
    # observable long enough for the assertions below.
    lock = Lock()
    app = dash.Dash(__name__, suppress_callback_exceptions=True)
    app.layout = html.Div(
        [
            html.H1(id="title", children="loading state updates"),
            dcc.Graph(id="my-graph"),
        ]
    )
    @app.callback(Output("my-graph", "figure"), [Input("title", "n_clicks")])
    def update_graph(n_clicks):
        values = [0, n_clicks]
        ranges = [0, n_clicks]
        with lock:
            return {
                "data": [{"x": ranges, "y": values, "line": {"shape": "spline"}}],
            }
    dash_dcc.start_server(app)
    # Initial render finishes: no loading flag.
    dash_dcc.wait_for_element("#my-graph:not([data-dash-is-loading])")
    with lock:
        # While the lock is held the callback cannot return, so the flag
        # must be present...
        title = dash_dcc.wait_for_element("#title")
        title.click()
        dash_dcc.wait_for_element('#my-graph[data-dash-is-loading="true"]')
    # ...and must clear once the callback completes.
    dash_dcc.wait_for_element("#my-graph:not([data-dash-is-loading])")
    assert not dash_dcc.get_logs()
|
mit
|
karthikvadla16/spark-tk
|
regression-tests/sparktkregtests/testcases/scoretests/logistic_regression_test.py
|
12
|
2456
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests Logistic Regression scoring engine """
import unittest
import os
from sparktkregtests.lib import sparktk_test
from sparktkregtests.lib import scoring_utils
class LogisticRegression(sparktk_test.SparkTKTestCase):
    """Exercises the logistic regression scoring engine end to end:
    train, predict, export to MAR, then score through the REST scorer."""
    def setUp(self):
        """Build test frame from the binary logit CSV fixture."""
        super(LogisticRegression, self).setUp()
        binomial_dataset = self.get_file("small_logit_binary.csv")
        # Five float feature vectors plus integer response/count/actual
        # columns, matching the fixture's layout.
        schema = [("vec0", float),
                  ("vec1", float),
                  ("vec2", float),
                  ("vec3", float),
                  ("vec4", float),
                  ("res", int),
                  ("count", int),
                  ("actual", int)]
        self.frame = self.context.frame.import_csv(
            binomial_dataset, schema=schema, header=True)
    def test_model_scoring(self):
        """Test publishing a logistic regression model."""
        model = self.context.models.classification.logistic_regression.train(
            self.frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
            'res')
        predict = model.predict(
            self.frame,
            ["vec0", "vec1", "vec2", "vec3", "vec4"])
        # Pull a sample of predictions locally to compare against the
        # scoring service's output row by row.
        test_rows = predict.to_pandas(100)
        file_name = self.get_name("logistic_regression")
        model_path = model.export_to_mar(self.get_export_file(file_name))
        with scoring_utils.scorer(
                model_path, self.id()) as scorer:
            for i, row in test_rows.iterrows():
                # Score one observation: the scorer expects a list of
                # {feature_name: value} dicts.
                res = scorer.score(
                    [dict(zip(["vec0", "vec1", "vec2", "vec3", "vec4"], list(row[0:5])))])
                # The service's label must match the in-frame prediction.
                self.assertEqual(
                    row["predicted_label"], res.json()["data"][0]['PredictedLabel'])
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
|
apache-2.0
|
apache/flink
|
flink-python/pyflink/table/tests/test_pandas_udaf.py
|
5
|
37026
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import unittest
from pyflink.table import expressions as expr
from pyflink.table.types import DataTypes
from pyflink.table.udf import udaf, udf, AggregateFunction
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkBatchTableTestCase, \
PyFlinkStreamTableTestCase
class BatchPandasUDAFITTests(PyFlinkBatchTableTestCase):
    """Batch-mode integration tests for vectorized (pandas) aggregate
    functions: result-type validation, group aggregations, group-window
    aggregations and over-window aggregations.
    """

    def test_check_result_type(self):
        """MAP is rejected as the result type of a pandas UDAF."""
        def pandas_udaf():
            pass
        with self.assertRaises(
                TypeError,
                msg="Invalid returnType: Pandas UDAF doesn't support DataType type MAP currently"):
            udaf(pandas_udaf, result_type=DataTypes.MAP(DataTypes.INT(), DataTypes.INT()),
                 func_type="pandas")

    def test_group_aggregate_function(self):
        """Keyed aggregation mixing a general UDF, a pandas UDF and pandas UDAFs."""
        t = self.t_env.from_elements(
            [(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT())]))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c'],
            [DataTypes.TINYINT(), DataTypes.FLOAT(),
             DataTypes.ROW(
                 [DataTypes.FIELD("a", DataTypes.INT()),
                  DataTypes.FIELD("b", DataTypes.INT())])])
        self.t_env.register_table_sink("Results", table_sink)
        # general udf
        add = udf(lambda a: a + 1, result_type=DataTypes.INT())
        # pandas udf
        substract = udf(lambda a: a - 1, result_type=DataTypes.INT(), func_type="pandas")
        # pandas UDAF returning a ROW of (max, min)
        max_udaf = udaf(lambda a: (a.max(), a.min()),
                        result_type=DataTypes.ROW(
                            [DataTypes.FIELD("a", DataTypes.INT()),
                             DataTypes.FIELD("b", DataTypes.INT())]),
                        func_type="pandas")
        t.group_by("a") \
            .select(t.a, mean_udaf(add(t.b)), max_udaf(substract(t.c))) \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(
            actual,
            ["+I[1, 6.0, +I[5, 2]]", "+I[2, 3.0, +I[3, 2]]", "+I[3, 3.0, +I[2, 2]]"])

    def test_group_aggregate_without_keys(self):
        """Global (non-keyed) aggregation with a multi-argument pandas UDAF."""
        t = self.t_env.from_elements(
            [(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT())]))
        table_sink = source_sink_utils.TestAppendSink(
            ['a'],
            [DataTypes.INT()])
        min_add = udaf(lambda a, b, c: a.min() + b.min() + c.min(),
                       result_type=DataTypes.INT(), func_type="pandas")
        self.t_env.register_table_sink("Results", table_sink)
        t.select(min_add(t.a, t.b, t.c)) \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["+I[5]"])

    def test_group_aggregate_with_aux_group(self):
        """Grouping on derived (auxiliary) keys; also enables the metric API."""
        t = self.t_env.from_elements(
            [(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT())]))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c', 'd'],
            [DataTypes.TINYINT(), DataTypes.INT(), DataTypes.FLOAT(), DataTypes.INT()])
        self.t_env.register_table_sink("Results", table_sink)
        # Metrics must be enabled for MaxAdd's counter to be registered.
        self.t_env.get_config().get_configuration().set_string('python.metric.enabled', 'true')
        self.t_env.register_function("max_add", udaf(MaxAdd(),
                                                     result_type=DataTypes.INT(),
                                                     func_type="pandas"))
        self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
        t.group_by("a") \
            .select("a, a + 1 as b, a + 2 as c") \
            .group_by("a, b") \
            .select("a, b, mean_udaf(b), max_add(b, c, 1)") \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["+I[1, 2, 2.0, 6]", "+I[2, 3, 3.0, 8]", "+I[3, 4, 4.0, 10]"])

    def test_tumble_group_window_aggregate_function(self):
        """Tumbling event-time window (1 hour) with a pandas UDAF."""
        import datetime
        from pyflink.table.window import Tumble
        t = self.t_env.from_elements(
            [
                (1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (3, 2, 4, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (1, 3, 1, datetime.datetime(2018, 3, 11, 3, 40, 0, 0)),
                (1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
                (2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
            ],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT()),
                 DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c'],
            [
                DataTypes.TIMESTAMP(3),
                DataTypes.TIMESTAMP(3),
                DataTypes.FLOAT()
            ])
        self.t_env.register_table_sink("Results", table_sink)
        self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
        tumble_window = Tumble.over(expr.lit(1).hours) \
            .on(expr.col("rowtime")) \
            .alias("w")
        t.window(tumble_window) \
            .group_by("w") \
            .select("w.start, w.end, mean_udaf(b)") \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual,
                           ["+I[2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.2]",
                            "+I[2018-03-11 04:00:00.0, 2018-03-11 05:00:00.0, 8.0]"])

    def test_slide_group_window_aggregate_function(self):
        """Sliding event-time window (1 hour, sliding 30 min) with UDAFs."""
        import datetime
        from pyflink.table.window import Slide
        t = self.t_env.from_elements(
            [
                (1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (3, 2, 4, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (1, 3, 1, datetime.datetime(2018, 3, 11, 3, 40, 0, 0)),
                (1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
                (2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
            ],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT()),
                 DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c', 'd', 'e'],
            [
                DataTypes.TINYINT(),
                DataTypes.TIMESTAMP(3),
                DataTypes.TIMESTAMP(3),
                DataTypes.FLOAT(),
                DataTypes.INT()
            ])
        self.t_env.register_table_sink("Results", table_sink)
        self.t_env.register_function("max_add", udaf(MaxAdd(),
                                                     result_type=DataTypes.INT(),
                                                     func_type="pandas"))
        self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
        slide_window = Slide.over(expr.lit(1).hours) \
            .every(expr.lit(30).minutes) \
            .on(expr.col("rowtime")) \
            .alias("w")
        t.window(slide_window) \
            .group_by("a, w") \
            .select("a, w.start, w.end, mean_udaf(b), max_add(b, c, 1)") \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual,
                           ["+I[1, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 2.0, 6]",
                            "+I[1, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.5, 7]",
                            "+I[1, 2018-03-11 03:30:00.0, 2018-03-11 04:30:00.0, 5.5, 14]",
                            "+I[1, 2018-03-11 04:00:00.0, 2018-03-11 05:00:00.0, 8.0, 14]",
                            "+I[2, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 1.0, 4]",
                            "+I[2, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.0, 10]",
                            "+I[2, 2018-03-11 03:30:00.0, 2018-03-11 04:30:00.0, 3.0, 10]",
                            "+I[3, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.0, 7]",
                            "+I[3, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 2.0, 7]"])

    def test_over_window_aggregate_function(self):
        """Pandas UDAFs over a wide variety of ROWS/RANGE over-window frames."""
        import datetime
        t = self.t_env.from_elements(
            [
                (1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (3, 2, 1, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (1, 3, 1, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
                (2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
            ],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT()),
                 DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'],
            [DataTypes.TINYINT(), DataTypes.FLOAT(), DataTypes.INT(), DataTypes.FLOAT(),
             DataTypes.FLOAT(), DataTypes.FLOAT(), DataTypes.FLOAT(), DataTypes.FLOAT(),
             DataTypes.FLOAT(), DataTypes.FLOAT()])
        self.t_env.register_table_sink("Results", table_sink)
        self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
        self.t_env.register_function("max_add", udaf(MaxAdd(),
                                                     result_type=DataTypes.INT(),
                                                     func_type="pandas"))
        self.t_env.register_table("T", t)
        # Each select item exercises a different over-window frame shape.
        self.t_env.execute_sql("""
            insert into Results
            select a,
             mean_udaf(b)
             over (PARTITION BY a ORDER BY rowtime
             ROWS BETWEEN UNBOUNDED preceding AND UNBOUNDED FOLLOWING),
             max_add(b, c)
             over (PARTITION BY a ORDER BY rowtime
             ROWS BETWEEN UNBOUNDED preceding AND 0 FOLLOWING),
             mean_udaf(b)
             over (PARTITION BY a ORDER BY rowtime
             ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING),
             mean_udaf(c)
             over (PARTITION BY a ORDER BY rowtime
             ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING),
             mean_udaf(c)
             over (PARTITION BY a ORDER BY rowtime
             RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING),
             mean_udaf(b)
             over (PARTITION BY a ORDER BY rowtime
             RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW),
             mean_udaf(b)
             over (PARTITION BY a ORDER BY rowtime
             RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND UNBOUNDED FOLLOWING),
             mean_udaf(c)
             over (PARTITION BY a ORDER BY rowtime
             RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND UNBOUNDED FOLLOWING),
             mean_udaf(c)
             over (PARTITION BY a ORDER BY rowtime
             RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND CURRENT ROW)
            from T
        """).wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual,
                           ["+I[1, 4.3333335, 5, 4.3333335, 3.0, 3.0, 2.5, 4.3333335, 3.0, 2.0]",
                            "+I[1, 4.3333335, 13, 5.5, 3.0, 3.0, 4.3333335, 8.0, 5.0, 5.0]",
                            "+I[1, 4.3333335, 6, 4.3333335, 2.0, 3.0, 2.5, 4.3333335, 3.0, 2.0]",
                            "+I[2, 2.0, 9, 2.0, 4.0, 4.0, 2.0, 2.0, 4.0, 4.0]",
                            "+I[2, 2.0, 3, 2.0, 2.0, 4.0, 1.0, 2.0, 4.0, 2.0]",
                            "+I[3, 2.0, 3, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0]"])
class StreamPandasUDAFITTests(PyFlinkStreamTableTestCase):
    """Stream-mode integration tests for vectorized (pandas) aggregate
    functions: sliding/tumbling group windows over event time, processing
    time and counts, plus event-/processing-time over windows and JSON-plan
    execution.
    """

    def test_sliding_group_window_over_time(self):
        """Sliding event-time window over a CSV filesystem source."""
        # create source file path
        import tempfile
        import os
        tmp_dir = tempfile.gettempdir()
        data = [
            '1,1,2,2018-03-11 03:10:00',
            '3,3,2,2018-03-11 03:10:00',
            '2,2,1,2018-03-11 03:10:00',
            '1,1,3,2018-03-11 03:40:00',
            '1,1,8,2018-03-11 04:20:00',
            '2,2,3,2018-03-11 03:30:00'
        ]
        source_path = tmp_dir + '/test_sliding_group_window_over_time.csv'
        with open(source_path, 'w') as fd:
            for ele in data:
                fd.write(ele + '\n')
        from pyflink.table.window import Slide
        self.t_env.get_config().get_configuration().set_string(
            "pipeline.time-characteristic", "EventTime")
        self.t_env.register_function("mean_udaf", mean_udaf)
        source_table = """
            create table source_table(
                a TINYINT,
                b SMALLINT,
                c SMALLINT,
                rowtime TIMESTAMP(3),
                WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
            ) with(
                'connector.type' = 'filesystem',
                'format.type' = 'csv',
                'connector.path' = '%s',
                'format.ignore-first-line' = 'false',
                'format.field-delimiter' = ','
            )
        """ % source_path
        self.t_env.execute_sql(source_table)
        t = self.t_env.from_path("source_table")
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c', 'd'],
            [
                DataTypes.TINYINT(),
                DataTypes.TIMESTAMP(3),
                DataTypes.TIMESTAMP(3),
                DataTypes.FLOAT()])
        self.t_env.register_table_sink("Results", table_sink)
        t.window(Slide.over("1.hours").every("30.minutes").on("rowtime").alias("w")) \
            .group_by("a, b, w") \
            .select("a, w.start, w.end, mean_udaf(c) as b") \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual,
                           ["+I[1, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 2.0]",
                            "+I[1, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.5]",
                            "+I[1, 2018-03-11 03:30:00.0, 2018-03-11 04:30:00.0, 5.5]",
                            "+I[1, 2018-03-11 04:00:00.0, 2018-03-11 05:00:00.0, 8.0]",
                            "+I[2, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 1.0]",
                            "+I[2, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.0]",
                            "+I[2, 2018-03-11 03:30:00.0, 2018-03-11 04:30:00.0, 3.0]",
                            "+I[3, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.0]",
                            "+I[3, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 2.0]"])
        os.remove(source_path)

    def test_sliding_group_window_over_proctime(self):
        """Sliding processing-time window; only checks the window start is real."""
        self.t_env.get_config().get_configuration().set_string("parallelism.default", "1")
        from pyflink.table.window import Slide
        self.t_env.register_function("mean_udaf", mean_udaf)
        source_table = """
            create table source_table(
                a INT,
                proctime as PROCTIME()
            ) with(
                'connector' = 'datagen',
                'rows-per-second' = '1',
                'fields.a.kind' = 'sequence',
                'fields.a.start' = '1',
                'fields.a.end' = '10'
            )
        """
        self.t_env.execute_sql(source_table)
        t = self.t_env.from_path("source_table")
        iterator = t.select("a, proctime") \
            .window(Slide.over("1.seconds").every("1.seconds").on("proctime").alias("w")) \
            .group_by("a, w") \
            .select("mean_udaf(a) as b, w.start").execute().collect()
        result = [i for i in iterator]
        # if the WindowAssigner.isEventTime() does not return false,
        # the w.start would be 1970-01-01
        # TODO: After fixing the TimeZone problem of window with processing time (will be fixed in
        # FLIP-162), we should replace it with a more accurate assertion.
        self.assertTrue(result[0][1].year > 1970)

    def test_sliding_group_window_over_count(self):
        """Sliding count window (2 rows, sliding 1 row) over processing time."""
        self.t_env.get_config().get_configuration().set_string("parallelism.default", "1")
        # create source file path
        import tempfile
        import os
        tmp_dir = tempfile.gettempdir()
        data = [
            '1,1,2,2018-03-11 03:10:00',
            '3,3,2,2018-03-11 03:10:00',
            '2,2,1,2018-03-11 03:10:00',
            '1,1,3,2018-03-11 03:40:00',
            '1,1,8,2018-03-11 04:20:00',
            '2,2,3,2018-03-11 03:30:00',
            '3,3,3,2018-03-11 03:30:00'
        ]
        source_path = tmp_dir + '/test_sliding_group_window_over_count.csv'
        with open(source_path, 'w') as fd:
            for ele in data:
                fd.write(ele + '\n')
        from pyflink.table.window import Slide
        self.t_env.get_config().get_configuration().set_string(
            "pipeline.time-characteristic", "ProcessingTime")
        self.t_env.register_function("mean_udaf", mean_udaf)
        source_table = """
            create table source_table(
                a TINYINT,
                b SMALLINT,
                c SMALLINT,
                protime as PROCTIME()
            ) with(
                'connector.type' = 'filesystem',
                'format.type' = 'csv',
                'connector.path' = '%s',
                'format.ignore-first-line' = 'false',
                'format.field-delimiter' = ','
            )
        """ % source_path
        self.t_env.execute_sql(source_table)
        t = self.t_env.from_path("source_table")
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'd'],
            [
                DataTypes.TINYINT(),
                DataTypes.FLOAT()])
        self.t_env.register_table_sink("Results", table_sink)
        t.window(Slide.over("2.rows").every("1.rows").on("protime").alias("w")) \
            .group_by("a, b, w") \
            .select("a, mean_udaf(c) as b") \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["+I[1, 2.5]", "+I[1, 5.5]", "+I[2, 2.0]", "+I[3, 2.5]"])
        os.remove(source_path)

    def test_tumbling_group_window_over_time(self):
        """Tumbling event-time window; also checks w.rowtime in the output."""
        # create source file path
        import tempfile
        import os
        tmp_dir = tempfile.gettempdir()
        data = [
            '1,1,2,2018-03-11 03:10:00',
            '3,3,2,2018-03-11 03:10:00',
            '2,2,1,2018-03-11 03:10:00',
            '1,1,3,2018-03-11 03:40:00',
            '1,1,8,2018-03-11 04:20:00',
            '2,2,3,2018-03-11 03:30:00'
        ]
        source_path = tmp_dir + '/test_tumbling_group_window_over_time.csv'
        with open(source_path, 'w') as fd:
            for ele in data:
                fd.write(ele + '\n')
        from pyflink.table.window import Tumble
        self.t_env.get_config().get_configuration().set_string(
            "pipeline.time-characteristic", "EventTime")
        self.t_env.register_function("mean_udaf", mean_udaf)
        source_table = """
            create table source_table(
                a TINYINT,
                b SMALLINT,
                c SMALLINT,
                rowtime TIMESTAMP(3),
                WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
            ) with(
                'connector.type' = 'filesystem',
                'format.type' = 'csv',
                'connector.path' = '%s',
                'format.ignore-first-line' = 'false',
                'format.field-delimiter' = ','
            )
        """ % source_path
        self.t_env.execute_sql(source_table)
        t = self.t_env.from_path("source_table")
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c', 'd', 'e'],
            [
                DataTypes.TINYINT(),
                DataTypes.TIMESTAMP(3),
                DataTypes.TIMESTAMP(3),
                DataTypes.TIMESTAMP(3),
                DataTypes.FLOAT()])
        self.t_env.register_table_sink("Results", table_sink)
        t.window(Tumble.over("1.hours").on("rowtime").alias("w")) \
            .group_by("a, b, w") \
            .select("a, w.start, w.end, w.rowtime, mean_udaf(c) as b") \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual, [
            "+I[1, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2018-03-11 03:59:59.999, 2.5]",
            "+I[1, 2018-03-11 04:00:00.0, 2018-03-11 05:00:00.0, 2018-03-11 04:59:59.999, 8.0]",
            "+I[2, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2018-03-11 03:59:59.999, 2.0]",
            "+I[3, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2018-03-11 03:59:59.999, 2.0]",
        ])
        os.remove(source_path)

    def test_tumbling_group_window_over_count(self):
        """Tumbling count window (2 rows) over processing time."""
        self.t_env.get_config().get_configuration().set_string("parallelism.default", "1")
        # create source file path
        import tempfile
        import os
        tmp_dir = tempfile.gettempdir()
        data = [
            '1,1,2,2018-03-11 03:10:00',
            '3,3,2,2018-03-11 03:10:00',
            '2,2,1,2018-03-11 03:10:00',
            '1,1,3,2018-03-11 03:40:00',
            '1,1,8,2018-03-11 04:20:00',
            '2,2,3,2018-03-11 03:30:00',
            '3,3,3,2018-03-11 03:30:00',
            '1,1,4,2018-03-11 04:20:00',
        ]
        source_path = tmp_dir + '/test_group_window_aggregate_function_over_count.csv'
        with open(source_path, 'w') as fd:
            for ele in data:
                fd.write(ele + '\n')
        from pyflink.table.window import Tumble
        self.t_env.get_config().get_configuration().set_string(
            "pipeline.time-characteristic", "ProcessingTime")
        self.t_env.register_function("mean_udaf", mean_udaf)
        source_table = """
            create table source_table(
                a TINYINT,
                b SMALLINT,
                c SMALLINT,
                protime as PROCTIME()
            ) with(
                'connector.type' = 'filesystem',
                'format.type' = 'csv',
                'connector.path' = '%s',
                'format.ignore-first-line' = 'false',
                'format.field-delimiter' = ','
            )
        """ % source_path
        self.t_env.execute_sql(source_table)
        t = self.t_env.from_path("source_table")
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'd'],
            [
                DataTypes.TINYINT(),
                DataTypes.FLOAT()])
        self.t_env.register_table_sink("Results", table_sink)
        t.window(Tumble.over("2.rows").on("protime").alias("w")) \
            .group_by("a, b, w") \
            .select("a, mean_udaf(c) as b") \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["+I[1, 2.5]", "+I[1, 6.0]", "+I[2, 2.0]", "+I[3, 2.5]"])
        os.remove(source_path)

    def test_row_time_over_range_window_aggregate_function(self):
        """Event-time RANGE over window with pandas UDAFs."""
        # create source file path
        import tempfile
        import os
        tmp_dir = tempfile.gettempdir()
        data = [
            '1,1,2013-01-01 03:10:00',
            '3,2,2013-01-01 03:10:00',
            '2,1,2013-01-01 03:10:00',
            '1,5,2013-01-01 03:10:00',
            '1,8,2013-01-01 04:20:00',
            '2,3,2013-01-01 03:30:00'
        ]
        source_path = tmp_dir + '/test_over_range_window_aggregate_function.csv'
        with open(source_path, 'w') as fd:
            for ele in data:
                fd.write(ele + '\n')
        max_add_min_udaf = udaf(lambda a: a.max() + a.min(),
                                result_type=DataTypes.SMALLINT(),
                                func_type='pandas')
        self.t_env.get_config().get_configuration().set_string(
            "pipeline.time-characteristic", "EventTime")
        self.t_env.register_function("mean_udaf", mean_udaf)
        self.t_env.register_function("max_add_min_udaf", max_add_min_udaf)
        source_table = """
            create table source_table(
                a TINYINT,
                b SMALLINT,
                rowtime TIMESTAMP(3),
                WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
            ) with(
                'connector.type' = 'filesystem',
                'format.type' = 'csv',
                'connector.path' = '%s',
                'format.ignore-first-line' = 'false',
                'format.field-delimiter' = ','
            )
        """ % source_path
        self.t_env.execute_sql(source_table)
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c'],
            [
                DataTypes.TINYINT(),
                DataTypes.FLOAT(),
                DataTypes.SMALLINT()])
        self.t_env.register_table_sink("Results", table_sink)
        self.t_env.execute_sql("""
            insert into Results
            select a,
             mean_udaf(b)
             over (PARTITION BY a ORDER BY rowtime
             RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND CURRENT ROW),
             max_add_min_udaf(b)
             over (PARTITION BY a ORDER BY rowtime
             RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND CURRENT ROW)
            from source_table
        """).wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual,
                           ["+I[1, 3.0, 6]",
                            "+I[1, 3.0, 6]",
                            "+I[1, 8.0, 16]",
                            "+I[2, 1.0, 2]",
                            "+I[2, 2.0, 4]",
                            "+I[3, 2.0, 4]"])
        os.remove(source_path)

    def test_row_time_over_rows_window_aggregate_function(self):
        """Event-time ROWS over window with pandas UDAFs."""
        # create source file path
        import tempfile
        import os
        tmp_dir = tempfile.gettempdir()
        data = [
            '1,1,2013-01-01 03:10:00',
            '3,2,2013-01-01 03:10:00',
            '2,1,2013-01-01 03:10:00',
            '1,5,2013-01-01 03:10:00',
            '1,8,2013-01-01 04:20:00',
            '2,3,2013-01-01 03:30:00'
        ]
        source_path = tmp_dir + '/test_over_rows_window_aggregate_function.csv'
        with open(source_path, 'w') as fd:
            for ele in data:
                fd.write(ele + '\n')
        max_add_min_udaf = udaf(lambda a: a.max() + a.min(),
                                result_type=DataTypes.SMALLINT(),
                                func_type='pandas')
        self.t_env.get_config().get_configuration().set_string(
            "pipeline.time-characteristic", "EventTime")
        self.t_env.register_function("mean_udaf", mean_udaf)
        self.t_env.register_function("max_add_min_udaf", max_add_min_udaf)
        source_table = """
            create table source_table(
                a TINYINT,
                b SMALLINT,
                rowtime TIMESTAMP(3),
                WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
            ) with(
                'connector.type' = 'filesystem',
                'format.type' = 'csv',
                'connector.path' = '%s',
                'format.ignore-first-line' = 'false',
                'format.field-delimiter' = ','
            )
        """ % source_path
        self.t_env.execute_sql(source_table)
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c'],
            [
                DataTypes.TINYINT(),
                DataTypes.FLOAT(),
                DataTypes.SMALLINT()])
        self.t_env.register_table_sink("Results", table_sink)
        self.t_env.execute_sql("""
            insert into Results
            select a,
             mean_udaf(b)
             over (PARTITION BY a ORDER BY rowtime
             ROWS BETWEEN 1 PRECEDING AND CURRENT ROW),
             max_add_min_udaf(b)
             over (PARTITION BY a ORDER BY rowtime
             ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
            from source_table
        """).wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual,
                           ["+I[1, 1.0, 2]",
                            "+I[1, 3.0, 6]",
                            "+I[1, 6.5, 13]",
                            "+I[2, 1.0, 2]",
                            "+I[2, 2.0, 4]",
                            "+I[3, 2.0, 4]"])
        os.remove(source_path)

    def test_proc_time_over_rows_window_aggregate_function(self):
        """Processing-time ROWS over window with pandas UDAFs."""
        # create source file path
        import tempfile
        import os
        tmp_dir = tempfile.gettempdir()
        data = [
            '1,1,2013-01-01 03:10:00',
            '3,2,2013-01-01 03:10:00',
            '2,1,2013-01-01 03:10:00',
            '1,5,2013-01-01 03:10:00',
            '1,8,2013-01-01 04:20:00',
            '2,3,2013-01-01 03:30:00'
        ]
        source_path = tmp_dir + '/test_over_rows_window_aggregate_function.csv'
        with open(source_path, 'w') as fd:
            for ele in data:
                fd.write(ele + '\n')
        max_add_min_udaf = udaf(lambda a: a.max() + a.min(),
                                result_type=DataTypes.SMALLINT(),
                                func_type='pandas')
        self.t_env.get_config().get_configuration().set_string("parallelism.default", "1")
        self.t_env.get_config().get_configuration().set_string(
            "pipeline.time-characteristic", "ProcessingTime")
        self.t_env.register_function("mean_udaf", mean_udaf)
        self.t_env.register_function("max_add_min_udaf", max_add_min_udaf)
        source_table = """
            create table source_table(
                a TINYINT,
                b SMALLINT,
                proctime as PROCTIME()
            ) with(
                'connector.type' = 'filesystem',
                'format.type' = 'csv',
                'connector.path' = '%s',
                'format.ignore-first-line' = 'false',
                'format.field-delimiter' = ','
            )
        """ % source_path
        self.t_env.execute_sql(source_table)
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c'],
            [
                DataTypes.TINYINT(),
                DataTypes.FLOAT(),
                DataTypes.SMALLINT()])
        self.t_env.register_table_sink("Results", table_sink)
        self.t_env.execute_sql("""
            insert into Results
            select a,
             mean_udaf(b)
             over (PARTITION BY a ORDER BY proctime
             ROWS BETWEEN 1 PRECEDING AND CURRENT ROW),
             max_add_min_udaf(b)
             over (PARTITION BY a ORDER BY proctime
             ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
            from source_table
        """).wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual,
                           ["+I[1, 1.0, 2]",
                            "+I[1, 3.0, 6]",
                            "+I[1, 6.5, 13]",
                            "+I[2, 1.0, 2]",
                            "+I[2, 2.0, 4]",
                            "+I[3, 2.0, 4]"])
        os.remove(source_path)

    def test_execute_over_aggregate_from_json_plan(self):
        """Over aggregation compiled to a JSON plan and executed from it."""
        # create source file path
        tmp_dir = self.tempdir
        data = [
            '1,1,2013-01-01 03:10:00',
            '3,2,2013-01-01 03:10:00',
            '2,1,2013-01-01 03:10:00',
            '1,5,2013-01-01 03:10:00',
            '1,8,2013-01-01 04:20:00',
            '2,3,2013-01-01 03:30:00'
        ]
        source_path = tmp_dir + '/test_execute_over_aggregate_from_json_plan.csv'
        sink_path = tmp_dir + '/test_execute_over_aggregate_from_json_plan'
        with open(source_path, 'w') as fd:
            for ele in data:
                fd.write(ele + '\n')
        source_table = """
            CREATE TABLE source_table (
                a TINYINT,
                b SMALLINT,
                rowtime TIMESTAMP(3),
                WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
            ) WITH (
                'connector' = 'filesystem',
                'path' = '%s',
                'format' = 'csv'
            )
        """ % source_path
        self.t_env.execute_sql(source_table)
        self.t_env.execute_sql("""
            CREATE TABLE sink_table (
                a TINYINT,
                b FLOAT,
                c SMALLINT
            ) WITH (
                'connector' = 'filesystem',
                'path' = '%s',
                'format' = 'csv'
            )
        """ % sink_path)
        max_add_min_udaf = udaf(lambda a: a.max() + a.min(),
                                result_type=DataTypes.SMALLINT(),
                                func_type='pandas')
        self.t_env.get_config().get_configuration().set_string(
            "pipeline.time-characteristic", "EventTime")
        self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
        self.t_env.create_temporary_system_function("max_add_min_udaf", max_add_min_udaf)
        json_plan = self.t_env._j_tenv.getJsonPlan("""
            insert into sink_table
            select a,
             mean_udaf(b)
             over (PARTITION BY a ORDER BY rowtime
             ROWS BETWEEN 1 PRECEDING AND CURRENT ROW),
             max_add_min_udaf(b)
             over (PARTITION BY a ORDER BY rowtime
             ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
            from source_table
        """)
        from py4j.java_gateway import get_method
        # 'await' is a Python keyword, so the Java method must be fetched
        # indirectly via py4j's get_method.
        get_method(self.t_env._j_tenv.executeJsonPlan(json_plan), "await")()
        import glob
        lines = [line.strip() for file in glob.glob(sink_path + '/*') for line in open(file, 'r')]
        lines.sort()
        self.assertEqual(lines, ['1,1.0,2', '1,3.0,6', '1,6.5,13', '2,1.0,2', '2,2.0,4', '3,2.0,4'])
# Vectorized aggregate shared by the tests in this module (registered as
# "mean_udaf"): receives a whole group/window as a pandas.Series.
@udaf(result_type=DataTypes.FLOAT(), func_type="pandas")
def mean_udaf(v):
    """Return the arithmetic mean of the pandas.Series *v* as a FLOAT."""
    return v.mean()
class MaxAdd(AggregateFunction, unittest.TestCase):
    """Pandas UDAF that emits the sum of the per-column maxima of its
    arguments.

    It also exercises the metric API: ``open`` registers a counter that is
    bumped each time ``get_value`` is invoked.
    """

    def open(self, function_context):
        metric_group = function_context.get_metric_group()
        self.counter = metric_group.add_group("key", "value").counter("my_counter")
        self.counter_sum = 0

    def get_value(self, accumulator):
        # counter
        self.counter.inc(10)
        self.counter_sum += 10
        return accumulator[0]

    def create_accumulator(self):
        return []

    def accumulate(self, accumulator, *args):
        # Sum the maximum of every argument column (each a pandas.Series).
        accumulator.append(sum(column.max() for column in args))
if __name__ == '__main__':
    import unittest
    try:
        # Prefer JUnit-style XML reports when xmlrunner is available (CI).
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
    except ImportError:
        # Fall back to the plain text runner.
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
jzt5132/scikit-learn
|
sklearn/ensemble/weight_boosting.py
|
71
|
40664
|
"""Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <noel@dawe.me>
# Gilles Louppe <g.louppe@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# Arnaud Joly <arnaud.v.joly@gmail.com>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Warning: This method needs to be overriden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
    """Weighted average of the sub-estimators' feature importances
    (the higher, the more important the feature).

    Returns
    -------
    feature_importances_ : array, shape = [n_features]
    """
    if self.estimators_ is None or len(self.estimators_) == 0:
        raise ValueError("Estimator not fitted, "
                         "call `fit` before `feature_importances_`.")

    try:
        norm = self.estimator_weights_.sum()
        # Accumulate the per-estimator importances, each scaled by the
        # estimator's boosting weight, then normalize.
        total = 0
        for weight, clf in zip(self.estimator_weights_, self.estimators_):
            total = total + weight * clf.feature_importances_
        return total / norm

    except AttributeError:
        # Raised when a sub-estimator lacks feature_importances_.
        raise AttributeError(
            "Unable to compute feature importances "
            "since base_estimator does not have a "
            "feature_importances_ attribute")
def _validate_X_predict(self, X):
    """Ensure that X is in the proper format."""
    base = self.base_estimator
    # Tree-based weak learners need a CSR matrix of the tree dtype; any
    # other estimator accepts the common sparse formats unchanged.
    if base is None or isinstance(base, (BaseDecisionTree, BaseForest)):
        return check_array(X, accept_sparse='csr', dtype=DTYPE)
    return check_array(X, accept_sparse=['csr', 'csc', 'coo'])
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
    """An AdaBoost classifier.

    An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
    classifier on the original dataset and then fits additional copies of the
    classifier on the same dataset but where the weights of incorrectly
    classified instances are adjusted such that subsequent classifiers focus
    more on difficult cases.

    This class implements the algorithm known as AdaBoost-SAMME [2].

    Read more in the :ref:`User Guide <adaboost>`.

    Parameters
    ----------
    base_estimator : object, optional (default=DecisionTreeClassifier)
        The base estimator from which the boosted ensemble is built.
        Support for sample weighting is required, as well as proper `classes_`
        and `n_classes_` attributes.

    n_estimators : integer, optional (default=50)
        The maximum number of estimators at which boosting is terminated.
        In case of perfect fit, the learning procedure is stopped early.

    learning_rate : float, optional (default=1.)
        Learning rate shrinks the contribution of each classifier by
        ``learning_rate``. There is a trade-off between ``learning_rate`` and
        ``n_estimators``.

    algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
        If 'SAMME.R' then use the SAMME.R real boosting algorithm.
        ``base_estimator`` must support calculation of class probabilities.
        If 'SAMME' then use the SAMME discrete boosting algorithm.
        The SAMME.R algorithm typically converges faster than SAMME,
        achieving a lower test error with fewer boosting iterations.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    estimators_ : list of classifiers
        The collection of fitted sub-estimators.

    classes_ : array of shape = [n_classes]
        The classes labels.

    n_classes_ : int
        The number of classes.

    estimator_weights_ : array of floats
        Weights for each estimator in the boosted ensemble.

    estimator_errors_ : array of floats
        Classification error for each estimator in the boosted
        ensemble.

    feature_importances_ : array of shape = [n_features]
        The feature importances if supported by the ``base_estimator``.

    See also
    --------
    AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier

    References
    ----------
    .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
           on-Line Learning and an Application to Boosting", 1995.

    .. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
    """
    def __init__(self,
                 base_estimator=None,
                 n_estimators=50,
                 learning_rate=1.,
                 algorithm='SAMME.R',
                 random_state=None):

        super(AdaBoostClassifier, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            learning_rate=learning_rate,
            random_state=random_state)

        self.algorithm = algorithm

    def fit(self, X, y, sample_weight=None):
        """Build a boosted classifier from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        y : array-like of shape = [n_samples]
            The target values (class labels).

        sample_weight : array-like of shape = [n_samples], optional
            Sample weights. If None, the sample weights are initialized to
            ``1 / n_samples``.

        Returns
        -------
        self : object
            Returns self.
        """
        # Check that algorithm is supported
        if self.algorithm not in ('SAMME', 'SAMME.R'):
            raise ValueError("algorithm %s is not supported" % self.algorithm)

        # Fit; the base class runs the boosting loop and calls _boost below.
        return super(AdaBoostClassifier, self).fit(X, y, sample_weight)

    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        super(AdaBoostClassifier, self)._validate_estimator(
            default=DecisionTreeClassifier(max_depth=1))

        # SAMME-R requires predict_proba-enabled base estimators
        if self.algorithm == 'SAMME.R':
            if not hasattr(self.base_estimator_, 'predict_proba'):
                raise TypeError(
                    "AdaBoostClassifier with algorithm='SAMME.R' requires "
                    "that the weak learner supports the calculation of class "
                    "probabilities with a predict_proba method.\n"
                    "Please change the base estimator or set "
                    "algorithm='SAMME' instead.")
        if not has_fit_parameter(self.base_estimator_, "sample_weight"):
            raise ValueError("%s doesn't support sample_weight."
                             % self.base_estimator_.__class__.__name__)

    def _boost(self, iboost, X, y, sample_weight):
        """Implement a single boost.

        Perform a single boost according to the real multi-class SAMME.R
        algorithm or to the discrete SAMME algorithm and return the updated
        sample weights.

        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.

        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        y : array-like of shape = [n_samples]
            The target values (class labels).

        sample_weight : array-like of shape = [n_samples]
            The current sample weights.

        Returns
        -------
        sample_weight : array-like of shape = [n_samples] or None
            The reweighted sample weights.
            If None then boosting has terminated early.

        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.

        estimator_error : float
            The classification error for the current boost.
            If None then boosting has terminated early.
        """
        # Dispatch on the algorithm chosen at construction time.
        if self.algorithm == 'SAMME.R':
            return self._boost_real(iboost, X, y, sample_weight)

        else:  # elif self.algorithm == "SAMME":
            return self._boost_discrete(iboost, X, y, sample_weight)

    def _boost_real(self, iboost, X, y, sample_weight):
        """Implement a single boost using the SAMME.R real algorithm."""
        estimator = self._make_estimator()

        # Seed the weak learner when it accepts random_state; estimators
        # without that parameter raise ValueError, which is ignored.
        try:
            estimator.set_params(random_state=self.random_state)
        except ValueError:
            pass

        estimator.fit(X, y, sample_weight=sample_weight)

        y_predict_proba = estimator.predict_proba(X)

        if iboost == 0:
            self.classes_ = getattr(estimator, 'classes_', None)
            self.n_classes_ = len(self.classes_)

        y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
                                       axis=0)

        # Instances incorrectly classified
        incorrect = y_predict != y

        # Error fraction
        estimator_error = np.mean(
            np.average(incorrect, weights=sample_weight, axis=0))

        # Stop if classification is perfect
        if estimator_error <= 0:
            return sample_weight, 1., 0.

        # Construct y coding as described in Zhu et al [2]:
        #
        #    y_k = 1 if c == k else -1 / (K - 1)
        #
        # where K == n_classes_ and c, k in [0, K) are indices along the second
        # axis of the y coding with c being the index corresponding to the true
        # class label.
        n_classes = self.n_classes_
        classes = self.classes_
        y_codes = np.array([-1. / (n_classes - 1), 1.])
        y_coding = y_codes.take(classes == y[:, np.newaxis])

        # Displace zero probabilities so the log is defined.
        # Also fix negative elements which may occur with
        # negative sample weights.
        proba = y_predict_proba  # alias for readability
        proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps

        # Boost weight using multi-class AdaBoost SAMME.R alg
        estimator_weight = (-1. * self.learning_rate
                            * (((n_classes - 1.) / n_classes) *
                               inner1d(y_coding, np.log(y_predict_proba))))

        # Only boost the weights if it will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            sample_weight *= np.exp(estimator_weight *
                                    ((sample_weight > 0) |
                                     (estimator_weight < 0)))

        # SAMME.R uses a constant estimator weight of 1.
        return sample_weight, 1., estimator_error

    def _boost_discrete(self, iboost, X, y, sample_weight):
        """Implement a single boost using the SAMME discrete algorithm."""
        estimator = self._make_estimator()

        # Seed the weak learner when it accepts random_state; estimators
        # without that parameter raise ValueError, which is ignored.
        try:
            estimator.set_params(random_state=self.random_state)
        except ValueError:
            pass

        estimator.fit(X, y, sample_weight=sample_weight)

        y_predict = estimator.predict(X)

        if iboost == 0:
            self.classes_ = getattr(estimator, 'classes_', None)
            self.n_classes_ = len(self.classes_)

        # Instances incorrectly classified
        incorrect = y_predict != y

        # Error fraction
        estimator_error = np.mean(
            np.average(incorrect, weights=sample_weight, axis=0))

        # Stop if classification is perfect
        if estimator_error <= 0:
            return sample_weight, 1., 0.

        n_classes = self.n_classes_

        # Stop if the error is at least as bad as random guessing
        if estimator_error >= 1. - (1. / n_classes):
            self.estimators_.pop(-1)
            if len(self.estimators_) == 0:
                raise ValueError('BaseClassifier in AdaBoostClassifier '
                                 'ensemble is worse than random, ensemble '
                                 'can not be fit.')
            return None, None, None

        # Boost weight using multi-class AdaBoost SAMME alg
        estimator_weight = self.learning_rate * (
            np.log((1. - estimator_error) / estimator_error) +
            np.log(n_classes - 1.))

        # Only boost the weights if I will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            sample_weight *= np.exp(estimator_weight * incorrect *
                                    ((sample_weight > 0) |
                                     (estimator_weight < 0)))

        return sample_weight, estimator_weight, estimator_error

    def predict(self, X):
        """Predict classes for X.

        The predicted class of an input sample is computed as the weighted mean
        prediction of the classifiers in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted classes.
        """
        pred = self.decision_function(X)

        # Binary case: decision_function returns a 1-D score; positive
        # scores map to the second class in classes_.
        if self.n_classes_ == 2:
            return self.classes_.take(pred > 0, axis=0)

        return self.classes_.take(np.argmax(pred, axis=1), axis=0)

    def staged_predict(self, X):
        """Return staged predictions for X.

        The predicted class of an input sample is computed as the weighted mean
        prediction of the classifiers in the ensemble.

        This generator method yields the ensemble prediction after each
        iteration of boosting and therefore allows monitoring, such as to
        determine the prediction on a test set after each boost.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : generator of array, shape = [n_samples]
            The predicted classes.
        """
        n_classes = self.n_classes_
        classes = self.classes_

        if n_classes == 2:
            for pred in self.staged_decision_function(X):
                yield np.array(classes.take(pred > 0, axis=0))

        else:
            for pred in self.staged_decision_function(X):
                yield np.array(classes.take(
                    np.argmax(pred, axis=1), axis=0))

    def decision_function(self, X):
        """Compute the decision function of ``X``.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        score : array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
            Binary classification is a special cases with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self, "n_classes_")
        X = self._validate_X_predict(X)

        n_classes = self.n_classes_
        classes = self.classes_[:, np.newaxis]
        pred = None

        if self.algorithm == 'SAMME.R':
            # The weights are all 1. for SAMME.R
            pred = sum(_samme_proba(estimator, n_classes, X)
                       for estimator in self.estimators_)
        else:  # self.algorithm == "SAMME"
            # Each estimator votes for its predicted class, weighted by its
            # boosting weight.
            pred = sum((estimator.predict(X) == classes).T * w
                       for estimator, w in zip(self.estimators_,
                                               self.estimator_weights_))

        pred /= self.estimator_weights_.sum()
        if n_classes == 2:
            # Collapse the two columns into one signed score.
            pred[:, 0] *= -1
            return pred.sum(axis=1)
        return pred

    def staged_decision_function(self, X):
        """Compute decision function of ``X`` for each boosting iteration.

        This method allows monitoring (i.e. determine error on testing set)
        after each boosting iteration.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
            Binary classification is a special cases with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self, "n_classes_")
        X = self._validate_X_predict(X)

        n_classes = self.n_classes_
        classes = self.classes_[:, np.newaxis]
        pred = None
        norm = 0.

        for weight, estimator in zip(self.estimator_weights_,
                                     self.estimators_):
            norm += weight

            if self.algorithm == 'SAMME.R':
                # The weights are all 1. for SAMME.R
                current_pred = _samme_proba(estimator, n_classes, X)
            else:  # elif self.algorithm == "SAMME":
                current_pred = estimator.predict(X)
                current_pred = (current_pred == classes).T * weight

            # Accumulate the running (unnormalized) ensemble prediction.
            if pred is None:
                pred = current_pred
            else:
                pred += current_pred

            if n_classes == 2:
                # Copy before sign-flipping so the accumulator is not
                # clobbered for later iterations.
                tmp_pred = np.copy(pred)
                tmp_pred[:, 0] *= -1
                yield (tmp_pred / norm).sum(axis=1)
            else:
                yield pred / norm

    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the weighted mean predicted class probabilities of the classifiers
        in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        p : array of shape = [n_samples]
            The class probabilities of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
        """
        check_is_fitted(self, "n_classes_")

        n_classes = self.n_classes_
        X = self._validate_X_predict(X)

        if self.algorithm == 'SAMME.R':
            # The weights are all 1. for SAMME.R
            proba = sum(_samme_proba(estimator, n_classes, X)
                        for estimator in self.estimators_)
        else:  # self.algorithm == "SAMME"
            proba = sum(estimator.predict_proba(X) * w
                        for estimator, w in zip(self.estimators_,
                                                self.estimator_weights_))

        proba /= self.estimator_weights_.sum()
        # Map averaged (log-)scores back to a normalized probability simplex.
        proba = np.exp((1. / (n_classes - 1)) * proba)
        normalizer = proba.sum(axis=1)[:, np.newaxis]
        # Guard against division by zero for all-zero rows.
        normalizer[normalizer == 0.0] = 1.0
        proba /= normalizer

        return proba

    def staged_predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the weighted mean predicted class probabilities of the classifiers
        in the ensemble.

        This generator method yields the ensemble predicted class probabilities
        after each iteration of boosting and therefore allows monitoring, such
        as to determine the predicted class probabilities on a test set after
        each boost.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        p : generator of array, shape = [n_samples]
            The class probabilities of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
        """
        X = self._validate_X_predict(X)

        n_classes = self.n_classes_
        proba = None
        norm = 0.

        for weight, estimator in zip(self.estimator_weights_,
                                     self.estimators_):
            norm += weight

            if self.algorithm == 'SAMME.R':
                # The weights are all 1. for SAMME.R
                current_proba = _samme_proba(estimator, n_classes, X)
            else:  # elif self.algorithm == "SAMME":
                current_proba = estimator.predict_proba(X) * weight

            if proba is None:
                proba = current_proba
            else:
                proba += current_proba

            # Same normalization as predict_proba, applied to the running
            # partial ensemble.
            real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
            normalizer = real_proba.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            real_proba /= normalizer

            yield real_proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        The predicted class log-probabilities of an input sample is computed as
        the weighted mean predicted class log-probabilities of the classifiers
        in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        p : array of shape = [n_samples]
            The class probabilities of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
        """
        return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
    """An AdaBoost regressor.

    An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
    regressor on the original dataset and then fits additional copies of the
    regressor on the same dataset but where the weights of instances are
    adjusted according to the error of the current prediction. As such,
    subsequent regressors focus more on difficult cases.

    This class implements the algorithm known as AdaBoost.R2 [2].

    Read more in the :ref:`User Guide <adaboost>`.

    Parameters
    ----------
    base_estimator : object, optional (default=DecisionTreeRegressor)
        The base estimator from which the boosted ensemble is built.
        Support for sample weighting is required.

    n_estimators : integer, optional (default=50)
        The maximum number of estimators at which boosting is terminated.
        In case of perfect fit, the learning procedure is stopped early.

    learning_rate : float, optional (default=1.)
        Learning rate shrinks the contribution of each regressor by
        ``learning_rate``. There is a trade-off between ``learning_rate`` and
        ``n_estimators``.

    loss : {'linear', 'square', 'exponential'}, optional (default='linear')
        The loss function to use when updating the weights after each
        boosting iteration.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    estimators_ : list of classifiers
        The collection of fitted sub-estimators.

    estimator_weights_ : array of floats
        Weights for each estimator in the boosted ensemble.

    estimator_errors_ : array of floats
        Regression error for each estimator in the boosted ensemble.

    feature_importances_ : array of shape = [n_features]
        The feature importances if supported by the ``base_estimator``.

    See also
    --------
    AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor

    References
    ----------
    .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
           on-Line Learning and an Application to Boosting", 1995.

    .. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
    """
    def __init__(self,
                 base_estimator=None,
                 n_estimators=50,
                 learning_rate=1.,
                 loss='linear',
                 random_state=None):

        super(AdaBoostRegressor, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            learning_rate=learning_rate,
            random_state=random_state)

        self.loss = loss
        # NOTE(review): redundant — the base-class __init__ above already
        # stores random_state; kept for byte-compatibility.
        self.random_state = random_state

    def fit(self, X, y, sample_weight=None):
        """Build a boosted regressor from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        y : array-like of shape = [n_samples]
            The target values (real numbers).

        sample_weight : array-like of shape = [n_samples], optional
            Sample weights. If None, the sample weights are initialized to
            1 / n_samples.

        Returns
        -------
        self : object
            Returns self.
        """
        # Check loss
        if self.loss not in ('linear', 'square', 'exponential'):
            raise ValueError(
                "loss must be 'linear', 'square', or 'exponential'")

        # Fit; the base class runs the boosting loop and calls _boost below.
        return super(AdaBoostRegressor, self).fit(X, y, sample_weight)

    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        super(AdaBoostRegressor, self)._validate_estimator(
            default=DecisionTreeRegressor(max_depth=3))

    def _boost(self, iboost, X, y, sample_weight):
        """Implement a single boost for regression

        Perform a single boost according to the AdaBoost.R2 algorithm and
        return the updated sample weights.

        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.

        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        y : array-like of shape = [n_samples]
            The target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like of shape = [n_samples]
            The current sample weights.

        Returns
        -------
        sample_weight : array-like of shape = [n_samples] or None
            The reweighted sample weights.
            If None then boosting has terminated early.

        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.

        estimator_error : float
            The regression error for the current boost.
            If None then boosting has terminated early.
        """
        estimator = self._make_estimator()

        # Seed the weak learner when it accepts random_state; estimators
        # without that parameter raise ValueError, which is ignored.
        try:
            estimator.set_params(random_state=self.random_state)
        except ValueError:
            pass

        generator = check_random_state(self.random_state)

        # Weighted sampling of the training set with replacement
        # For NumPy >= 1.7.0 use np.random.choice
        cdf = sample_weight.cumsum()
        cdf /= cdf[-1]
        uniform_samples = generator.random_sample(X.shape[0])
        bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
        # searchsorted returns a scalar
        bootstrap_idx = np.array(bootstrap_idx, copy=False)

        # Fit on the bootstrapped sample and obtain a prediction
        # for all samples in the training set
        estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
        y_predict = estimator.predict(X)

        # Normalized absolute residuals in [0, 1].
        error_vect = np.abs(y_predict - y)
        error_max = error_vect.max()

        if error_max != 0.:
            error_vect /= error_max

        if self.loss == 'square':
            error_vect **= 2
        elif self.loss == 'exponential':
            error_vect = 1. - np.exp(- error_vect)

        # Calculate the average loss
        estimator_error = (sample_weight * error_vect).sum()

        if estimator_error <= 0:
            # Stop if fit is perfect
            return sample_weight, 1., 0.
        elif estimator_error >= 0.5:
            # Discard current estimator only if it isn't the only one
            if len(self.estimators_) > 1:
                self.estimators_.pop(-1)
            return None, None, None

        # beta < 1 since estimator_error < 0.5 here.
        beta = estimator_error / (1. - estimator_error)

        # Boost weight using AdaBoost.R2 alg
        estimator_weight = self.learning_rate * np.log(1. / beta)

        if not iboost == self.n_estimators - 1:
            # Low-error samples get their weight shrunk the most.
            sample_weight *= np.power(
                beta,
                (1. - error_vect) * self.learning_rate)

        return sample_weight, estimator_weight, estimator_error

    def _get_median_predict(self, X, limit):
        # Evaluate predictions of all estimators
        predictions = np.array([
            est.predict(X) for est in self.estimators_[:limit]]).T

        # Sort the predictions
        sorted_idx = np.argsort(predictions, axis=1)

        # Find index of median prediction for each sample
        weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
        median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
        median_idx = median_or_above.argmax(axis=1)

        median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]

        # Return median predictions
        return predictions[np.arange(X.shape[0]), median_estimators]

    def predict(self, X):
        """Predict regression value for X.

        The predicted regression value of an input sample is computed
        as the weighted median prediction of the classifiers in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted regression values.
        """
        check_is_fitted(self, "estimator_weights_")
        X = self._validate_X_predict(X)

        return self._get_median_predict(X, len(self.estimators_))

    def staged_predict(self, X):
        """Return staged predictions for X.

        The predicted regression value of an input sample is computed
        as the weighted median prediction of the classifiers in the ensemble.

        This generator method yields the ensemble prediction after each
        iteration of boosting and therefore allows monitoring, such as to
        determine the prediction on a test set after each boost.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        y : generator of array, shape = [n_samples]
            The predicted regression values.
        """
        check_is_fitted(self, "estimator_weights_")
        X = self._validate_X_predict(X)

        for i, _ in enumerate(self.estimators_, 1):
            yield self._get_median_predict(X, limit=i)
|
bsd-3-clause
|
miloharper/neural-network-animation
|
matplotlib/sphinxext/plot_directive.py
|
11
|
26894
|
"""
A directive for including a matplotlib plot in a Sphinx document.
By default, in HTML output, `plot` will include a .png file with a
link to a high-res .png and .pdf. In LaTeX output, it will include a
.pdf.
The source code for the plot may be included in one of three ways:
1. **A path to a source file** as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
Additionally, one may specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as **inline content** to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
3. Using **doctest** syntax::
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
Options
-------
The ``plot`` directive supports the following options:
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `plot_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
context : bool or str
If provided, the code will be run in the context of all
previous plot directives for which the `:context:` option was
specified. This only applies to inline code plot directives,
not those run from files. If the ``:context: reset`` is specified,
the context is reset for this and future plots.
nofigs : bool
If specified, the code block will be run, but no figures will
be inserted. This is usually useful with the ``:context:``
option.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since plot will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_html_show_source_link
Whether to show a link to the source in HTML.
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which ``plot::`` file names are relative
to. (If None or empty, file names are relative to the
directory where the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
plot_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each plot.
plot_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a plot directive. This configuration option overrides this behavior
and applies rcParams before each plot.
plot_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
plot_template
Provide a customized template for preparing restructured text.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import sys, os, shutil, io, re, textwrap
from os.path import relpath
import traceback
if not six.PY3:
import cStringIO
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.images import Image
# Re-export the image directive's alignment validator for local use.
align = Image.align

import sphinx

sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[^0-9]', x)[0])
                        for x in sphinx_version[:2]])
try:
    # Sphinx depends on either Jinja or Jinja2; prefer the Jinja2 API.
    import jinja2

    def format_template(template, **kw):
        # Render the template source string with the given keyword context.
        return jinja2.Template(template).render(**kw)
except ImportError:
    # Fall back to the legacy Jinja 1 API.
    import jinja

    def format_template(template, **kw):
        return jinja.from_string(template, **kw)
import matplotlib
import matplotlib.cbook as cbook
# Force the non-interactive Agg backend before pyplot is imported, so the
# directive works on build machines without a display.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers

# Version of this extension's output
# (presumably used to invalidate previously generated files — TODO confirm).
__version__ = 2
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def plot_directive(name, arguments, options, content, lineno,
                   content_offset, block_text, state, state_machine):
    # docutils directive entry point; all the work happens in ``run``.
    return run(arguments, content, options, state_machine, state, lineno)
# Expose the module docstring as the directive's documentation.
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_context(arg):
if arg in [None, 'reset']:
return arg
else:
raise ValueError("argument should be None or 'reset'")
return directives.choice(arg, ('None', 'reset'))
def _option_format(arg):
    # Validate the ``:format:`` option: must be 'python' or 'doctest'.
    return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
    # Validate the ``:align:`` option against the values accepted by the
    # standard image directive.
    return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
                                   "right"))
def mark_plot_labels(app, document):
    """
    To make plots referenceable, we need to move the reference from
    the "htmlonly" (or "latexonly") node to the actual figure node
    itself.
    """
    for name, explicit in six.iteritems(document.nametypes):
        if not explicit:
            continue
        labelid = document.nameids[name]
        if labelid is None:
            continue
        node = document.ids[labelid]
        if node.tagname in ('html_only', 'latex_only'):
            for n in node:
                if n.tagname == 'figure':
                    sectname = name
                    # Prefer the figure caption text as the section name.
                    for c in n:
                        if c.tagname == 'caption':
                            sectname = c.astext()
                            break

                    # Re-home the label from the wrapper node to the figure
                    # node so cross-references resolve to the figure.
                    node['ids'].remove(labelid)
                    node['names'].remove(name)
                    n['ids'].append(labelid)
                    n['names'].append(name)
                    document.settings.env.labels[name] = \
                        document.settings.env.docname, labelid, sectname
                    break
def setup(app):
    """Sphinx extension entry point: register the plot directive and config."""
    # Stash references to the Sphinx application and its config so that
    # module-level helpers (run_code, render_figures, run, ...) can reach them.
    setup.app = app
    setup.config = app.config
    setup.confdir = app.confdir

    # Option spec for the ``plot`` directive.
    options = {'alt': directives.unchanged,
               'height': directives.length_or_unitless,
               'width': directives.length_or_percentage_or_unitless,
               'scale': directives.nonnegative_int,
               'align': _option_align,
               'class': directives.class_option,
               'include-source': _option_boolean,
               'format': _option_format,
               'context': _option_context,
               'nofigs': directives.flag,
               'encoding': directives.encoding
               }

    app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
    app.add_config_value('plot_pre_code', None, True)
    app.add_config_value('plot_include_source', False, True)
    app.add_config_value('plot_html_show_source_link', True, True)
    app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
    app.add_config_value('plot_basedir', None, True)
    app.add_config_value('plot_html_show_formats', True, True)
    app.add_config_value('plot_rcparams', {}, True)
    app.add_config_value('plot_apply_rcparams', False, True)
    app.add_config_value('plot_working_directory', None, True)
    app.add_config_value('plot_template', None, True)

    # str() because Sphinx on Python 2 expects a native-string event name.
    app.connect(str('doctree-read'), mark_plot_labels)
#------------------------------------------------------------------------------
# Doctest handling
#------------------------------------------------------------------------------
def contains_doctest(text):
    """Return True when *text* is doctest-style input rather than plain code.

    Plain Python is detected by compiling successfully; if compilation
    fails we look for a ``>>>`` prompt at the start of any line.
    """
    try:
        # check if it's valid Python as-is
        compile(text, '<string>', 'exec')
    except SyntaxError:
        return bool(re.search(r'^\s*>>>', text, re.M))
    return False


def unescape_doctest(text):
    """
    Extract code from a piece of text, which contains either Python code
    or doctests.
    """
    if not contains_doctest(text):
        return text

    prompt = re.compile(r'^\s*(>>>|\.\.\.) (.*)$')
    pieces = []
    for line in text.split("\n"):
        match = prompt.match(line)
        if match:
            # Keep the code after the prompt.
            pieces.append(match.group(2))
        elif line.strip():
            # Non-prompt, non-blank lines (doctest output) become comments.
            pieces.append("# " + line.strip())
        else:
            pieces.append("")
    return "\n".join(pieces) + "\n"


def split_code_at_show(text):
    """
    Split code at plt.show()
    """
    marker = '>>> plt.show()' if contains_doctest(text) else 'plt.show()'

    parts = []
    current = []
    for line in text.split("\n"):
        current.append(line)
        if line.strip() == marker:
            # Flush everything up to and including the show() call.
            parts.append("\n".join(current))
            current = []
    # Keep a trailing piece only if it contains something non-blank.
    if "\n".join(current).strip():
        parts.append("\n".join(current))
    return parts
def remove_coding(text):
    """
    Remove the coding comment, which six.exec_ doesn't like.
    """
    # Raw string: the original used a plain string whose ``\s``/``\*``
    # escapes are invalid escape sequences (a warning on modern Python,
    # an error in future versions).
    return re.sub(
        r"^#\s*-\*-\s*coding:\s*.*-\*-$", "", text, flags=re.MULTILINE)
#------------------------------------------------------------------------------
# Template
#------------------------------------------------------------------------------
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }}
{% endfor %}
{{ only_latex }}
{% for img in images %}
{% if 'pdf' in img.formats -%}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endif -%}
{% endfor %}
{{ only_texinfo }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% endfor %}
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# the context of the plot for all directives specified with the
# :context: option
plot_context = dict()
class ImageFile(object):
    """One generated image, identified by directory + basename, in 1+ formats."""

    def __init__(self, basename, dirname):
        self.basename = basename  # file name without extension
        self.dirname = dirname    # directory the image lives in
        self.formats = []         # extensions generated so far, e.g. ['png', 'pdf']

    def filename(self, format):
        """Full path of this image rendered in the given *format*."""
        return os.path.join(self.dirname, self.basename + "." + format)

    def filenames(self):
        """Full paths for every generated format, in generation order."""
        return list(map(self.filename, self.formats))
def out_of_date(original, derived):
    """
    Returns True if derivative is out-of-date wrt original,
    both of which are full file paths.
    """
    if not os.path.exists(derived):
        # Nothing built yet: always rebuild.
        return True
    if not os.path.exists(original):
        # Source is gone but output exists: treat output as current.
        return False
    return os.stat(derived).st_mtime < os.stat(original).st_mtime
class PlotError(RuntimeError):
    """Raised when running the user's plot code or saving a figure fails."""
    pass
def run_code(code, code_path, ns=None, function_name=None):
    """
    Import a Python module from a path, and run the function given by
    name, if function_name is not None.
    """

    # Change the working directory to the directory of the example, so
    # it can get at its data files, if any.  Add its path to sys.path
    # so it can import any helper modules sitting beside it.
    if six.PY2:
        pwd = os.getcwdu()
    else:
        pwd = os.getcwd()
    old_sys_path = list(sys.path)
    if setup.config.plot_working_directory is not None:
        try:
            os.chdir(setup.config.plot_working_directory)
        except OSError as err:
            raise OSError(str(err) + '\n`plot_working_directory` option in'
                          'Sphinx configuration file must be a valid '
                          'directory path')
        except TypeError as err:
            raise TypeError(str(err) + '\n`plot_working_directory` option in '
                            'Sphinx configuration file must be a string or '
                            'None')
        sys.path.insert(0, setup.config.plot_working_directory)
    elif code_path is not None:
        dirname = os.path.abspath(os.path.dirname(code_path))
        os.chdir(dirname)
        sys.path.insert(0, dirname)

    # Reset sys.argv
    old_sys_argv = sys.argv
    sys.argv = [code_path]

    # Redirect stdout
    stdout = sys.stdout
    if six.PY3:
        sys.stdout = io.StringIO()
    else:
        sys.stdout = cStringIO.StringIO()

    # Assign a do-nothing print function to the namespace.  There
    # doesn't seem to be any other way to provide a way to (not) print
    # that works correctly across Python 2 and 3.
    def _dummy_print(*arg, **kwarg):
        pass

    try:
        try:
            code = unescape_doctest(code)
            if ns is None:
                ns = {}
            if not ns:
                # Seed the namespace: either the user-configured pre-code or
                # the default numpy / pyplot imports.
                if setup.config.plot_pre_code is None:
                    six.exec_(six.text_type("import numpy as np\n" +
                              "from matplotlib import pyplot as plt\n"), ns)
                else:
                    six.exec_(six.text_type(setup.config.plot_pre_code), ns)
            ns['print'] = _dummy_print
            if "__main__" in code:
                six.exec_("__name__ = '__main__'", ns)
            code = remove_coding(code)
            six.exec_(code, ns)
            if function_name is not None:
                six.exec_(function_name + "()", ns)
        except (Exception, SystemExit) as err:
            # Wrap any failure (including SystemExit) so callers can report it
            # with a full traceback.
            raise PlotError(traceback.format_exc())
    finally:
        # Always restore the interpreter state clobbered above.
        os.chdir(pwd)
        sys.argv = old_sys_argv
        sys.path[:] = old_sys_path
        sys.stdout = stdout
    return ns
def clear_state(plot_rcparams, close=True):
    # Reset matplotlib between code pieces: optionally close all open
    # figures, restore rc defaults, then apply the directive-configured
    # rc overrides.
    if close:
        plt.close('all')
    matplotlib.rc_file_defaults()
    matplotlib.rcParams.update(plot_rcparams)
def render_figures(code, code_path, output_dir, output_base, context,
                   function_name, config, context_reset=False):
    """
    Run a pyplot script and save the low and high res PNGs and a PDF
    in *output_dir*.

    Save the images under *output_dir* with file names derived from
    *output_base*

    Returns a list of (code_piece, [ImageFile, ...]) pairs, one per
    plt.show()-separated piece of the script.
    """
    # -- Parse format list
    default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
    formats = []
    plot_formats = config.plot_formats
    if isinstance(plot_formats, six.string_types):
        # NOTE(review): eval of a config string -- assumes conf.py is trusted.
        plot_formats = eval(plot_formats)
    for fmt in plot_formats:
        if isinstance(fmt, six.string_types):
            formats.append((fmt, default_dpi.get(fmt, 80)))
        elif type(fmt) in (tuple, list) and len(fmt) == 2:
            formats.append((str(fmt[0]), int(fmt[1])))
        else:
            raise PlotError('invalid image format "%r" in plot_formats' % fmt)

    # -- Try to determine if all images already exist

    code_pieces = split_code_at_show(code)

    # Look for single-figure output files first
    all_exists = True
    img = ImageFile(output_base, output_dir)
    for format, dpi in formats:
        if out_of_date(code_path, img.filename(format)):
            all_exists = False
            break
        img.formats.append(format)

    if all_exists:
        # Cache hit: nothing to rebuild.
        return [(code, [img])]

    # Then look for multi-figure output files
    results = []
    all_exists = True
    for i, code_piece in enumerate(code_pieces):
        images = []
        for j in xrange(1000):
            # Multi-piece scripts get piece and figure indices in the name;
            # single-piece scripts only the figure index.
            if len(code_pieces) > 1:
                img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
            else:
                img = ImageFile('%s_%02d' % (output_base, j), output_dir)
            for format, dpi in formats:
                if out_of_date(code_path, img.filename(format)):
                    all_exists = False
                    break
                img.formats.append(format)

            # assume that if we have one, we have them all
            if not all_exists:
                all_exists = (j > 0)
                break
            images.append(img)
        if not all_exists:
            break
        results.append((code_piece, images))

    if all_exists:
        return results

    # We didn't find the files, so build them

    results = []
    # With :context:, all pieces share one persistent namespace.
    if context:
        ns = plot_context
    else:
        ns = {}

    if context_reset:
        clear_state(config.plot_rcparams)

    for i, code_piece in enumerate(code_pieces):

        if not context or config.plot_apply_rcparams:
            clear_state(config.plot_rcparams, close=not context)

        run_code(code_piece, code_path, ns, function_name)

        images = []
        fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
        for j, figman in enumerate(fig_managers):
            if len(fig_managers) == 1 and len(code_pieces) == 1:
                img = ImageFile(output_base, output_dir)
            elif len(code_pieces) == 1:
                img = ImageFile("%s_%02d" % (output_base, j), output_dir)
            else:
                img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
                                output_dir)
            images.append(img)
            for format, dpi in formats:
                try:
                    figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
                except Exception as err:
                    raise PlotError(traceback.format_exc())
                img.formats.append(format)

        results.append((code_piece, images))

    if not context or config.plot_apply_rcparams:
        clear_state(config.plot_rcparams, close=not context)

    return results
def run(arguments, content, options, state_machine, state, lineno):
    """Core implementation of the ``plot`` directive.

    Resolves the source (file argument or inline content), renders the
    figures via render_figures(), fills in the reST template, inserts the
    result into the document, and copies images plus the script into the
    builder output directory.  Returns a list of system messages (empty
    on success).
    """
    # The user may provide a filename *or* Python code content, but not both
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")

    document = state_machine.document
    config = document.settings.env.config
    nofigs = 'nofigs' in options

    options.setdefault('include-source', config.plot_include_source)
    context = 'context' in options
    context_reset = True if (context and options['context'] == 'reset') else False

    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if len(arguments):
        # Filename form: resolve relative to srcdir or plot_basedir.
        if not config.plot_basedir:
            source_file_name = os.path.join(setup.app.builder.srcdir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))

        # If there is content, it will be passed as a caption.
        caption = '\n'.join(content)

        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None

        with io.open(source_file_name, 'r', encoding='utf-8') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        # Inline form: the code is the directive content; number outputs
        # per-document with a running counter.
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
        function_name = None
        caption = ''

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''

    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    # get rid of .. in paths, also changes pathsep
    # see note in Python docs for warning about symbolic links on Windows.
    # need to compare source and dest paths at end
    build_dir = os.path.normpath(build_dir)

    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)  # no problem here for me, but just use built-ins

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = render_figures(code, source_file_name, build_dir, output_base,
                                 context, function_name, config,
                                 context_reset=context_reset)
        errors = []
    except PlotError as err:
        # Report the failure as a docutils system message and fall through
        # with an empty image list so the source can still be shown.
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
                                                source_file_name, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # Properly indent the caption (6 spaces to sit under the figure body
    # in TEMPLATE).
    caption = '\n'.join('      ' + line.strip()
                        for line in caption.split('\n'))

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        if nofigs:
            images = []

        opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
        only_texinfo = ".. only:: texinfo"

        # Not-None src_link signals the need for a source link in the generated
        # html
        if j == 0 and config.plot_html_show_source_link:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            config.plot_template or TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            only_texinfo=only_texinfo,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats and not nofigs,
            caption=caption)

        total_lines.extend(result.split("\n"))
        # extend() over the one-character string "\n" appends a single
        # blank-line element (acts like append here).
        total_lines.extend("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory, if necessary
    if not os.path.exists(dest_dir):
        cbook.mkdirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                destimg = os.path.join(dest_dir, os.path.basename(fn))
                if fn != destimg:
                    shutil.copyfile(fn, destimg)

    # copy script (if necessary)
    target_name = os.path.join(dest_dir, output_base + source_ext)
    with io.open(target_name, 'w', encoding="utf-8") as f:
        if source_file_name == rst_file:
            code_escaped = unescape_doctest(code)
        else:
            code_escaped = code
        f.write(code_escaped)

    return errors
|
mit
|
scribble/scribble.github.io
|
src/main/jbake/assets/docs/lchannels/scripts/plot-benchmark.py
|
2
|
2325
|
#!/usr/bin/env python
import matplotlib.pyplot as plotlib
import numpy
import sys

# Field separator used in the benchmark CSV files.
DELIMITER = ','
def makePlot(infile, outfile):
    """Render a box plot of benchmark timings from *infile* into *outfile*.

    *infile* layout: line 1 is the benchmark title, line 2 the column
    headers, and the remaining lines hold comma-separated timings (one
    column per benchmarked implementation).  Values are divided by 1e6
    before plotting; the y-axis is labelled 'milliseconds', so the raw
    numbers are presumably nanoseconds -- confirm against the harness.
    """
    import matplotlib
    matplotlib.rcParams.update({'font.size': 12})

    (title, headers) = readTitleAndHeaders(infile)
    # Materialize so len(headers) below also works when headers is a lazy
    # map object (Python 3 behaviour of the original reader).
    headers = list(headers)

    data = numpy.genfromtxt(infile,
                            delimiter=DELIMITER,
                            skip_header=1,  # Skip benchmark title
                            # numpy.long was removed in NumPy 1.24;
                            # int64 is the equivalent fixed-width dtype.
                            dtype=numpy.int64) / 1000000.0
    # NOTE(review): skip_header=1 skips only the title line; the header row
    # is still fed to genfromtxt -- verify it is tolerated with a sample file.

    box_colours = ['ForestGreen', 'SkyBlue', 'Tan', 'Plum', 'ForestGreen', 'Maroon', 'ForestGreen']

    locations = range(1, len(headers) + 1)

    fig = plotlib.figure()
    plot = plotlib.boxplot(data, widths=0.7, notch=True, positions=locations,
                           patch_artist=True,
                           sym='')  # Do not print outliers
    for box, colour in zip(plot['boxes'], box_colours):
        plotlib.setp(box,
                     linewidth=1,
                     facecolor=colour)

    plotlib.grid(axis='y',           # set y-axis grid lines
                 linestyle='--',     # use dashed lines
                 which='major',      # only major ticks
                 color='lightgrey',  # line colour
                 alpha=0.8)          # make lines semi-translucent

    plotlib.xticks(locations,    # tick marks
                   headers,      # labels
                   rotation=25)  # rotate the labels

    plotlib.ylabel('milliseconds')                        # y-axis label
    plotlib.title(title, fontsize=12, fontweight='bold')  # plot title

    fig.savefig(outfile, bbox_inches='tight')
def readTitleAndHeaders(infile):
    """Read the benchmark title (line 1) and column headers (line 2) of *infile*.

    Returns ``(title, headers)`` where *title* keeps its trailing newline
    (it is passed straight to the plot title) and *headers* is a list of
    column labels with spaces turned into newlines so they wrap nicely as
    x-axis tick labels.
    """
    # 'with' guarantees the file is closed even if a read fails (the
    # original left it open on error).
    with open(infile) as f:
        title = f.readline()
        # A real list (not a lazy map) so callers can take len() on Python 3.
        headers = [h.replace(' ', "\n")
                   for h in f.readline().strip().split(DELIMITER)]
    return (title, headers)
# Command-line entry point: plot-benchmark.py <input-csv> <output-image>
if (__name__ == '__main__'):
    infile = sys.argv[1]   # benchmark CSV produced by the harness
    outfile = sys.argv[2]  # output image path; format inferred from extension
    makePlot(infile, outfile)
|
apache-2.0
|
ContinuumIO/blaze
|
blaze/compute/tests/test_numpy_compute.py
|
3
|
20852
|
from __future__ import absolute_import, division, print_function
import pytest
import itertools
import numpy as np
import pandas as pd
from datetime import datetime, date
from blaze.compute.core import compute, compute_up
from blaze.expr import symbol, by, exp, summary, Broadcast, join, concat
from blaze.expr import greatest, least, coalesce
from blaze import sin
import blaze
from odo import into
from datashape import discover, to_numpy, dshape
# Shared fixture: a small structured array of accounts plus a blaze
# symbol ``t`` whose dshape is discovered from the data.
x = np.array([(1, 'Alice', 100),
              (2, 'Bob', -200),
              (3, 'Charlie', 300),
              (4, 'Denis', 400),
              (5, 'Edith', -500)],
             dtype=[('id', 'i8'), ('name', 'S7'), ('amount', 'i8')])

t = symbol('t', discover(x))
def eq(a, b):
    """Equality helper: reduce elementwise ndarray comparisons with .all()."""
    result = a == b
    if not isinstance(result, np.ndarray):
        return result
    return result.all()
# compute() on a bare symbol should hand back the data unchanged.
def test_symbol():
    assert eq(compute(t, x), x)


# Elementwise equality against a scalar mirrors numpy semantics.
def test_eq():
    assert eq(compute(t['amount'] == 100, x),
              x['amount'] == 100)
def test_selection():
    # Row filtering must mirror numpy boolean-mask indexing.  The first
    # expectation previously compared against ``x[x['amount'] == 0]`` -- an
    # empty mask that can never equal the one-row result of selecting
    # amount == 100; both sides now use the same predicate.
    assert eq(compute(t[t['amount'] == 100], x), x[x['amount'] == 100])
    assert eq(compute(t[t['amount'] < 0], x), x[x['amount'] < 0])
# Binary arithmetic on columns should match the equivalent numpy expressions.
def test_arithmetic():
    assert eq(compute(t['amount'] + t['id'], x),
              x['amount'] + x['id'])
    assert eq(compute(t['amount'] * t['id'], x),
              x['amount'] * x['id'])
    assert eq(compute(t['amount'] % t['id'], x),
              x['amount'] % x['id'])


# Unary math (exp, abs) lowers to the numpy ufuncs.
def test_UnaryOp():
    assert eq(compute(exp(t['amount']), x),
              np.exp(x['amount']))

    assert eq(compute(abs(-t['amount']), x),
              abs(-x['amount']))


# Unary negation.
def test_Neg():
    assert eq(compute(-t['amount'], x),
              -x['amount'])


# ~ on a boolean expression is elementwise logical not.
def test_invert_not():
    assert eq(compute(~(t.amount > 0), x),
              ~(x['amount'] > 0))


# Scalar reductions (mean/count/sum/min/max/nunique/var/std, any/all,
# positional indexing) against their numpy counterparts.
def test_Reductions():
    assert compute(t['amount'].mean(), x) == x['amount'].mean()
    assert compute(t['amount'].count(), x) == len(x['amount'])
    assert compute(t['amount'].sum(), x) == x['amount'].sum()
    assert compute(t['amount'].min(), x) == x['amount'].min()
    assert compute(t['amount'].max(), x) == x['amount'].max()
    assert compute(t['amount'].nunique(), x) == len(np.unique(x['amount']))
    assert compute(t['amount'].var(), x) == x['amount'].var()
    assert compute(t['amount'].std(), x) == x['amount'].std()
    assert compute(t['amount'].var(unbiased=True), x) == x['amount'].var(ddof=1)
    assert compute(t['amount'].std(unbiased=True), x) == x['amount'].std(ddof=1)
    assert compute((t['amount'] > 150).any(), x) == True
    assert compute((t['amount'] > 250).all(), x) == False
    assert compute(t['amount'][0], x) == x['amount'][0]
    assert compute(t['amount'][-1], x) == x['amount'][-1]


# count() skips the missing value in an object-dtype string column.
def test_count_string():
    s = symbol('name', 'var * ?string')
    x = np.array(['Alice', np.nan, 'Bob', 'Denis', 'Edith'], dtype='object')
    assert compute(s.count(), x) == 4


# count() on a record array counts rows.
def test_reductions_on_recarray():
    assert compute(t.count(), x) == len(x)


# NaNs are excluded from count() on optional floats.
def test_count_nan():
    t = symbol('t', '3 * ?real')
    x = np.array([1.0, np.nan, 2.0])
    assert compute(t.count(), x) == 2


# distinct() on a single column and on whole records.
def test_distinct():
    x = np.array([('Alice', 100),
                  ('Alice', -200),
                  ('Bob', 100),
                  ('Bob', 100)],
                 dtype=[('name', 'S5'), ('amount', 'i8')])

    t = symbol('t', 'var * {name: string, amount: int64}')

    assert eq(compute(t['name'].distinct(), x),
              np.unique(x['name']))
    assert eq(compute(t.distinct(), x),
              np.unique(x))


# distinct(on=...) keeps the first row per key in a record array.
def test_distinct_on_recarray():
    rec = pd.DataFrame(
        [[0, 1],
         [0, 2],
         [1, 1],
         [1, 2]],
        columns=('a', 'b'),
    ).to_records(index=False)

    s = symbol('s', discover(rec))
    assert (
        compute(s.distinct('a'), rec) ==
        pd.DataFrame(
            [[0, 1],
             [1, 1]],
            columns=('a', 'b'),
        ).to_records(index=False)
    ).all()


# Same keyed distinct, on a plain structured array of floats.
def test_distinct_on_structured_array():
    arr = np.array(
        [(0., 1.),
         (0., 2.),
         (1., 1.),
         (1., 2.)],
        dtype=[('a', 'f4'), ('b', 'f4')],
    )
    s = symbol('s', discover(arr))
    assert(
        compute(s.distinct('a'), arr) ==
        np.array([(0., 1.), (1., 1.)], dtype=arr.dtype)
    ).all()


# Keyed distinct with unicode string columns.
def test_distinct_on_str():
    rec = pd.DataFrame(
        [['a', 'a'],
         ['a', 'b'],
         ['b', 'a'],
         ['b', 'b']],
        columns=('a', 'b'),
    ).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')])
    s = symbol('s', discover(rec))
    assert (
        compute(s.distinct('a'), rec) ==
        pd.DataFrame(
            [['a', 'a'],
             ['b', 'a']],
            columns=('a', 'b'),
        ).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')])
    ).all()


# sort() on single/multiple keys, descending order, and bare columns.
def test_sort():
    assert eq(compute(t.sort('amount'), x),
              np.sort(x, order='amount'))

    assert eq(compute(t.sort('amount', ascending=False), x),
              np.sort(x, order='amount')[::-1])

    assert eq(compute(t.sort(['amount', 'id']), x),
              np.sort(x, order=['amount', 'id']))

    assert eq(compute(t.amount.sort(), x),
              np.sort(x['amount']))


# head()/tail() are prefix/suffix slices.
def test_head():
    assert eq(compute(t.head(2), x),
              x[:2])


def test_tail():
    assert eq(compute(t.tail(2), x),
              x[-2:])


# label() names the field of a computed column.
def test_label():
    expected = x['amount'] * 10
    expected = np.array(expected, dtype=[('foo', 'i8')])
    assert eq(compute((t['amount'] * 10).label('foo'), x),
              expected)


# relabel() renames several fields at once, preserving field order.
def test_relabel():
    expected = np.array(x, dtype=[('ID', 'i8'), ('NAME', 'S7'), ('amount', 'i8')])
    result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), x)

    assert result.dtype.names == expected.dtype.names
    assert eq(result, expected)


# by() groups on a boolean key and aggregates with count().
def test_by():
    expr = by(t.amount > 0, count=t.id.count())
    result = compute(expr, x)
    assert set(map(tuple, into(list, result))) == set([(False, 2), (True, 3)])


# Field access and multi-column projection via compute_up.
def test_compute_up_field():
    assert eq(compute(t['name'], x), x['name'])


def test_compute_up_projection():
    assert eq(compute_up(t[['name', 'amount']], x), x[['name', 'amount']])
# 3-d float fixture for the slicing/reduction tests below.
ax = np.arange(30, dtype='f4').reshape((5, 3, 2))
a = symbol('a', discover(ax))


# Every supported slice form should behave exactly like numpy indexing.
def test_slice():
    inds = [0, slice(2), slice(1, 3), slice(None, None, 2), [1, 2, 3],
            (0, 1), (0, slice(1, 3)), (slice(0, 3), slice(3, 1, -1)),
            (0, [1, 2])]
    for s in inds:
        assert (compute(a[s], ax) == ax[s]).all()


# Reductions over explicit axes (including tuples of axes).
def test_array_reductions():
    for axis in [None, 0, 1, (0, 1), (2, 1)]:
        assert eq(compute(a.sum(axis=axis), ax), ax.sum(axis=axis))
        assert eq(compute(a.std(axis=axis), ax), ax.std(axis=axis))


# keepdims=True must be forwarded to numpy.
def test_array_reductions_with_keepdims():
    for axis in [None, 0, 1, (0, 1), (2, 1)]:
        assert eq(compute(a.sum(axis=axis, keepdims=True), ax),
                  ax.sum(axis=axis, keepdims=True))


# summary() packs several reductions into one record; the expected
# field order here is ('min', 'total'), matching the dtype below.
def test_summary_on_ndarray():
    assert compute(summary(total=a.sum(), min=a.min()), ax) == \
        (ax.min(), ax.sum())

    result = compute(summary(total=a.sum(), min=a.min(), keepdims=True), ax)
    expected = np.array([(ax.min(), ax.sum())],
                        dtype=[('min', 'float32'), ('total', 'float64')])
    assert result.ndim == ax.ndim
    assert eq(expected, result)


# summary() with an axis yields a structured array of the reduced shape.
def test_summary_on_ndarray_with_axis():
    for axis in [0, 1, (1, 0)]:
        expr = summary(total=a.sum(), min=a.min(), axis=axis)
        result = compute(expr, ax)

        shape, dtype = to_numpy(expr.dshape)
        expected = np.empty(shape=shape, dtype=dtype)
        expected['total'] = ax.sum(axis=axis)
        expected['min'] = ax.min(axis=axis)

        assert eq(result, expected)


# int64 epoch seconds -> datetime64[us] conversion.
def test_utcfromtimestamp():
    t = symbol('t', '1 * int64')
    data = np.array([0, 1])
    expected = np.array(['1970-01-01T00:00:00Z', '1970-01-01T00:00:01Z'],
                        dtype='M8[us]')
    assert eq(compute(t.utcfromtimestamp, data), expected)


# nelements() on records counts rows; keepdims wraps the count in a tuple.
def test_nelements_structured_array():
    assert compute(t.nelements(), x) == len(x)
    assert compute(t.nelements(keepdims=True), x) == (len(x),)


# nelements() over a subset of axes of a dense array.
def test_nelements_array():
    t = symbol('t', '5 * 4 * 3 * float64')
    x = np.random.randn(*t.shape)
    result = compute(t.nelements(axis=(0, 1)), x)
    np.testing.assert_array_equal(result, np.array([20, 20, 20]))

    result = compute(t.nelements(axis=1), x)
    np.testing.assert_array_equal(result, 4 * np.ones((5, 3)))


def test_nrows():
    assert compute(t.nrows, x) == len(x)


# Datetime fixtures for the truncation tests below.
dts = np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:05Z'],
               dtype='M8[us]')
s = symbol('s', 'var * datetime')


# truncate(n, unit) rounds timestamps down to a multiple of the unit.
def test_datetime_truncation():
    assert eq(compute(s.truncate(1, 'day'), dts),
              dts.astype('M8[D]'))
    assert eq(compute(s.truncate(2, 'seconds'), dts),
              np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:04Z'],
                       dtype='M8[s]'))
    assert eq(compute(s.truncate(2, 'weeks'), dts),
              np.array(['2000-06-18', '2000-06-18'], dtype='M8[D]'))

    # Week truncation lands on a Sunday (isoweekday 7).
    assert into(list, compute(s.truncate(1, 'week'), dts))[0].isoweekday() == 7


# Truncation to whole hours.
def test_hour():
    dts = [datetime(2000, 6, 20, 1, 00, 00),
           datetime(2000, 6, 20, 12, 59, 59),
           datetime(2000, 6, 20, 12, 00, 00),
           datetime(2000, 6, 20, 11, 59, 59)]
    dts = into(np.ndarray, dts)

    assert eq(compute(s.truncate(1, 'hour'), dts),
              into(np.ndarray, [datetime(2000, 6, 20, 1, 0),
                                datetime(2000, 6, 20, 12, 0),
                                datetime(2000, 6, 20, 12, 0),
                                datetime(2000, 6, 20, 11, 0)]))


# Truncation to the first of the month.
def test_month():
    dts = [datetime(2000, 7, 1),
           datetime(2000, 6, 30),
           datetime(2000, 6, 1),
           datetime(2000, 5, 31)]
    dts = into(np.ndarray, dts)

    assert eq(compute(s.truncate(1, 'month'), dts),
              into(np.ndarray, [date(2000, 7, 1),
                                date(2000, 6, 1),
                                date(2000, 6, 1),
                                date(2000, 5, 1)]))


# Truncation also works on a scalar datetime64.
def test_truncate_on_np_datetime64_scalar():
    s = symbol('s', 'datetime')
    data = np.datetime64('2000-01-02T12:30:00Z')
    assert compute(s.truncate(1, 'day'), data) == data.astype('M8[D]')


# Week truncation must agree between numpy and stdlib datetimes.
def test_numpy_and_python_datetime_truncate_agree_on_start_of_week():
    s = symbol('s', 'datetime')
    n = np.datetime64('2014-11-11')
    p = datetime(2014, 11, 11)
    expr = s.truncate(1, 'week')
    assert compute(expr, n) == compute(expr, p)


# Mixed-dtype arithmetic across multiple array leaves.
def test_add_multiple_ndarrays():
    a = symbol('a', '5 * 4 * int64')
    b = symbol('b', '5 * 4 * float32')
    x = np.arange(9, dtype='int64').reshape(3, 3)
    y = (x + 1).astype('float32')
    expr = sin(a) + 2 * b
    scope = {a: x, b: y}
    expected = sin(x) + 2 * y

    # check that we cast correctly
    # NOTE(review): the arrays are 3x3 while the symbols claim 5*4; only
    # dtype promotion (not shape) is being exercised here -- confirm intent.
    assert expr.dshape == dshape('5 * 4 * float64')

    np.testing.assert_array_equal(compute(expr, scope), expected)
    np.testing.assert_array_equal(compute(expr, scope, optimize=False),
                                  expected)


# 2-d / 1-d fixtures for the linear-algebra style tests.
nA = np.arange(30, dtype='f4').reshape((5, 6))
ny = np.arange(6, dtype='f4')
A = symbol('A', discover(nA))
y = symbol('y', discover(ny))


def test_transpose():
    assert eq(compute(A.T, nA), nA.T)
    assert eq(compute(A.transpose((0, 1)), nA), nA)


# Vector-vector and matrix-vector dot products.
def test_dot():
    assert eq(compute(y.dot(y), {y: ny}), np.dot(ny, ny))
    assert eq(compute(A.dot(y), {A: nA, y: ny}), np.dot(nA, ny))


# Chained datetime ops: truncate then extract the day component.
def test_subexpr_datetime():
    data = pd.date_range(start='01/01/2010', end='01/04/2010', freq='D').values
    s = symbol('s', discover(data))
    result = compute(s.truncate(days=2).day, data)
    expected = np.array([31, 2, 2, 4])
    np.testing.assert_array_equal(result, expected)


# Axis reductions over a structured array whose fields have mixed dtypes.
def test_mixed_types():
    x = np.array([[(4, 180), (4, 184), (4, 188), (4, 192), (4, 196)],
                  [(4, 660), (4, 664), (4, 668), (4, 672), (4, 676)],
                  [(4, 1140), (4, 1144), (4, 1148), (4, 1152), (4, 1156)],
                  [(4, 1620), (4, 1624), (4, 1628), (4, 1632), (4, 1636)],
                  [(4, 2100), (4, 2104), (4, 2108), (4, 2112), (4, 2116)]],
                 dtype=[('count', '<i4'), ('total', '<i8')])
    aggregate = symbol('aggregate', discover(x))
    result = compute(aggregate.total.sum(axis=(0,)) /
                     aggregate['count'].sum(axis=(0,)), x)
    expected = (x['total'].sum(axis=0, keepdims=True) /
                x['count'].sum(axis=0, keepdims=True)).squeeze()
    np.testing.assert_array_equal(result, expected)
# Broadcast expressions mix array and scalar leaves.
def test_broadcast_compute_against_numbers_and_arrays():
    A = symbol('A', '5 * float32')
    a = symbol('a', 'float32')
    b = symbol('b', 'float32')
    x = np.arange(5, dtype='f4')
    expr = Broadcast((A, b), (a, b), a + b)
    result = compute(expr, {A: x, b: 10})
    assert eq(result, x + 10)


# map() with a Python callable (requires numba) must stay in numpy land.
def test_map():
    pytest.importorskip('numba')
    a = np.arange(10.0)
    f = lambda x: np.sin(x) + 1.03 * np.cos(x) ** 2
    x = symbol('x', discover(a))
    expr = x.map(f, 'float64')
    result = compute(expr, a)
    expected = f(a)

    # make sure we're not going to pandas here
    assert type(result) == np.ndarray
    assert type(result) == type(expected)

    np.testing.assert_array_equal(result, expected)


# vnorm() with default/explicit order, axis, and keepdims.
def test_vector_norm():
    x = np.arange(30).reshape((5, 6))
    s = symbol('x', discover(x))

    assert eq(compute(s.vnorm(), x),
              np.linalg.norm(x))
    assert eq(compute(s.vnorm(ord=1), x),
              np.linalg.norm(x.flatten(), ord=1))
    assert eq(compute(s.vnorm(ord=4, axis=0), x),
              np.linalg.norm(x, ord=4, axis=0))

    expr = s.vnorm(ord=4, axis=0, keepdims=True)
    assert expr.shape == compute(expr, x).shape


# Inner join on the shared 'name' column.
def test_join():
    cities = np.array([('Alice', 'NYC'),
                       ('Alice', 'LA'),
                       ('Bob', 'Chicago')],
                      dtype=[('name', 'S7'), ('city', 'O')])
    c = symbol('cities', discover(cities))

    expr = join(t, c, 'name')
    result = compute(expr, {t: x, c: cities})

    assert (b'Alice', 1, 100, 'LA') in into(list, result)


# Selections against bytes literals on S-dtype columns.
def test_query_with_strings():
    b = np.array([('a', 1), ('b', 2), ('c', 3)],
                 dtype=[('x', 'S1'), ('y', 'i4')])
    s = symbol('s', discover(b))
    assert compute(s[s.x == b'b'], b).tolist() == [(b'b', 2)]


# isin() should match numpy.in1d membership.
@pytest.mark.parametrize('keys', [['a'], list('bc')])
def test_isin(keys):
    b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6)],
                 dtype=[('x', 'S1'), ('y', 'i4')])
    s = symbol('s', discover(b))
    result = compute(s.x.isin(keys), b)
    expected = np.in1d(b['x'], keys)
    np.testing.assert_array_equal(result, expected)


# nunique() over whole records (duplicate rows counted once).
def test_nunique_recarray():
    b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6),
                  ('a', 1), ('b', 2)],
                 dtype=[('x', 'S1'), ('y', 'i4')])
    s = symbol('s', discover(b))
    expr = s.nunique()
    assert compute(expr, b) == len(np.unique(b))


# String repetition lowers to numpy.char.multiply.
def test_str_repeat():
    a = np.array(('a', 'b', 'c'))
    s = symbol('s', discover(a))
    expr = s.repeat(3)
    assert all(compute(expr, a) == np.char.multiply(a, 3))


# %-interpolation lowers to numpy.char.mod.
def test_str_interp():
    a = np.array(('%s', '%s', '%s'))
    s = symbol('s', discover(a))
    expr = s.interp(1)
    assert all(compute(expr, a) == np.char.mod(a, 1))


# datetime64 +/- timedelta64, including a nested expression.
def test_timedelta_arith():
    dates = np.arange('2014-01-01', '2014-02-01', dtype='datetime64')
    delta = np.timedelta64(1, 'D')
    sym = symbol('s', discover(dates))
    assert (compute(sym + delta, dates) == dates + delta).all()
    assert (compute(sym - delta, dates) == dates - delta).all()
    assert (
        compute(sym - (sym - delta), dates) ==
        dates - (dates - delta)
    ).all()


# coerce() behaves like astype.
def test_coerce():
    x = np.arange(1, 3)
    s = symbol('s', discover(x))
    np.testing.assert_array_equal(compute(s.coerce('float64'), x),
                                  np.arange(1.0, 3.0))


# concat() of 1-d arrays.
def test_concat_arr():
    s_data = np.arange(15)
    t_data = np.arange(15, 30)

    s = symbol('s', discover(s_data))
    t = symbol('t', discover(t_data))

    assert (
        compute(concat(s, t), {s: s_data, t: t_data}) ==
        np.arange(30)
    ).all()


# concat() of 2-d arrays along axis 0 (default) and axis 1.
def test_concat_mat():
    s_data = np.arange(15).reshape(5, 3)
    t_data = np.arange(15, 30).reshape(5, 3)

    s = symbol('s', discover(s_data))
    t = symbol('t', discover(t_data))

    assert (
        compute(concat(s, t), {s: s_data, t: t_data}) ==
        np.arange(30).reshape(10, 3)
    ).all()
    assert (
        compute(concat(s, t, axis=1), {s: s_data, t: t_data}) ==
        np.concatenate((s_data, t_data), axis=1)
    ).all()


# least()/greatest() are elementwise minimum/maximum; the *_mixed
# variants interleave winners between the two operands.
@pytest.mark.parametrize('dtype', ['int64', 'float64'])
def test_least(dtype):
    s_data = np.arange(15, dtype=dtype).reshape(5, 3)
    t_data = np.arange(15, 30, dtype=dtype).reshape(5, 3)

    s = symbol('s', discover(s_data))
    t = symbol('t', discover(t_data))

    expr = least(s, t)
    result = compute(expr, {s: s_data, t: t_data})
    expected = np.minimum(s_data, t_data)
    assert np.all(result == expected)


@pytest.mark.parametrize('dtype', ['int64', 'float64'])
def test_least_mixed(dtype):
    s_data = np.array([2, 1], dtype=dtype)
    t_data = np.array([1, 2], dtype=dtype)

    s = symbol('s', discover(s_data))
    t = symbol('t', discover(t_data))

    expr = least(s, t)
    result = compute(expr, {s: s_data, t: t_data})
    expected = np.minimum(s_data, t_data)
    assert np.all(result == expected)


@pytest.mark.parametrize('dtype', ['int64', 'float64'])
def test_greatest(dtype):
    s_data = np.arange(15, dtype=dtype).reshape(5, 3)
    t_data = np.arange(15, 30, dtype=dtype).reshape(5, 3)

    s = symbol('s', discover(s_data))
    t = symbol('t', discover(t_data))

    expr = greatest(s, t)
    result = compute(expr, {s: s_data, t: t_data})
    expected = np.maximum(s_data, t_data)
    assert np.all(result == expected)


@pytest.mark.parametrize('dtype', ['int64', 'float64'])
def test_greatest_mixed(dtype):
    s_data = np.array([2, 1], dtype=dtype)
    t_data = np.array([1, 2], dtype=dtype)

    s = symbol('s', discover(s_data))
    t = symbol('t', discover(t_data))

    expr = greatest(s, t)
    result = compute(expr, {s: s_data, t: t_data})
    expected = np.maximum(s_data, t_data)
    assert np.all(result == expected)


# blaze name -> numpy ufunc name, where the two libraries differ.
binary_name_map = {
    'atan2': 'arctan2'
}


# Two-argument math funcs, with and without expression optimization.
@pytest.mark.parametrize(
    ['func', 'kwargs'],
    itertools.product(['copysign', 'ldexp'], [dict(optimize=False), dict()])
)
def test_binary_math(func, kwargs):
    s_data = np.arange(15).reshape(5, 3)
    t_data = np.arange(15, 30).reshape(5, 3)

    s = symbol('s', discover(s_data))
    t = symbol('t', discover(t_data))

    scope = {s: s_data, t: t_data}
    result = compute(getattr(blaze, func)(s, t), scope, **kwargs)
    expected = getattr(np, binary_name_map.get(func, func))(s_data, t_data)

    np.testing.assert_equal(result, expected)
@pytest.mark.parametrize(
['func', 'kwargs'],
itertools.product(['atan2', 'hypot'], [dict(optimize=False), dict()])
)
def test_floating_binary_math(func, kwargs):
s_data = np.arange(15).reshape(5, 3)
t_data = np.arange(15, 30).reshape(5, 3)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
scope = {s: s_data, t: t_data}
result = compute(getattr(blaze, func)(s, t), scope, **kwargs)
expected = getattr(np, binary_name_map.get(func, func))(s_data, t_data)
np.testing.assert_allclose(result, expected)
def test_selection_inner_inputs():
s_data = np.arange(5).reshape(5, 1)
t_data = np.arange(5).reshape(5, 1)
s = symbol('s', 'var * {a: int64}')
t = symbol('t', 'var * {a: int64}')
assert (
compute(s[s.a == t.a], {s: s_data, t: t_data}) ==
s_data
).all()
def test_coalesce():
    """coalesce falls back to the second argument wherever the first is NULL.

    Covers scalar and array fallbacks, each with and without NULLs in the
    fallback itself.
    """
    data = np.array([0, None, 1, None, 2, None])
    s = symbol('s', 'var * ?int')
    t = symbol('t', 'int')
    u = symbol('u', '?int')
    v = symbol('v', 'var * int')
    w = symbol('w', 'var * ?int')

    cases = [
        # array to scalar
        (t, -1, np.array([0, -1, 1, -1, 2, -1])),
        # array to scalar with NULL
        (u, None, np.array([0, None, 1, None, 2, None], dtype=object)),
        # array to array
        (v, np.array([-1, -2, -3, -4, -5, -6]),
         np.array([0, -2, 1, -4, 2, -6])),
        # array to array with NULL
        (w, np.array([-1, None, -3, -4, -5, -6]),
         np.array([0, None, 1, -4, 2, -6])),
    ]
    for fallback_sym, fallback_val, expected in cases:
        np.testing.assert_array_equal(
            compute(coalesce(s, fallback_sym),
                    {s: data, fallback_sym: fallback_val}),
            expected,
        )
|
bsd-3-clause
|
teasherm/models
|
vanilla_gan/main.py
|
1
|
1943
|
import os
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from datasets.mnist import load as load_mnist
from lib.utils import get_batches_per_epoch
from vanilla_gan import model
def sample_z(batch_size, z_dim):
    """Draw a (batch_size, z_dim) latent-noise matrix, uniform on [-1, 1)."""
    shape = (batch_size, z_dim)
    return np.random.uniform(low=-1, high=1, size=shape)
def plot_samples(samples):
    """Render up to 16 flattened 28x28 images on a 4x4 grid; return the figure."""
    fig = plt.figure(figsize=(4, 4))
    grid = gridspec.GridSpec(4, 4)
    grid.update(wspace=0.05, hspace=0.05)
    for idx, flat_img in enumerate(samples):
        axis = plt.subplot(grid[idx])
        plt.axis('off')
        axis.set_xticklabels([])
        axis.set_yticklabels([])
        axis.set_aspect('equal')
        plt.imshow(flat_img.reshape(28, 28), cmap='Greys_r')
    return fig
def save_fig(i, path="results/vanilla_gan"):
    """Save the current matplotlib figure as <path>/<iii>.png, then close it."""
    if not os.path.exists(path):
        os.makedirs(path)
    filename = "{}/{}.png".format(path, str(i).zfill(3))
    plt.savefig(filename, bbox_inches="tight")
    plt.close()
def train(epochs=50, batch_size=16, z_dim=100):
    """Train the vanilla GAN on MNIST.

    Parameters
    ----------
    epochs : int
        Number of passes over the training set.
    batch_size : int
        Minibatch size for both discriminator and generator updates.
    z_dim : int
        Dimensionality of the latent noise vector.

    After each epoch, prints the last batch's losses and saves a 4x4 grid
    of generator samples under results/vanilla_gan/.
    """
    mnist = load_mnist()
    batches_per_epoch = get_batches_per_epoch(batch_size,
                                              mnist.train.num_examples)
    graph = tf.Graph()
    with tf.Session(graph=graph) as sess:
        var_dict = model.build_graph(z_dim)
        model.init(sess)
        for e in tqdm(range(epochs)):
            for b in range(batches_per_epoch):
                X, _ = mnist.train.next_batch(batch_size)
                # BUG FIX: ndarray.reshape returns a new array and does not
                # modify in place; the result was previously discarded, so X
                # kept its original shape.
                X = X.reshape([-1, 784])
                z = sample_z(batch_size, z_dim)
                d_loss = model.optimize_discriminator(sess, var_dict, X, z)
                # Resample noise so the generator step sees fresh z.
                z = sample_z(batch_size, z_dim)
                g_loss = model.optimize_generator(sess, var_dict, z)
            print('Epoch: {}'.format(e))
            print('D loss: {:.4}'.format(d_loss))
            print('G loss: {:.4}'.format(g_loss))
            print()
            z = sample_z(batch_size, z_dim)
            samples = model.sample_from_generator(sess, var_dict, z)
            # Draw onto the current figure; save_fig saves and closes it.
            plot_samples(samples)
            save_fig(e)
if __name__ == "__main__":
train()
|
unlicense
|
AnasGhrab/scikit-learn
|
examples/linear_model/plot_sgd_comparison.py
|
167
|
1659
|
"""
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
# Fractions of the data held out for testing; training proportion is 1 - heldout.
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
# Number of random train/test splits averaged per heldout fraction.
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target

# The online (incremental) linear classifiers being compared.
classifiers = [
    ("SGD", SGDClassifier()),
    ("ASGD", SGDClassifier(average=True)),
    ("Perceptron", Perceptron()),
    ("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
                                                         C=1.0)),
    ("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
                                                          C=1.0)),
]

# x-axis: proportion of data used for training.
xx = 1. - np.array(heldout)

for name, clf in classifiers:
    # Fixed seed so every classifier sees the same sequence of splits.
    rng = np.random.RandomState(42)
    yy = []
    for i in heldout:
        yy_ = []
        for r in range(rounds):
            X_train, X_test, y_train, y_test = \
                train_test_split(X, y, test_size=i, random_state=rng)
            clf.fit(X_train, y_train)
            y_pred = clf.predict(X_test)
            # Misclassification rate on the held-out portion.
            yy_.append(1 - np.mean(y_pred == y_test))
        # Average the error over all rounds for this split size.
        yy.append(np.mean(yy_))
    plt.plot(xx, yy, label=name)

plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
|
bsd-3-clause
|
TomAugspurger/pandas
|
pandas/core/indexes/range.py
|
1
|
27484
|
from datetime import timedelta
import operator
from sys import getsizeof
from typing import Any, Optional
import warnings
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import Label
import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, cache_readonly, doc
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCTimedeltaIndex
from pandas.core import ops
import pandas.core.common as com
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import _index_shared_docs, maybe_extract_name
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.io.formats.printing import pprint_thing
_empty_range = range(0)
class RangeIndex(Int64Index):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by DataFrame and Series when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
name : object, optional
Name to be stored in the index.
copy : bool, default False
Unused, accepted for homogeneity with other index types.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_range: range
# check whether self._data has been called
_cached_data: Optional[np.ndarray] = None
# --------------------------------------------------------------------
# Constructors
def __new__(
cls, start=None, stop=None, step=None, dtype=None, copy=False, name=None,
):
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if isinstance(start, RangeIndex):
start = start._range
return cls._simple_new(start, name=name)
# validate the arguments
if com.all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be called with integers")
start = ensure_python_int(start) if start is not None else 0
if stop is None:
start, stop = 0, start
else:
stop = ensure_python_int(stop)
step = ensure_python_int(step) if step is not None else 1
if step == 0:
raise ValueError("Step must not be zero")
rng = range(start, stop, step)
return cls._simple_new(rng, name=name)
@classmethod
def from_range(cls, data: range, name=None, dtype=None) -> "RangeIndex":
"""
Create RangeIndex from a range object.
Returns
-------
RangeIndex
"""
if not isinstance(data, range):
raise TypeError(
f"{cls.__name__}(...) must be called with object coercible to a "
f"range, {repr(data)} was passed"
)
cls._validate_dtype(dtype)
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: range, name: Label = None) -> "RangeIndex":
result = object.__new__(cls)
assert isinstance(values, range)
result._range = values
result.name = name
result._cache = {}
result._reset_identity()
return result
# --------------------------------------------------------------------
@cache_readonly
def _constructor(self):
""" return the class to use for construction """
return Int64Index
@property
def _data(self):
"""
An int array that for performance reasons is created only when needed.
The constructed array is saved in ``_cached_data``. This allows us to
check if the array has been created without accessing ``_data`` and
triggering the construction.
"""
if self._cached_data is None:
self._cached_data = np.arange(
self.start, self.stop, self.step, dtype=np.int64
)
return self._cached_data
@cache_readonly
def _int64index(self) -> Int64Index:
return Int64Index._simple_new(self._data, name=self.name)
def _get_data_as_items(self):
""" return a list of tuples of start, stop, step """
rng = self._range
return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
def __reduce__(self):
d = self._get_attributes_dict()
d.update(dict(self._get_data_as_items()))
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Rendering Methods
def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(("name", ibase.default_pprint(self.name)))
return attrs
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
def _format_with_header(self, header, na_rep="NaN", **kwargs):
return header + list(map(pprint_thing, self._range))
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
"removed in a future version. Use RangeIndex.{} "
"instead"
)
@cache_readonly
def start(self):
"""
The value of the `start` parameter (``0`` if this was not supplied).
"""
# GH 25710
return self._range.start
@property
def _start(self):
"""
The value of the `start` parameter (``0`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``start`` instead.
"""
warnings.warn(
self._deprecation_message.format("_start", "start"),
FutureWarning,
stacklevel=2,
)
return self.start
@cache_readonly
def stop(self):
"""
The value of the `stop` parameter.
"""
return self._range.stop
@property
def _stop(self):
"""
The value of the `stop` parameter.
.. deprecated:: 0.25.0
Use ``stop`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_stop", "stop"),
FutureWarning,
stacklevel=2,
)
return self.stop
@cache_readonly
def step(self):
"""
The value of the `step` parameter (``1`` if this was not supplied).
"""
# GH 25710
return self._range.step
@property
def _step(self):
"""
The value of the `step` parameter (``1`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``step`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_step", "step"),
FutureWarning,
stacklevel=2,
)
return self.step
@cache_readonly
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
rng = self._range
return getsizeof(rng) + sum(
getsizeof(getattr(rng, attr_name))
for attr_name in ["start", "stop", "step"]
)
def memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
@property
def dtype(self) -> np.dtype:
return np.dtype(np.int64)
@property
def is_unique(self) -> bool:
""" return if the index has unique values """
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
return self._range.step > 0 or len(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
return self._range.step < 0 or len(self) <= 1
@property
def has_duplicates(self) -> bool:
return False
def __contains__(self, key: Any) -> bool:
hash(key)
try:
key = ensure_python_int(key)
except TypeError:
return False
return key in self._range
@doc(Int64Index.get_loc)
def get_loc(self, key, method=None, tolerance=None):
if method is None and tolerance is None:
if is_integer(key) or (is_float(key) and key.is_integer()):
new_key = int(key)
try:
return self._range.index(new_key)
except ValueError as err:
raise KeyError(key) from err
raise KeyError(key)
return super().get_loc(key, method=method, tolerance=tolerance)
@Appender(_index_shared_docs["get_indexer"])
def get_indexer(self, target, method=None, limit=None, tolerance=None):
if com.any_not_none(method, tolerance, limit) or not is_list_like(target):
return super().get_indexer(
target, method=method, tolerance=tolerance, limit=limit
)
if self.step > 0:
start, stop, step = self.start, self.stop, self.step
else:
# GH 28678: work on reversed range for simplicity
reverse = self._range[::-1]
start, stop, step = reverse.start, reverse.stop, reverse.step
target_array = np.asarray(target)
if not (is_integer_dtype(target_array) and target_array.ndim == 1):
# checks/conversions/roundings are delegated to general method
return super().get_indexer(target, method=method, tolerance=tolerance)
locs = target_array - start
valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
locs[~valid] = -1
locs[valid] = locs[valid] / step
if step != self.step:
# We reversed this range: transform to original locs
locs[valid] = len(self) - 1 - locs[valid]
return ensure_platform_int(locs)
def tolist(self):
return list(self._range)
@doc(Int64Index._shallow_copy)
def _shallow_copy(self, values=None, name: Label = no_default):
name = self.name if name is no_default else name
if values is None:
result = self._simple_new(self._range, name=name)
result._cache = self._cache.copy()
return result
else:
return Int64Index._simple_new(values, name=name)
@doc(Int64Index.copy)
def copy(self, name=None, deep=False, dtype=None, **kwargs):
self._validate_dtype(dtype)
if name is None:
name = self.name
return self.from_range(self._range, name=name)
def _minmax(self, meth: str):
    """Shared O(1) implementation of ``min``/``max`` from start/step/len."""
    last_step_count = len(self) - 1
    if last_step_count < 0:
        # Empty range: the min/max of nothing is NaN.
        return np.nan
    ascending = self.step > 0
    # The first element is the min of an ascending range and the max of a
    # descending one; otherwise the answer is the last element.
    if (meth == "min") == ascending:
        return self.start
    return self.start + self.step * last_step_count
def min(self, axis=None, skipna=True, *args, **kwargs) -> int:
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return self._minmax("min")
def max(self, axis=None, skipna=True, *args, **kwargs) -> int:
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return self._minmax("max")
def argsort(self, *args, **kwargs) -> np.ndarray:
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
argsorted : numpy array
See Also
--------
numpy.ndarray.argsort
"""
nv.validate_argsort(args, kwargs)
if self._range.step > 0:
return np.arange(len(self))
else:
return np.arange(len(self) - 1, -1, -1)
def equals(self, other) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if isinstance(other, RangeIndex):
return self._range == other._range
return super().equals(other)
def intersection(self, other, sort=False):
"""
Form the intersection of two Index objects.
Parameters
----------
other : Index or array-like
sort : False or None, default False
Sort the resulting index if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default to ``False`` to match the behaviour
from before 0.24.0.
Returns
-------
intersection : Index
"""
self._validate_sort_keyword(sort)
if self.equals(other):
return self._get_reconciled_name_object(other)
if not isinstance(other, RangeIndex):
return super().intersection(other, sort=sort)
if not len(self) or not len(other):
return self._simple_new(_empty_range)
first = self._range[::-1] if self.step < 0 else self._range
second = other._range[::-1] if other.step < 0 else other._range
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = max(first.start, second.start)
int_high = min(first.stop, second.stop)
if int_high <= int_low:
return self._simple_new(_empty_range)
# Method hint: linear Diophantine equation
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
gcd, s, t = self._extended_gcd(first.step, second.step)
# check whether element sets intersect
if (first.start - second.start) % gcd:
return self._simple_new(_empty_range)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
tmp_start = first.start + (second.start - first.start) * first.step // gcd * s
new_step = first.step * second.step // gcd
new_range = range(tmp_start, int_high, new_step)
new_index = self._simple_new(new_range)
# adjust index to limiting interval
new_start = new_index._min_fitting_element(int_low)
new_range = range(new_start, new_index.stop, new_index.step)
new_index = self._simple_new(new_range)
if (self.step < 0 and other.step < 0) is not (new_index.step < 0):
new_index = new_index[::-1]
if sort is None:
new_index = new_index.sort_values()
return new_index
def _min_fitting_element(self, lower_limit: int) -> int:
"""Returns the smallest element greater than or equal to the limit"""
no_steps = -(-(lower_limit - self.start) // abs(self.step))
return self.start + abs(self.step) * no_steps
def _max_fitting_element(self, upper_limit: int) -> int:
"""Returns the largest element smaller than or equal to the limit"""
no_steps = (upper_limit - self.start) // abs(self.step)
return self.start + abs(self.step) * no_steps
def _extended_gcd(self, a, b):
    """
    Extended Euclidean algorithm solving Bezout's identity:
    a*s + b*t = gcd(a, b)
    Finds one particular solution for s, t.
    Returns: gcd, s, t
    """
    prev_r, r = a, b
    prev_s, s = 1, 0
    prev_t, t = 0, 1
    while r != 0:
        quotient = prev_r // r
        prev_r, r = r, prev_r - quotient * r
        prev_s, s = s, prev_s - quotient * s
        prev_t, t = t, prev_t - quotient * t
    return prev_r, prev_s, prev_t
def _union(self, other, sort):
"""
Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort resulting index. ``sort=None`` returns a
monotonically increasing ``RangeIndex`` if possible or a sorted
``Int64Index`` if not. ``sort=False`` always returns an
unsorted ``Int64Index``
.. versionadded:: 0.25.0
Returns
-------
union : Index
"""
if not len(other) or self.equals(other) or not len(self):
return super()._union(other, sort=sort)
if isinstance(other, RangeIndex) and sort is None:
start_s, step_s = self.start, self.step
end_s = self.start + self.step * (len(self) - 1)
start_o, step_o = other.start, other.step
end_o = other.start + other.step * (len(other) - 1)
if self.step < 0:
start_s, step_s, end_s = end_s, -step_s, start_s
if other.step < 0:
start_o, step_o, end_o = end_o, -step_o, start_o
if len(self) == 1 and len(other) == 1:
step_s = step_o = abs(self.start - other.start)
elif len(self) == 1:
step_s = step_o
elif len(other) == 1:
step_o = step_s
start_r = min(start_s, start_o)
end_r = max(end_s, end_o)
if step_o == step_s:
if (
(start_s - start_o) % step_s == 0
and (start_s - end_o) <= step_s
and (start_o - end_s) <= step_s
):
return type(self)(start_r, end_r + step_s, step_s)
if (
(step_s % 2 == 0)
and (abs(start_s - start_o) <= step_s / 2)
and (abs(end_s - end_o) <= step_s / 2)
):
return type(self)(start_r, end_r + step_s / 2, step_s / 2)
elif step_o % step_s == 0:
if (
(start_o - start_s) % step_s == 0
and (start_o + step_s >= start_s)
and (end_o - step_s <= end_s)
):
return type(self)(start_r, end_r + step_s, step_s)
elif step_s % step_o == 0:
if (
(start_s - start_o) % step_o == 0
and (start_s + step_o >= start_o)
and (end_s - step_o <= end_o)
):
return type(self)(start_r, end_r + step_o, step_o)
return self._int64index._union(other, sort=sort)
@doc(Int64Index.join)
def join(self, other, how="left", level=None, return_indexers=False, sort=False):
if how == "outer" and self is not other:
# note: could return RangeIndex in more circumstances
return self._int64index.join(other, how, level, return_indexers, sort)
return super().join(other, how, level, return_indexers, sort)
def _concat(self, indexes, name):
"""
Overriding parent method for the case of all RangeIndex instances.
When all members of "indexes" are of type RangeIndex: result will be
RangeIndex if possible, Int64Index otherwise. E.g.:
indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
"""
if not all(isinstance(x, RangeIndex) for x in indexes):
return super()._concat(indexes, name)
start = step = next_ = None
# Filter the empty indexes
non_empty_indexes = [obj for obj in indexes if len(obj)]
for obj in non_empty_indexes:
rng: range = obj._range
if start is None:
# This is set by the first non-empty index
start = rng.start
if step is None and len(rng) > 1:
step = rng.step
elif step is None:
# First non-empty index had only one element
if rng.start == start:
result = Int64Index(np.concatenate([x._values for x in indexes]))
return result.rename(name)
step = rng.start - start
non_consecutive = (step != rng.step and len(rng) > 1) or (
next_ is not None and rng.start != next_
)
if non_consecutive:
result = Int64Index(np.concatenate([x._values for x in indexes]))
return result.rename(name)
if step is not None:
next_ = rng[-1] + step
if non_empty_indexes:
# Get the stop value from "next" or alternatively
# from the last non-empty index
stop = non_empty_indexes[-1].stop if next_ is None else next_
return RangeIndex(start, stop, step).rename(name)
# Here all "indexes" had 0 length, i.e. were empty.
# In this case return an empty range index.
return RangeIndex(0, 0).rename(name)
def __len__(self) -> int:
"""
return the length of the RangeIndex
"""
return len(self._range)
@property
def size(self) -> int:
return len(self)
def __getitem__(self, key):
"""
Conserve RangeIndex type for scalar and slice keys.
"""
if isinstance(key, slice):
new_range = self._range[key]
return self._simple_new(new_range, name=self.name)
elif is_integer(key):
new_key = int(key)
try:
return self._range[new_key]
except IndexError as err:
raise IndexError(
f"index {key} is out of bounds for axis 0 with size {len(self)}"
) from err
elif is_scalar(key):
raise IndexError(
"only integers, slices (`:`), "
"ellipsis (`...`), numpy.newaxis (`None`) "
"and integer or boolean "
"arrays are valid indices"
)
# fall back to Int64Index
return super().__getitem__(key)
@unpack_zerodim_and_defer("__floordiv__")
def __floordiv__(self, other):
    # Floor-divide by a scalar, keeping the RangeIndex representation when
    # the result is still an evenly spaced range; otherwise fall back to
    # the materialized Int64Index.
    if is_integer(other) and other != 0:
        if len(self) == 0 or self.start % other == 0 and self.step % other == 0:
            # Exact division of both start and step (or empty index):
            # the quotient is again a range.
            start = self.start // other
            step = self.step // other
            stop = start + len(self) * step
            # step may floor to 0 (e.g. |step| < other); range() requires
            # a nonzero step, so substitute 1 for the degenerate case.
            new_range = range(start, stop, step or 1)
            return self._simple_new(new_range, name=self.name)
        if len(self) == 1:
            # A single element floor-divides to a single element regardless
            # of divisibility.
            start = self.start // other
            new_range = range(start, start + 1, 1)
            return self._simple_new(new_range, name=self.name)
    # Non-integer, zero, or inexact divisor: defer to Int64Index.
    return self._int64index // other
def all(self) -> bool:
return 0 not in self._range
def any(self) -> bool:
return any(self._range)
@classmethod
def _add_numeric_methods_binary(cls):
""" add in numeric methods, specialized to RangeIndex """
def _make_evaluate_binop(op, step=False):
"""
Parameters
----------
op : callable that accepts 2 params
perform the binary op
step : callable, optional, default to False
op to apply to the step parm if not None
if False, use the existing step
"""
@unpack_zerodim_and_defer(op.__name__)
def _evaluate_numeric_binop(self, other):
if isinstance(other, ABCTimedeltaIndex):
# Defer to TimedeltaIndex implementation
return NotImplemented
elif isinstance(other, (timedelta, np.timedelta64)):
# GH#19333 is_integer evaluated True on timedelta64,
# so we need to catch these explicitly
return op(self._int64index, other)
elif is_timedelta64_dtype(other):
# Must be an np.ndarray; GH#22390
return op(self._int64index, other)
other = extract_array(other, extract_numpy=True)
attrs = self._get_attributes_dict()
left, right = self, other
try:
# apply if we have an override
if step:
with np.errstate(all="ignore"):
rstep = step(left.step, right)
# we don't have a representable op
# so return a base index
if not is_integer(rstep) or not rstep:
raise ValueError
else:
rstep = left.step
with np.errstate(all="ignore"):
rstart = op(left.start, right)
rstop = op(left.stop, right)
result = type(self)(rstart, rstop, rstep, **attrs)
# for compat with numpy / Int64Index
# even if we can represent as a RangeIndex, return
# as a Float64Index if we have float-like descriptors
if not all(is_integer(x) for x in [rstart, rstop, rstep]):
result = result.astype("float64")
return result
except (ValueError, TypeError, ZeroDivisionError):
# Defer to Int64Index implementation
return op(self._int64index, other)
# TODO: Do attrs get handled reliably?
name = f"__{op.__name__}__"
return compat.set_function_name(_evaluate_numeric_binop, name, cls)
cls.__add__ = _make_evaluate_binop(operator.add)
cls.__radd__ = _make_evaluate_binop(ops.radd)
cls.__sub__ = _make_evaluate_binop(operator.sub)
cls.__rsub__ = _make_evaluate_binop(ops.rsub)
cls.__mul__ = _make_evaluate_binop(operator.mul, step=operator.mul)
cls.__rmul__ = _make_evaluate_binop(ops.rmul, step=ops.rmul)
cls.__truediv__ = _make_evaluate_binop(operator.truediv, step=operator.truediv)
cls.__rtruediv__ = _make_evaluate_binop(ops.rtruediv, step=ops.rtruediv)
RangeIndex._add_numeric_methods()
|
bsd-3-clause
|
WayneDW/Sentiment-Analysis-in-Event-Driven-Stock-Price-Movement-Prediction
|
archived/tfidf_tsne.py
|
1
|
1827
|
# Adopted from https://github.com/lazyprogrammer/machine_learning_examples/blob/master/nlp_class2/tfidf_tsne.py
import json
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.manifold import TSNE
from datetime import datetime
# import os
# import sys
# sys.path.append(os.path.abspath('..'))
from utils import get_wikipedia_data, find_analogies, get_news_data_with_price
# from util import find_analogies
from sklearn.feature_extraction.text import TfidfTransformer
def tsne_on_wikipedia():
    # Build a TF-IDF term-document matrix from Wikipedia text and embed the
    # vocabulary in 2-D with t-SNE, then eyeball word-analogy quality.
    # NOTE(review): this is Python 2 code (print statements, iteritems,
    # xrange) -- it will not run under Python 3 without porting.
    sentences, word2idx = get_wikipedia_data('file', 5000, by_paragraph=True)
    with open('w2v_word2idx.json', 'w') as f:
        json.dump(word2idx, f)

    # build term document matrix
    V = len(word2idx)   # vocabulary size (rows)
    N = len(sentences)  # number of documents (columns)
    print V, N

    # create raw counts first
    A = np.zeros((V, N))
    j = 0
    for sentence in sentences:
        for i in sentence:
            A[i,j] += 1
        j += 1
    print 'finished getting raw counts'

    # Re-weight raw counts by TF-IDF; fit_transform returns a sparse matrix.
    transformer = TfidfTransformer()
    A = transformer.fit_transform(A)
    A = A.toarray()

    idx2word = {v:k for k, v in word2idx.iteritems()}

    # plot the data in 2-D
    tsne = TSNE()
    Z = tsne.fit_transform(A)
    print 'Z.shape:', Z.shape
    plt.scatter(Z[:,0], Z[:,1])
    for i in xrange(V):
        try:
            plt.annotate(s=idx2word[i].encode('utf8'), xy=(Z[i,0], Z[i,1]))
        except:
            # NOTE(review): bare except silently skips any word that fails
            # to render; consider catching UnicodeError explicitly.
            print 'bad string:', idx2word[i]
    plt.show()

    # Use the 2-D embedding itself as word vectors for analogy checks.
    We = Z
    # find_analogies('king', 'man', 'woman', We, word2idx)
    find_analogies('france', 'paris', 'london', We, word2idx)
    find_analogies('france', 'paris', 'rome', We, word2idx)
    find_analogies('paris', 'france', 'italy', We, word2idx)
def tsne_on_news():
get_news_data_with_price()
if __name__ == '__main__':
tsne_on_news()
|
mit
|
leonth/bulk-download-quandl
|
bulkdlquandl.py
|
1
|
3384
|
import itertools
import logging
import io
import os
import asyncio as aio
import aiohttp
import pandas as pd
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('asyncio').setLevel(logging.WARNING) # tone down asyncio debug messages
from settings import DL_DIR, AUTH_TOKEN, MAX_CONN
class QuandlUpdater(object):
def __init__(self):
self._dl_codes_queue = aio.Queue()
@aio.coroutine
def dl_quandl_wiki(self):
processors = [aio.async(self.process_source_datasets())]
for i in range(MAX_CONN):
fut = aio.async(self.process_dl_codes_queue(i))
processors.append(fut)
logger.debug('All processors online.')
print_task = aio.async(self.print_queue_status())
yield from aio.wait(processors)
logger.debug('All processors shut down.')
print_task.cancel()
@aio.coroutine
def process_source_datasets(self):
dfs = []
for page_num in itertools.count(start=1):
logger.debug('Downloading list page %d' % page_num)
url = 'http://www.quandl.com/api/v2/datasets.csv?query=*&source_code=WIKI&per_page=300&page=%d&auth_token=%s' % (page_num, AUTH_TOKEN)
r = yield from aiohttp.request('get', url)
text = yield from r.text()
if len(text) == 0:
break
df = pd.read_csv(io.StringIO(text), header=None, names=['code', 'name', 'start_date', 'end_date', 'frequency', 'last_updated'])
for code in df['code']:
yield from self._dl_codes_queue.put(code)
dfs.append(df)
concat_df = pd.concat(dfs)
concat_df.to_csv('dataset.WIKI.csv')
logger.debug('Finished downloading list.')
yield from self._dl_codes_queue.put(None) # put the stop sign
@aio.coroutine
def process_dl_codes_queue(self, id):
logger.debug('Starting processor #%d' % id)
while True:
code = yield from self._dl_codes_queue.get()
if code is None:
yield from self._dl_codes_queue.put(None) # put back the stop sign to let other coroutines see it
logger.debug('Shutting down processor #%d' % id)
return
else:
fp = os.path.join(DL_DIR, '%s.csv' % code.replace('/', '.'))
if os.path.isfile(fp):
logger.debug('Processor #%d: skipping %s as it is already downloaded' % (id, code))
continue
logger.debug('Processor #%d: processing %s' % (id, code))
url = 'http://www.quandl.com/api/v1/datasets/%s.csv?sort_order=asc&auth_token=%s' % (code, AUTH_TOKEN)
r = yield from aiohttp.request('get', url)
text = yield from r.text()
df = pd.read_csv(io.StringIO(text), index_col=0)
df.to_csv(fp)
logger.debug('Processor #%d: finished processing %s' % (id, code))
@aio.coroutine
def print_queue_status(self):
    """Periodically log the number of codes still waiting in the queue.

    Runs forever; the caller is expected to cancel the task when the
    download workers have all finished.
    """
    while True:
        logger.info('Queue length: %d' % self._dl_codes_queue.qsize())
        yield from aio.sleep(3)
if __name__ == "__main__":
    # exist_ok avoids the check-then-create race: with the original
    # isdir()/makedirs() pair a directory created in between the two
    # calls would crash the script.
    os.makedirs(DL_DIR, exist_ok=True)
    updater = QuandlUpdater()
    # Drive the download to completion on the default event loop.
    aio.get_event_loop().run_until_complete(updater.dl_quandl_wiki())
|
mit
|
halwai/cvxpy
|
examples/extensions/kmeans.py
|
11
|
3555
|
import cvxpy as cvx
import mixed_integer as mi
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
# Fixed seed so the k-means initialisations (and hence the benchmark
# numbers) are reproducible across runs.
np.random.seed(42)
digits = load_digits()
# Standardise features to zero mean / unit variance; k-means is
# distance-based, so unscaled pixel counts would dominate.
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
# Subsample size used for the silhouette score (the full pairwise
# distance matrix would be expensive).
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
      % (n_digits, n_samples, n_features))
print(79 * '_')
# Table header for bench_k_means output below.
print('% 9s' % 'init'
      ' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
    """Fit *estimator* on *data* and print timing plus clustering metrics.

    Uses the module-level ``labels`` as ground truth and ``sample_size``
    for the silhouette computation; output matches the header printed
    above.
    """
    start = time()
    estimator.fit(data)
    elapsed = time() - start
    scores = (
        metrics.homogeneity_score(labels, estimator.labels_),
        metrics.completeness_score(labels, estimator.labels_),
        metrics.v_measure_score(labels, estimator.labels_),
        metrics.adjusted_rand_score(labels, estimator.labels_),
        metrics.adjusted_mutual_info_score(labels, estimator.labels_),
        metrics.silhouette_score(data, estimator.labels_,
                                 metric='euclidean',
                                 sample_size=sample_size),
    )
    print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
          % ((name, elapsed, estimator.inertia_) + scores))
# Benchmark the three initialisation strategies on the same data.
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
              name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
              name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
              name="PCA-based",
              data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, m_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
# NOTE(review): min()+1 / max()-1 *shrinks* the plotted range rather than
# padding it; newer versions of this example use min()-1 / max()+1 — confirm
# which behaviour is wanted.
x_min, x_max = reduced_data[:, 0].min() + 1, reduced_data[:, 0].max() - 1
y_min, y_max = reduced_data[:, 1].min() + 1, reduced_data[:, 1].max() - 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()),
           cmap=plt.cm.Paired,
           aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
            marker='x', s=169, linewidths=3,
            color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
          'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
|
gpl-3.0
|
larroy/mxnet
|
example/rcnn/symdata/vis.py
|
11
|
1559
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
def vis_detection(im_orig, detections, class_names, thresh=0.7):
    """Show an image with one labelled box per detection.

    Each detection is ``[cls, conf, x1, y1, x2, y2]``; entries whose
    class index is 0 (background) or whose confidence does not exceed
    *thresh* are skipped.  Blocks until the matplotlib window closes.
    """
    import matplotlib.pyplot as plt
    import random
    plt.imshow(im_orig)
    # One random RGB colour per class so boxes of the same class match.
    colors = [(random.random(), random.random(), random.random())
              for _ in class_names]
    for cls, conf, x1, y1, x2, y2 in detections:
        cls = int(cls)
        if not (cls > 0 and conf > thresh):
            continue
        axes = plt.gca()
        box = plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
                            fill=False, edgecolor=colors[cls], linewidth=3.5)
        axes.add_patch(box)
        axes.text(x1, y1 - 2, '{:s} {:.3f}'.format(class_names[cls], conf),
                  bbox=dict(facecolor=colors[cls], alpha=0.5),
                  fontsize=12, color='white')
    plt.show()
|
apache-2.0
|
crichardson17/starburst_atlas
|
Low_resolution_sims/DustFree_LowRes/Padova_cont/padova_cont_4/peaks_reader.py
|
33
|
2761
|
import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ---------------------------------------------------
#input files
# Pick up the simulation outputs from the working directory: the .grd
# file holds the grid parameters, the .txt file the emission-line table.
# NOTE(review): if several files match, the last one listed wins —
# presumably exactly one of each exists per directory; confirm.
for file in os.listdir('.'):
    if file.endswith(".grd"):
        inputfile = file
for file in os.listdir('.'):
    if file.endswith(".txt"):
        inputfile2 = file
#this is where the grid information (phi and hdens) is read in and saved to grid.
# Python 2 idiom: the csv module works on files opened in binary mode.
grid = [];
with open(inputfile, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    for row in csvReader:
        grid.append(row);
# Convert the list of rows to a numpy array of strings for slicing below.
grid = asarray(grid)
# ---------------------------------------------------
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    # Python 2: the reader exposes .next(); the first row is the header.
    headers = csvReader.next()
    for row in csvReader:
        dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
# Columns 6 and 7 of the grid file hold phi and hdens; row 0 is a header,
# so slice rows 1..N to align with the emission-line table.
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
# Drop the label column from both the header and the data.
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
# Per-line summary: [max ratio, argmax row, hdens at max, phi at max].
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
# NOTE(review): column 57 is presumably the 4860 (H-beta) reference line;
# `incident` itself is not used below — the loop re-reads column 57.
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
# Each cell is log10(4860 * line / reference-line); non-positive logs are
# clamped to 0.
for i in range(len(Emissionlines)):
    for j in range(len(Emissionlines[0])):
        # Compute the log ratio once instead of twice per cell.
        value = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
        if value > 0:
            concatenated_data[i,j] = value
        else:
            # BUG FIX: the original wrote `concatenated_data[i,j] == 0`,
            # a no-op comparison; the cell only stayed 0 because the
            # array was pre-zeroed.
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
#    for j in range(len(Emissionlines[0])):
#        if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
#            concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
#        else:
#            concatenated_data[i,j] = 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
    max_values[j,0] = max(concatenated_data[:,j])
    max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
    # NOTE(review): indexing with the float stored in max_values[j,1]
    # relies on old numpy accepting float indices — verify on upgrade.
    max_values[j,2] = hdens_values[max_values[j,1]]
    max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
# Tab-separated output consumed by the contour-plotting scripts.
savetxt('peaks', max_values, delimiter='\t')
|
gpl-2.0
|
parantapa/seaborn
|
seaborn/timeseries.py
|
13
|
15218
|
"""Timeseries plotting functions."""
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats, interpolate
import matplotlib as mpl
import matplotlib.pyplot as plt
from .external.six import string_types
from . import utils
from . import algorithms as algo
from .palettes import color_palette
def tsplot(data, time=None, unit=None, condition=None, value=None,
           err_style="ci_band", ci=68, interpolate=True, color=None,
           estimator=np.mean, n_boot=5000, err_palette=None, err_kws=None,
           legend=True, ax=None, **kwargs):
    """Plot one or more timeseries with flexible representation of uncertainty.

    This function is intended to be used with data where observations are
    nested within sampling units that were measured at multiple timepoints.

    It can take data specified either as a long-form (tidy) DataFrame or as an
    ndarray with dimensions (unit, time) The interpretation of some of the
    other parameters changes depending on the type of object passed as data.

    Parameters
    ----------
    data : DataFrame or ndarray
        Data for the plot. Should either be a "long form" dataframe or an
        array with dimensions (unit, time, condition). In both cases, the
        condition field/dimension is optional. The type of this argument
        determines the interpretation of the next few parameters. When
        using a DataFrame, the index has to be sequential.
    time : string or series-like
        Either the name of the field corresponding to time in the data
        DataFrame or x values for a plot when data is an array. If a Series,
        the name will be used to label the x axis.
    unit : string
        Field in the data DataFrame identifying the sampling unit (e.g.
        subject, neuron, etc.). The error representation will collapse over
        units at each time/condition observation. This has no role when data
        is an array.
    value : string
        Either the name of the field corresponding to the data values in
        the data DataFrame (i.e. the y coordinate) or a string that forms
        the y axis label when data is an array.
    condition : string or Series-like
        Either the name of the field identifying the condition an observation
        falls under in the data DataFrame, or a sequence of names with a length
        equal to the size of the third dimension of data. There will be a
        separate trace plotted for each condition. If condition is a Series
        with a name attribute, the name will form the title for the plot
        legend (unless legend is set to False).
    err_style : string or list of strings or None
        Names of ways to plot uncertainty across units from set of
        {ci_band, ci_bars, boot_traces, boot_kde, unit_traces, unit_points}.
        Can use one or more than one method.
    ci : float or list of floats in [0, 100]
        Confidence interaval size(s). If a list, it will stack the error
        plots for each confidence interval. Only relevant for error styles
        with "ci" in the name.
    interpolate : boolean
        Whether to do a linear interpolation between each timepoint when
        plotting. The value of this parameter also determines the marker
        used for the main plot traces, unless marker is specified as a keyword
        argument.
    color : seaborn palette or matplotlib color name or dictionary
        Palette or color for the main plots and error representation (unless
        plotting by unit, which can be separately controlled with err_palette).
        If a dictionary, should map condition name to color spec.
    estimator : callable
        Function to determine central tendency and to pass to bootstrap
        must take an ``axis`` argument.
    n_boot : int
        Number of bootstrap iterations.
    err_palette : seaborn palette
        Palette name or list of colors used when plotting data for each unit.
    err_kws : dict, optional
        Keyword argument dictionary passed through to matplotlib function
        generating the error plot,
    legend : bool, optional
        If ``True`` and there is a ``condition`` variable, add a legend to
        the plot.
    ax : axis object, optional
        Plot in given axis; if None creates a new figure
    kwargs :
        Other keyword arguments are passed to main plot() call

    Returns
    -------
    ax : matplotlib axis
        axis with plot data

    Examples
    --------

    Plot a trace with translucent confidence bands:

    .. plot::
        :context: close-figs

        >>> import numpy as np; np.random.seed(22)
        >>> import seaborn as sns; sns.set(color_codes=True)
        >>> x = np.linspace(0, 15, 31)
        >>> data = np.sin(x) + np.random.rand(10, 31) + np.random.randn(10, 1)
        >>> ax = sns.tsplot(data=data)

    Plot a long-form dataframe with several conditions:

    .. plot::
        :context: close-figs

        >>> gammas = sns.load_dataset("gammas")
        >>> ax = sns.tsplot(time="timepoint", value="BOLD signal",
        ...                 unit="subject", condition="ROI",
        ...                 data=gammas)

    Use error bars at the positions of the observations:

    .. plot::
        :context: close-figs

        >>> ax = sns.tsplot(data=data, err_style="ci_bars", color="g")

    Don't interpolate between the observations:

    .. plot::
        :context: close-figs

        >>> import matplotlib.pyplot as plt
        >>> ax = sns.tsplot(data=data, err_style="ci_bars", interpolate=False)

    Show multiple confidence bands:

    .. plot::
        :context: close-figs

        >>> ax = sns.tsplot(data=data, ci=[68, 95], color="m")

    Use a different estimator:

    .. plot::
        :context: close-figs

        >>> ax = sns.tsplot(data=data, estimator=np.median)

    Show each bootstrap resample:

    .. plot::
        :context: close-figs

        >>> ax = sns.tsplot(data=data, err_style="boot_traces", n_boot=500)

    Show the trace from each sampling unit:

    .. plot::
        :context: close-figs

        >>> ax = sns.tsplot(data=data, err_style="unit_traces")

    """
    # Sort out default values for the parameters
    if ax is None:
        ax = plt.gca()
    if err_kws is None:
        err_kws = {}
    # Handle different types of input data
    if isinstance(data, pd.DataFrame):
        xlabel = time
        ylabel = value
        # Condition is optional
        if condition is None:
            condition = pd.Series(np.ones(len(data)))
            legend = False
            legend_name = None
            n_cond = 1
        else:
            legend = True and legend
            legend_name = condition
            n_cond = len(data[condition].unique())
    else:
        data = np.asarray(data)
        # Data can be a timecourse from a single unit or
        # several observations in one condition
        if data.ndim == 1:
            data = data[np.newaxis, :, np.newaxis]
        elif data.ndim == 2:
            data = data[:, :, np.newaxis]
        n_unit, n_time, n_cond = data.shape
        # Units are experimental observations. Maybe subjects, or neurons
        if unit is None:
            units = np.arange(n_unit)
        unit = "unit"
        units = np.repeat(units, n_time * n_cond)
        ylabel = None
        # Time forms the xaxis of the plot
        if time is None:
            times = np.arange(n_time)
        else:
            times = np.asarray(time)
        xlabel = None
        if hasattr(time, "name"):
            xlabel = time.name
        time = "time"
        times = np.tile(np.repeat(times, n_cond), n_unit)
        # Conditions split the timeseries plots
        if condition is None:
            conds = range(n_cond)
            legend = False
            if isinstance(color, dict):
                err = "Must have condition names if using color dict."
                raise ValueError(err)
        else:
            conds = np.asarray(condition)
            legend = True and legend
            if hasattr(condition, "name"):
                legend_name = condition.name
            else:
                legend_name = None
        condition = "cond"
        conds = np.tile(conds, n_unit * n_time)
        # Value forms the y value in the plot
        if value is None:
            ylabel = None
        else:
            ylabel = value
        value = "value"
        # Convert to long-form DataFrame
        data = pd.DataFrame(dict(value=data.ravel(),
                                 time=times,
                                 unit=units,
                                 cond=conds))
    # Set up the err_style and ci arguments for the loop below
    if isinstance(err_style, string_types):
        err_style = [err_style]
    elif err_style is None:
        err_style = []
    if not hasattr(ci, "__iter__"):
        ci = [ci]
    # Set up the color palette
    if color is None:
        # NOTE(review): rcParams["axes.color_cycle"] was removed in
        # matplotlib 2.0 (replaced by axes.prop_cycle) — confirm the
        # supported matplotlib range.
        current_palette = mpl.rcParams["axes.color_cycle"]
        if len(current_palette) < n_cond:
            colors = color_palette("husl", n_cond)
        else:
            colors = color_palette(n_colors=n_cond)
    elif isinstance(color, dict):
        colors = [color[c] for c in data[condition].unique()]
    else:
        try:
            colors = color_palette(color, n_cond)
        except ValueError:
            color = mpl.colors.colorConverter.to_rgb(color)
            colors = [color] * n_cond
    # Do a groupby with condition and plot each trace
    for c, (cond, df_c) in enumerate(data.groupby(condition, sort=False)):
        df_c = df_c.pivot(unit, time, value)
        # NOTE(review): np.float is deprecated in numpy >= 1.20;
        # plain float is the modern spelling.
        x = df_c.columns.values.astype(np.float)
        # Bootstrap the data for confidence intervals
        boot_data = algo.bootstrap(df_c.values, n_boot=n_boot,
                                   axis=0, func=estimator)
        cis = [utils.ci(boot_data, v, axis=0) for v in ci]
        central_data = estimator(df_c.values, axis=0)
        # Get the color for this condition
        color = colors[c]
        # Use subroutines to plot the uncertainty
        for style in err_style:
            # Allow for null style (only plot central tendency)
            if style is None:
                continue
            # Grab the function from the global environment
            try:
                plot_func = globals()["_plot_%s" % style]
            except KeyError:
                raise ValueError("%s is not a valid err_style" % style)
            # Possibly set up to plot each observation in a different color
            if err_palette is not None and "unit" in style:
                orig_color = color
                color = color_palette(err_palette, len(df_c.values))
            # Pass all parameters to the error plotter as keyword args
            plot_kwargs = dict(ax=ax, x=x, data=df_c.values,
                               boot_data=boot_data,
                               central_data=central_data,
                               color=color, err_kws=err_kws)
            # Plot the error representation, possibly for multiple cis
            for ci_i in cis:
                plot_kwargs["ci"] = ci_i
                plot_func(**plot_kwargs)
            if err_palette is not None and "unit" in style:
                color = orig_color
        # Plot the central trace
        # kwargs is mutated via setdefault, so marker/linestyle choices
        # persist across conditions in this loop.
        kwargs.setdefault("marker", "" if interpolate else "o")
        ls = kwargs.pop("ls", "-" if interpolate else "")
        kwargs.setdefault("linestyle", ls)
        label = cond if legend else "_nolegend_"
        ax.plot(x, central_data, color=color, label=label, **kwargs)
    # Pad the sides of the plot only when not interpolating
    # (x here is the grid from the last condition plotted above)
    ax.set_xlim(x.min(), x.max())
    x_diff = x[1] - x[0]
    if not interpolate:
        ax.set_xlim(x.min() - x_diff, x.max() + x_diff)
    # Add the plot labels
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    if legend:
        ax.legend(loc=0, title=legend_name)
    return ax
# Subroutines for tsplot errorbar plotting
# ----------------------------------------
def _plot_ci_band(ax, x, ci, color, err_kws, **kwargs):
    """Shade the region between the low and high confidence bounds."""
    # Default to a translucent band unless the caller chose an alpha.
    err_kws.setdefault("alpha", 0.2)
    low, high = ci
    ax.fill_between(x, low, high, color=color, **err_kws)
def _plot_ci_bars(ax, x, central_data, ci, color, err_kws, **kwargs):
    """Plot error bars at each data point."""
    # ci.T yields one (low, high) pair per timepoint; central_data is
    # zipped only to keep the iterables aligned (y_i itself is unused).
    for x_i, y_i, (low, high) in zip(x, central_data, ci.T):
        ax.plot([x_i, x_i], [low, high], color=color,
                solid_capstyle="round", **err_kws)
def _plot_boot_traces(ax, x, boot_data, color, err_kws, **kwargs):
    """Overlay every bootstrap resample as a faint trace."""
    err_kws.setdefault("alpha", 0.25)
    err_kws.setdefault("linewidth", 0.25)
    # Fold the matplotlib "lw" alias into "linewidth" so only one wins.
    if "lw" in err_kws:
        err_kws["linewidth"] = err_kws.pop("lw")
    ax.plot(x, boot_data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_traces(ax, x, data, ci, color, err_kws, **kwargs):
    """Draw one faint line per sampling unit in the raw data."""
    if isinstance(color, list):
        # Per-unit palette: each observation gets its own colour.
        err_kws.setdefault("alpha", .5)
        for i, obs in enumerate(data):
            ax.plot(x, obs, color=color[i], label="_nolegend_", **err_kws)
    else:
        # Single colour: plot all units in one vectorised call.
        err_kws.setdefault("alpha", .2)
        ax.plot(x, data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_points(ax, x, data, color, err_kws, **kwargs):
    """Plot each original data point discretely."""
    if isinstance(color, list):
        # Per-unit palette: one scatter call per observation.
        for i, obs in enumerate(data):
            ax.plot(x, obs, "o", color=color[i], alpha=0.8, markersize=4,
                    label="_nolegend_", **err_kws)
    else:
        # Single colour: all units in one call, slightly more translucent.
        ax.plot(x, data.T, "o", color=color, alpha=0.5, markersize=4,
                label="_nolegend_", **err_kws)
def _plot_boot_kde(ax, x, boot_data, color, **kwargs):
    """Plot the kernel density estimate of the bootstrap distribution."""
    # Drop the raw data so _ts_kde receives the bootstrap samples instead.
    kwargs.pop("data")
    _ts_kde(ax, x, boot_data, color, **kwargs)
def _plot_unit_kde(ax, x, data, color, **kwargs):
    """Plot the kernel density estimate over the sample of units."""
    _ts_kde(ax, x, data, color, **kwargs)
def _ts_kde(ax, x, data, color, **kwargs):
    """Upsample over time and plot a KDE of the bootstrap distribution."""
    y_min, y_max = data.min(), data.max()
    y_vals = np.linspace(y_min, y_max, 100)
    # Interpolate each trace onto a dense 100-point time grid.
    dense_x = np.linspace(x.min(), x.max(), 100)
    data_upsample = interpolate.interp1d(x, data)(dense_x)
    # One Gaussian KDE per (upsampled) timepoint, evaluated on the y grid.
    kde_data = np.transpose([stats.kde.gaussian_kde(pt_data)(y_vals)
                             for pt_data in data_upsample.T])
    # Encode the density in the alpha channel of a single-colour RGBA image.
    rgb = mpl.colors.ColorConverter().to_rgb(color)
    img = np.zeros((kde_data.shape[0], kde_data.shape[1], 4))
    img[:, :, :3] = rgb
    # Normalise per-timepoint, then clip so alpha stays in [0, 1].
    kde_data /= kde_data.max(axis=0)
    kde_data[kde_data > 1] = 1
    img[:, :, 3] = kde_data
    ax.imshow(img, interpolation="spline16", zorder=2,
              extent=(x.min(), x.max(), y_min, y_max),
              aspect="auto", origin="lower")
|
bsd-3-clause
|
holdenk/spark
|
python/pyspark/sql/tests/test_pandas_udf_typehints.py
|
22
|
9603
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import inspect
from typing import Union, Iterator, Tuple
from pyspark.sql.functions import mean, lit
from pyspark.testing.sqlutils import ReusedSQLTestCase, \
have_pandas, have_pyarrow, pandas_requirement_message, \
pyarrow_requirement_message
from pyspark.sql.pandas.typehints import infer_eval_type
from pyspark.sql.pandas.functions import pandas_udf, PandasUDFType
from pyspark.sql import Row
if have_pandas:
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
@unittest.skipIf(
    not have_pandas or not have_pyarrow,
    pandas_requirement_message or pyarrow_requirement_message)  # type: ignore[arg-type]
class PandasUDFTypeHintsTests(ReusedSQLTestCase):
    """Tests that ``infer_eval_type`` maps Python type hints to the right
    pandas UDF eval type, and that type-hinted UDFs behave like their
    explicitly-typed equivalents."""

    def test_type_annotation_scalar(self):
        # Series/DataFrame in, Series/DataFrame out -> SCALAR, including
        # *args, **kwargs, keyword-only and Union annotations.
        def func(col: pd.Series) -> pd.Series:
            pass
        self.assertEqual(
            infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)

        def func(col: pd.DataFrame, col1: pd.Series) -> pd.DataFrame:
            pass
        self.assertEqual(
            infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)

        def func(col: pd.DataFrame, *args: pd.Series) -> pd.Series:
            pass
        self.assertEqual(
            infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)

        def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame) -> pd.Series:
            pass
        self.assertEqual(
            infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)

        def func(col: pd.Series, *, col2: pd.DataFrame) -> pd.DataFrame:
            pass
        self.assertEqual(
            infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)

        def func(col: Union[pd.Series, pd.DataFrame], *, col2: pd.DataFrame) -> pd.Series:
            pass
        self.assertEqual(
            infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)

    def test_type_annotation_scalar_iter(self):
        # Iterator[...] in and out -> SCALAR_ITER, with tuple payloads
        # (fixed-length or variadic) also accepted.
        def func(iter: Iterator[pd.Series]) -> Iterator[pd.Series]:
            pass
        self.assertEqual(
            infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)

        def func(iter: Iterator[Tuple[pd.DataFrame, pd.Series]]) -> Iterator[pd.DataFrame]:
            pass
        self.assertEqual(
            infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)

        def func(iter: Iterator[Tuple[pd.DataFrame, ...]]) -> Iterator[pd.Series]:
            pass
        self.assertEqual(
            infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)

        def func(
            iter: Iterator[Tuple[Union[pd.DataFrame, pd.Series], ...]]
        ) -> Iterator[pd.Series]:
            pass
        self.assertEqual(
            infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)

    def test_type_annotation_group_agg(self):
        # Series/DataFrame in, scalar (str/int/Row/float) out -> GROUPED_AGG.
        def func(col: pd.Series) -> str:
            pass
        self.assertEqual(
            infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)

        def func(col: pd.DataFrame, col1: pd.Series) -> int:
            pass
        self.assertEqual(
            infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)

        def func(col: pd.DataFrame, *args: pd.Series) -> Row:
            pass
        self.assertEqual(
            infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)

        def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame) -> str:
            pass
        self.assertEqual(
            infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)

        def func(col: pd.Series, *, col2: pd.DataFrame) -> float:
            pass
        self.assertEqual(
            infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)

        def func(col: Union[pd.Series, pd.DataFrame], *, col2: pd.DataFrame) -> float:
            pass
        self.assertEqual(
            infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)

    def test_type_annotation_negative(self):
        # Unsupported or missing annotations raise with a useful message.
        def func(col: str) -> pd.Series:
            pass
        self.assertRaisesRegex(
            NotImplementedError,
            "Unsupported signature.*str",
            infer_eval_type, inspect.signature(func))

        def func(col: pd.DataFrame, col1: int) -> pd.DataFrame:
            pass
        self.assertRaisesRegex(
            NotImplementedError,
            "Unsupported signature.*int",
            infer_eval_type, inspect.signature(func))

        def func(col: Union[pd.DataFrame, str], col1: int) -> pd.DataFrame:
            pass
        self.assertRaisesRegex(
            NotImplementedError,
            "Unsupported signature.*str",
            infer_eval_type, inspect.signature(func))

        def func(col: pd.Series) -> Tuple[pd.DataFrame]:
            pass
        self.assertRaisesRegex(
            NotImplementedError,
            "Unsupported signature.*Tuple",
            infer_eval_type, inspect.signature(func))

        def func(col, *args: pd.Series) -> pd.Series:
            pass
        self.assertRaisesRegex(
            ValueError,
            "should be specified.*Series",
            infer_eval_type, inspect.signature(func))

        def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame):
            pass
        self.assertRaisesRegex(
            ValueError,
            "should be specified.*Series",
            infer_eval_type, inspect.signature(func))

        def func(col: pd.Series, *, col2) -> pd.DataFrame:
            pass
        self.assertRaisesRegex(
            ValueError,
            "should be specified.*Series",
            infer_eval_type, inspect.signature(func))

    def test_scalar_udf_type_hint(self):
        # End-to-end: a type-hinted scalar UDF matches the SQL expression.
        df = self.spark.range(10).selectExpr("id", "id as v")

        def plus_one(v: Union[pd.Series, pd.DataFrame]) -> pd.Series:
            return v + 1

        plus_one = pandas_udf("long")(plus_one)
        actual = df.select(plus_one(df.v).alias("plus_one"))
        expected = df.selectExpr("(v + 1) as plus_one")
        assert_frame_equal(expected.toPandas(), actual.toPandas())

    def test_scalar_iter_udf_type_hint(self):
        # End-to-end: iterator-style scalar UDF matches the SQL expression.
        df = self.spark.range(10).selectExpr("id", "id as v")

        def plus_one(itr: Iterator[pd.Series]) -> Iterator[pd.Series]:
            for s in itr:
                yield s + 1

        plus_one = pandas_udf("long")(plus_one)
        actual = df.select(plus_one(df.v).alias("plus_one"))
        expected = df.selectExpr("(v + 1) as plus_one")
        assert_frame_equal(expected.toPandas(), actual.toPandas())

    def test_group_agg_udf_type_hint(self):
        # End-to-end: grouped-agg UDF (scalar return hint) matches mean().
        df = self.spark.range(10).selectExpr("id", "id as v")

        def weighted_mean(v: pd.Series, w: pd.Series) -> float:
            return np.average(v, weights=w)

        weighted_mean = pandas_udf("double")(weighted_mean)
        actual = df.groupby('id').agg(weighted_mean(df.v, lit(1.0))).sort('id')
        expected = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
        assert_frame_equal(expected.toPandas(), actual.toPandas())

    def test_ignore_type_hint_in_group_apply_in_pandas(self):
        # applyInPandas takes the schema argument; type hints are ignored.
        df = self.spark.range(10)

        def pandas_plus_one(v: pd.DataFrame) -> pd.DataFrame:
            return v + 1

        actual = df.groupby('id').applyInPandas(pandas_plus_one, schema=df.schema).sort('id')
        expected = df.selectExpr("id + 1 as id")
        assert_frame_equal(expected.toPandas(), actual.toPandas())

    def test_ignore_type_hint_in_cogroup_apply_in_pandas(self):
        # Cogrouped applyInPandas likewise ignores the type hints.
        df = self.spark.range(10)

        def pandas_plus_one(left: pd.DataFrame, right: pd.DataFrame) -> pd.DataFrame:
            return left + 1

        actual = df.groupby('id').cogroup(
            self.spark.range(10).groupby("id")
        ).applyInPandas(pandas_plus_one, schema=df.schema).sort('id')
        expected = df.selectExpr("id + 1 as id")
        assert_frame_equal(expected.toPandas(), actual.toPandas())

    def test_ignore_type_hint_in_map_in_pandas(self):
        # mapInPandas likewise ignores the type hints.
        df = self.spark.range(10)

        def pandas_plus_one(iter: Iterator[pd.DataFrame]) -> Iterator[pd.DataFrame]:
            return map(lambda v: v + 1, iter)

        actual = df.mapInPandas(pandas_plus_one, schema=df.schema)
        expected = df.selectExpr("id + 1 as id")
        assert_frame_equal(expected.toPandas(), actual.toPandas())
if __name__ == "__main__":
    from pyspark.sql.tests.test_pandas_udf_typehints import *  # noqa: #401
    try:
        # Emit JUnit-style XML reports when xmlrunner is available (CI).
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        # Fall back to the default text test runner.
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/pandas/tseries/index.py
|
9
|
75758
|
# pylint: disable=E1101
from __future__ import division
import operator
import warnings
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from pandas.core.common import (_NS_DTYPE, _INT64_DTYPE,
_values_from_object, _maybe_box,
is_object_dtype, is_datetime64_dtype,
is_datetimetz, is_dtype_equal,
ABCSeries, is_integer, is_float,
DatetimeTZDtype)
from pandas.io.common import PerformanceWarning
from pandas.core.index import Index, Int64Index, Float64Index
import pandas.compat as compat
from pandas.compat import u
from pandas.tseries.frequencies import (
to_offset, get_period_alias,
Resolution)
from pandas.tseries.base import DatelikeOps, DatetimeIndexOpsMixin
from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay
from pandas.tseries.tools import parse_time_string, normalize_date
from pandas.tseries.timedeltas import to_timedelta
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
from pandas.lib import Timestamp
import pandas.lib as lib
import pandas.tslib as tslib
import pandas._period as period
import pandas.algos as _algos
import pandas.index as _index
def _utc():
    """Return the pytz UTC timezone singleton.

    The import is deferred so merely importing this module does not
    require pytz.
    """
    import pytz
    return pytz.utc
# -------- some conversion wrapper functions
def _field_accessor(name, field, docstring=None):
    """Build a read-only property extracting a datetime *field*
    (e.g. ``'year'``, ``'is_month_start'``) from the index's int64 values."""
    def f(self):
        values = self.asi8
        # Timezone-aware indexes store UTC; field extraction must run on
        # local wall-clock timestamps.
        if self.tz is not None:
            utc = _utc()
            if self.tz is not utc:
                values = self._local_timestamps()
        if field in ['is_month_start', 'is_month_end',
                     'is_quarter_start', 'is_quarter_end',
                     'is_year_start', 'is_year_end']:
            # Start/end flags depend on the frequency's anchor month
            # (startingMonth/month keyword); default to December.
            month_kw = self.freq.kwds.get('startingMonth', self.freq.kwds.get('month', 12)) if self.freq else 12
            result = tslib.get_start_end_field(values, field, self.freqstr, month_kw)
        else:
            result = tslib.get_date_field(values, field)
        # Mask NaT slots (converting to float64 so NaN can be used).
        return self._maybe_mask_results(result,convert='float64')
    f.__name__ = name
    f.__doc__ = docstring
    return property(f)
def _dt_index_cmp(opname, nat_result=False):
    """
    Wrap comparison operations to convert datetime-like to datetime64

    ``nat_result`` is the value (True/False) that positions involving NaT
    should evaluate to — e.g. True for ``__ne__``.
    """
    def wrapper(self, other):
        func = getattr(super(DatetimeIndex, self), opname)
        if isinstance(other, datetime) or isinstance(other, compat.string_types):
            # Scalar path: parse/convert to an m8 scalar in this index's tz.
            other = _to_m8(other, tz=self.tz)
            result = func(other)
            if com.isnull(other):
                result.fill(nat_result)
        else:
            # Array-like path: coerce to something with an i8 view.
            if isinstance(other, list):
                other = DatetimeIndex(other)
            elif not isinstance(other, (np.ndarray, Index, ABCSeries)):
                other = _ensure_datetime64(other)
            result = func(np.asarray(other))
            result = _values_from_object(result)
            # Positions that are NaT on the *other* side get nat_result.
            if isinstance(other, Index):
                o_mask = other.values.view('i8') == tslib.iNaT
            else:
                o_mask = other.view('i8') == tslib.iNaT
            if o_mask.any():
                result[o_mask] = nat_result
        # Positions that are NaT on *this* side also get nat_result.
        mask = self.asi8 == tslib.iNaT
        if mask.any():
            result[mask] = nat_result
        # support of bool dtype indexers
        if com.is_bool_dtype(result):
            return result
        return Index(result)
    return wrapper
def _ensure_datetime64(other):
if isinstance(other, np.datetime64):
return other
raise TypeError('%s type object %s' % (type(other), str(other)))
# Module-level constant: midnight (00:00), reused by time-of-day logic.
_midnight = time(0, 0)
def _new_DatetimeIndex(cls, d):
    """ This is called upon unpickling, rather than the default which doesn't have arguments
    and breaks __new__ """
    # data are already in UTC
    # so need to localize
    # Pop tz first so __new__ does not try to localize a second time.
    tz = d.pop('tz',None)
    result = cls.__new__(cls, verify_integrity=False, **d)
    if tz is not None:
        result = result.tz_localize('UTC').tz_convert(tz)
    return result
class DatetimeIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency information.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
tz : pytz.timezone or dateutil.tz.tzfile
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
- 'infer' will attempt to infer fall dst-transition hours based on order
- bool-ndarray where True signifies a DST time, False signifies
a non-DST time (note that this flag is only applicable for ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous times
infer_dst : boolean, default False (DEPRECATED)
Attempt to infer fall dst-transition hours based on order
name : object
Name to be stored in the index
"""
# --- class-level configuration -------------------------------------------
_typ = 'datetimeindex'
# joins involving a DatetimeIndex prefer its join implementation
_join_precedence = 10
def _join_i8_wrapper(joinf, **kwargs):
    # wrap the int64 join routines so joined values come back as M8[ns]
    return DatetimeIndexOpsMixin._join_i8_wrapper(joinf, dtype='M8[ns]', **kwargs)
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
    _algos.left_join_indexer_unique_int64, with_indexers=False)
_arrmap = None
# comparison operators with NaT-aware semantics (see _dt_index_cmp);
# only __ne__ treats NaT positions as True
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__', nat_result=True)
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
_engine_type = _index.DatetimeEngine
# default (class-level) values; instances override these
tz = None
offset = None
_comparables = ['name', 'freqstr', 'tz']
_attributes = ['name', 'freq', 'tz']
# properties delegated to the .dt accessor machinery
_datetimelike_ops = ['year','month','day','hour','minute','second',
    'weekofyear','week','dayofweek','weekday','dayofyear','quarter', 'days_in_month', 'daysinmonth',
    'date','time','microsecond','nanosecond','is_month_start','is_month_end',
    'is_quarter_start','is_quarter_end','is_year_start','is_year_end',
    'tz','freq']
_is_numeric_dtype = False
@deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
                 mapping={True: 'infer', False: 'raise'})
def __new__(cls, data=None,
            freq=None, start=None, end=None, periods=None,
            copy=False, name=None, tz=None,
            verify_integrity=True, normalize=False,
            closed=None, ambiguous='raise', dtype=None, **kwargs):
    # Two construction modes:
    #   1. data is None  -> generate a regular range from start/end/periods
    #   2. data supplied -> coerce the input to datetime64[ns] values
    dayfirst = kwargs.pop('dayfirst', None)
    yearfirst = kwargs.pop('yearfirst', None)

    freq_infer = False
    if not isinstance(freq, DateOffset):
        # if a passed freq is None, don't infer automatically
        if freq != 'infer':
            freq = to_offset(freq)
        else:
            freq_infer = True
            freq = None

    if periods is not None:
        if is_float(periods):
            # integral floats are accepted for convenience
            periods = int(periods)
        elif not is_integer(periods):
            raise ValueError('Periods must be a number, got %s' %
                             str(periods))

    if data is None and freq is None:
        raise ValueError("Must provide freq argument if no data is "
                         "supplied")

    if data is None:
        # range-generation mode
        return cls._generate(start, end, periods, name, freq,
                             tz=tz, normalize=normalize, closed=closed,
                             ambiguous=ambiguous)

    if not isinstance(data, (np.ndarray, Index, ABCSeries)):
        if np.isscalar(data):
            raise ValueError('DatetimeIndex() must be called with a '
                             'collection of some kind, %s was passed'
                             % repr(data))
        # other iterable of some kind
        if not isinstance(data, (list, tuple)):
            data = list(data)
        data = np.asarray(data, dtype='O')

        # try a few ways to make it datetime64
        if lib.is_string_array(data):
            data = tslib.parse_str_array_to_datetime(data, freq=freq,
                                                     dayfirst=dayfirst,
                                                     yearfirst=yearfirst)
        else:
            data = tools.to_datetime(data, errors='raise')
            data.offset = freq
            if isinstance(data, DatetimeIndex):
                if name is not None:
                    data.name = name
                if tz is not None:
                    return data.tz_localize(tz, ambiguous=ambiguous)
                return data

    if issubclass(data.dtype.type, compat.string_types):
        data = tslib.parse_str_array_to_datetime(data, freq=freq,
                                                 dayfirst=dayfirst,
                                                 yearfirst=yearfirst)

    if issubclass(data.dtype.type, np.datetime64) or is_datetimetz(data):
        if isinstance(data, ABCSeries):
            data = data._values
        if isinstance(data, DatetimeIndex):
            if tz is None:
                # inherit the tz from the passed index
                tz = data.tz
            subarr = data.values
            if freq is None:
                # inherit freq too; data is already known-consistent
                freq = data.offset
                verify_integrity = False
        else:
            if data.dtype != _NS_DTYPE:
                subarr = tslib.cast_to_nanoseconds(data)
            else:
                subarr = data
    elif data.dtype == _INT64_DTYPE:
        # raw i8 ordinals: reinterpret (or copy) as M8[ns]
        if isinstance(data, Int64Index):
            raise TypeError('cannot convert Int64Index->DatetimeIndex')
        if copy:
            subarr = np.asarray(data, dtype=_NS_DTYPE)
        else:
            subarr = data.view(_NS_DTYPE)
    else:
        if isinstance(data, (ABCSeries, Index)):
            values = data._values
        else:
            values = data
        if lib.is_string_array(values):
            subarr = tslib.parse_str_array_to_datetime(values, freq=freq, dayfirst=dayfirst,
                                                       yearfirst=yearfirst)
        else:
            try:
                subarr = tools.to_datetime(data, box=False)
                # make sure that we have a index/ndarray like (and not a Series)
                if isinstance(subarr, ABCSeries):
                    subarr = subarr._values
                    if subarr.dtype == np.object_:
                        subarr = tools._to_datetime(subarr, box=False)
            except ValueError:
                # tz aware
                subarr = tools._to_datetime(data, box=False, utc=True)

            # we may not have been able to convert
            if not (is_datetimetz(subarr) or np.issubdtype(subarr.dtype, np.datetime64)):
                raise ValueError('Unable to convert %s to datetime dtype'
                                 % str(data))

    if isinstance(subarr, DatetimeIndex):
        if tz is None:
            tz = subarr.tz
    else:
        if tz is not None:
            tz = tslib.maybe_get_tz(tz)
            if (not isinstance(data, DatetimeIndex) or
                    getattr(data, 'tz', None) is None):
                # Convert tz-naive to UTC
                ints = subarr.view('i8')
                subarr = tslib.tz_localize_to_utc(ints, tz,
                                                  ambiguous=ambiguous)
            subarr = subarr.view(_NS_DTYPE)

    subarr = cls._simple_new(subarr, name=name, freq=freq, tz=tz)

    # if dtype is provided, coerce here
    if dtype is not None:
        if not is_dtype_equal(subarr.dtype, dtype):
            if subarr.tz is not None:
                raise ValueError("cannot localize from non-UTC data")
            dtype = DatetimeTZDtype.construct_from_string(dtype)
            subarr = subarr.tz_localize(dtype.tz)

    if verify_integrity and len(subarr) > 0:
        # verify that the data actually conforms to the requested freq
        if freq is not None and not freq_infer:
            inferred = subarr.inferred_freq
            if inferred != freq.freqstr:
                on_freq = cls._generate(subarr[0], None, len(subarr), None, freq, tz=tz, ambiguous=ambiguous)
                if not np.array_equal(subarr.asi8, on_freq.asi8):
                    raise ValueError('Inferred frequency {0} from passed dates does not '
                                     'conform to passed frequency {1}'.format(inferred, freq.freqstr))

    if freq_infer:
        inferred = subarr.inferred_freq
        if inferred:
            subarr.offset = to_offset(inferred)

    return subarr
@classmethod
def _generate(cls, start, end, periods, name, offset,
              tz=None, normalize=False, ambiguous='raise', closed=None):
    # Build a regular range; exactly two of start/end/periods define it.
    if com._count_not_none(start, end, periods) != 2:
        raise ValueError('Must specify two of start, end, or periods')

    _normalized = True

    if start is not None:
        start = Timestamp(start)

    if end is not None:
        end = Timestamp(end)

    left_closed = False
    right_closed = False

    if start is None and end is None:
        if closed is not None:
            raise ValueError("Closed has to be None if not both of start"
                             "and end are defined")

    if closed is None:
        left_closed = True
        right_closed = True
    elif closed == "left":
        left_closed = True
    elif closed == "right":
        right_closed = True
    else:
        raise ValueError("Closed has to be either 'left', 'right' or None")

    try:
        inferred_tz = tools._infer_tzinfo(start, end)
    except:
        raise TypeError('Start and end cannot both be tz-aware with '
                        'different timezones')

    inferred_tz = tslib.maybe_get_tz(inferred_tz)

    # these may need to be localized
    tz = tslib.maybe_get_tz(tz)
    if tz is not None:
        date = start or end
        if date.tzinfo is not None and hasattr(tz, 'localize'):
            # pytz zones need localize() to resolve the correct offset
            tz = tz.localize(date.replace(tzinfo=None)).tzinfo

    if tz is not None and inferred_tz is not None:
        if not inferred_tz == tz:
            raise AssertionError("Inferred time zone not equal to passed "
                                 "time zone")
    elif inferred_tz is not None:
        tz = inferred_tz

    if start is not None:
        if normalize:
            start = normalize_date(start)
            _normalized = True
        else:
            _normalized = _normalized and start.time() == _midnight

    if end is not None:
        if normalize:
            end = normalize_date(end)
            _normalized = True
        else:
            _normalized = _normalized and end.time() == _midnight

    if hasattr(offset, 'delta') and offset != offsets.Day():
        # Tick-like (sub-daily) offsets: localize endpoints up front
        if inferred_tz is None and tz is not None:
            # naive dates
            if start is not None and start.tz is None:
                start = start.tz_localize(tz, ambiguous=False)
            if end is not None and end.tz is None:
                end = end.tz_localize(tz, ambiguous=False)

        if start and end:
            if start.tz is None and end.tz is not None:
                start = start.tz_localize(end.tz, ambiguous=False)
            if end.tz is None and start.tz is not None:
                end = end.tz_localize(start.tz, ambiguous=False)

        if _use_cached_range(offset, _normalized, start, end):
            index = cls._cached_range(start, end, periods=periods,
                                      offset=offset, name=name)
        else:
            index = _generate_regular_range(start, end, periods, offset)
    else:
        # daily-or-coarser offsets: generate on naive stamps, localize after
        if tz is not None:
            # naive dates
            if start is not None and start.tz is not None:
                start = start.replace(tzinfo=None)
            if end is not None and end.tz is not None:
                end = end.replace(tzinfo=None)

        if start and end:
            if start.tz is None and end.tz is not None:
                end = end.replace(tzinfo=None)
            if end.tz is None and start.tz is not None:
                start = start.replace(tzinfo=None)

        if _use_cached_range(offset, _normalized, start, end):
            index = cls._cached_range(start, end, periods=periods,
                                      offset=offset, name=name)
        else:
            index = _generate_regular_range(start, end, periods, offset)

        if tz is not None and getattr(index, 'tz', None) is None:
            index = tslib.tz_localize_to_utc(com._ensure_int64(index), tz,
                                             ambiguous=ambiguous)
            index = index.view(_NS_DTYPE)

    index = cls._simple_new(index, name=name, freq=offset, tz=tz)

    # trim endpoints according to the `closed` argument
    if not left_closed:
        index = index[1:]
    if not right_closed:
        index = index[:-1]

    return index
@property
def _box_func(self):
    """Callable that boxes a raw value into a Timestamp carrying this
    index's offset and timezone."""
    def _box(value):
        return Timestamp(value, offset=self.offset, tz=self.tz)
    return _box
def _convert_for_op(self, value):
    """ Convert value to be insertable to ndarray """
    if not self._has_same_tz(value):
        raise ValueError('Passed item and index have different timezone')
    return _to_m8(value)
def _local_timestamps(self):
    """i8 values converted from UTC to local wall time in ``self.tz``."""
    utc = _utc()
    values = self.asi8
    if self.is_monotonic:
        return tslib.tz_convert(values, utc, self.tz)
    # tz_convert wants sorted input: convert a sorted copy, then scatter
    # the converted values back into the original order
    order = values.argsort()
    converted = tslib.tz_convert(values.take(order), utc, self.tz)
    n = len(order)
    inverse = np.empty(n, dtype=np.int_)
    inverse.put(order, np.arange(n))
    return converted.take(inverse)
@classmethod
def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None, **kwargs):
    """
    we require the we have a dtype compat for the values
    if we are passed a non-dtype compat, then coerce using the constructor
    """
    if not getattr(values, 'dtype', None):
        # empty, but with dtype compat
        if values is None:
            values = np.empty(0, dtype=_NS_DTYPE)
            return cls(values, name=name, freq=freq, tz=tz, dtype=dtype, **kwargs)
        values = np.array(values, copy=False)

    if is_object_dtype(values):
        # object arrays need full parsing: fall back to the constructor
        return cls(values, name=name, freq=freq, tz=tz, dtype=dtype, **kwargs).values
    elif not is_datetime64_dtype(values):
        values = com._ensure_int64(values).view(_NS_DTYPE)

    # fast path: attach attributes without re-validating
    result = object.__new__(cls)
    result._data = values
    result.name = name
    result.offset = freq
    result.tz = tslib.maybe_get_tz(tz)
    result._reset_identity()
    return result
@property
def tzinfo(self):
    """Timezone of the index; simply mirrors the ``tz`` attribute."""
    return self.tz
@cache_readonly
def _timezone(self):
    """Normalized timezone, comparable across pytz / dateutil objects."""
    zone = self.tzinfo
    return tslib.get_timezone(zone)
def _has_same_tz(self, other):
    """True when *other*'s timezone matches this index's timezone."""
    if isinstance(other, np.datetime64):
        # np.datetime64 carries no tzinfo; go through Timestamp first
        other = Timestamp(other)
    other_zone = tslib.get_timezone(getattr(other, 'tzinfo', '__no_tz__'))
    return self._timezone == other_zone
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
                  name=None):
    # Slice a lazily-built, per-offset cached range (_CACHE_START.._CACHE_END)
    # instead of regenerating stamps for common offsets.
    if start is None and end is None:
        # I somewhat believe this should never be raised externally and therefore
        # should be a `PandasError` but whatever...
        raise TypeError('Must specify either start or end.')
    if start is not None:
        start = Timestamp(start)
    if end is not None:
        end = Timestamp(end)
    if (start is None or end is None) and periods is None:
        raise TypeError('Must either specify period or provide both start and end.')

    if offset is None:
        # This can't happen with external-facing code, therefore PandasError
        raise TypeError('Must provide offset.')

    drc = _daterange_cache
    if offset not in _daterange_cache:
        # first request for this offset: build and memoize the full range
        xdr = generate_range(offset=offset, start=_CACHE_START,
                             end=_CACHE_END)
        arr = tools._to_datetime(list(xdr), box=False)
        cachedRange = DatetimeIndex._simple_new(arr)
        cachedRange.offset = offset
        cachedRange.tz = None
        cachedRange.name = None
        drc[offset] = cachedRange
    else:
        cachedRange = drc[offset]

    if start is None:
        # anchor on `end`, count back `periods`
        if not isinstance(end, Timestamp):
            raise AssertionError('end must be an instance of Timestamp')
        end = offset.rollback(end)
        endLoc = cachedRange.get_loc(end) + 1
        startLoc = endLoc - periods
    elif end is None:
        # anchor on `start`, count forward `periods`
        if not isinstance(start, Timestamp):
            raise AssertionError('start must be an instance of Timestamp')
        start = offset.rollforward(start)
        startLoc = cachedRange.get_loc(start)
        endLoc = startLoc + periods
    else:
        # both endpoints given: snap them onto the offset grid
        if not offset.onOffset(start):
            start = offset.rollforward(start)
        if not offset.onOffset(end):
            end = offset.rollback(end)
        startLoc = cachedRange.get_loc(start)
        endLoc = cachedRange.get_loc(end) + 1

    indexSlice = cachedRange[startLoc:endLoc]
    indexSlice.name = name
    indexSlice.offset = offset

    return indexSlice
def _mpl_repr(self):
    # how to represent ourselves to matplotlib
    boxed = tslib.ints_to_pydatetime(self.asi8, self.tz)
    return boxed
# NA sentinel used by the Index machinery for this index type.
_na_value = tslib.NaT
"""The expected NA value to use with this index."""
@cache_readonly
def _is_dates_only(self):
    """True when every stamp is midnight-aligned (dates, no time parts)."""
    from pandas.core.format import _is_dates_only
    vals = self.values
    return _is_dates_only(vals)
@property
def _formatter_func(self):
    from pandas.core.format import _get_format_datetime64
    # pick the date-only vs full-datetime formatter once, up front
    fmt = _get_format_datetime64(is_dates_only=self._is_dates_only)

    def _format(x):
        return "'%s'" % fmt(x, tz=self.tz)

    return _format
def __reduce__(self):
    # we use a special reduce here because we need
    # to simply set the .tz (and not reinterpret it)
    state = {'data': self._data}
    state.update(self._get_attributes_dict())
    return _new_DatetimeIndex, (self.__class__, state), None
def __setstate__(self, state):
    """Necessary for making this object picklable"""
    if isinstance(state, dict):
        # current (>= 0.15) pickle format
        super(DatetimeIndex, self).__setstate__(state)
    elif isinstance(state, tuple):
        # < 0.15 compat
        if len(state) == 2:
            nd_state, own_state = state
            data = np.empty(nd_state[1], dtype=nd_state[2])
            np.ndarray.__setstate__(data, nd_state)
            self.name = own_state[0]
            self.offset = own_state[1]
            self.tz = own_state[2]
            # provide numpy < 1.7 compat
            if nd_state[2] == 'M8[us]':
                new_state = np.ndarray.__reduce__(data.astype('M8[ns]'))
                np.ndarray.__setstate__(data, new_state[2])
        else:  # pragma: no cover
            data = np.empty(state)
            np.ndarray.__setstate__(data, state)
        self._data = data
        self._reset_identity()
    else:
        raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def _sub_datelike(self, other):
    # subtract a datetime from myself, yielding a TimedeltaIndex
    from pandas import TimedeltaIndex
    stamp = Timestamp(other)
    # require tz compat
    if not self._has_same_tz(stamp):
        raise TypeError("Timestamp subtraction must have the same timezones or no timezones")
    diffs = self.asi8 - stamp.value
    diffs = self._maybe_mask_results(diffs, fill_value=tslib.iNaT)
    return TimedeltaIndex(diffs, name=self.name, copy=False)
def _maybe_update_attributes(self, attrs):
    """ Update Index attributes (e.g. freq) depending on op """
    if attrs.get('freq', None) is not None:
        # a freq was set: after an op it must be re-inferred
        attrs['freq'] = 'infer'
    return attrs
def _add_delta(self, delta):
    # Add a timedelta-like / offset / TimedeltaIndex to this index.
    from pandas import TimedeltaIndex
    name = self.name
    if isinstance(delta, (Tick, timedelta, np.timedelta64)):
        # scalar timedelta
        new_values = self._add_delta_td(delta)
    elif isinstance(delta, TimedeltaIndex):
        new_values = self._add_delta_tdi(delta)
        # update name when delta is Index
        name = com._maybe_match_name(self, delta)
    elif isinstance(delta, DateOffset):
        new_values = self._add_offset(delta).asi8
    else:
        # fall back to object-dtype elementwise addition
        new_values = self.astype('O') + delta
    # build the result in UTC, then convert back to the original tz
    tz = 'UTC' if self.tz is not None else None
    result = DatetimeIndex(new_values, tz=tz, name=name, freq='infer')
    utc = _utc()
    if self.tz is not None and self.tz is not utc:
        result = result.tz_convert(self.tz)
    return result
def _add_offset(self, offset):
    """Apply a DateOffset elementwise, vectorized when possible."""
    try:
        if self.tz is None:
            values = self
        else:
            # apply on naive wall times, then re-attach the tz
            values = self.tz_localize(None)
        shifted = offset.apply_index(values)
        if self.tz is not None:
            shifted = shifted.tz_localize(self.tz)
        return shifted
    except NotImplementedError:
        # offset has no vectorized path; warn and apply per element
        warnings.warn("Non-vectorized DateOffset being applied to Series or DatetimeIndex",
                      PerformanceWarning)
        return self.astype('O') + offset
def _format_native_types(self, na_rep=u('NaT'),
                         date_format=None, **kwargs):
    """Render the values as strings for display, honouring *date_format*."""
    from pandas.core.format import _get_format_datetime64_from_values
    fmt = _get_format_datetime64_from_values(self, date_format)
    return tslib.format_array_from_datetime(
        self.asi8, tz=self.tz, format=fmt, na_rep=na_rep)
def to_datetime(self, dayfirst=False):
    # NOTE(review): `dayfirst` is accepted but ignored here — this simply
    # returns a copy of the index.
    return self.copy()
def astype(self, dtype):
    """Cast to *dtype*: object, int64, str, or tz-naive datetime64[ns]."""
    dtype = np.dtype(dtype)

    if dtype == np.object_:
        return self.asobject
    if dtype == _INT64_DTYPE:
        return self.asi8.copy()
    if dtype == _NS_DTYPE and self.tz is not None:
        # drop the timezone by round-tripping through UTC
        return self.tz_convert('UTC').tz_localize(None)
    if dtype == str:
        return self._shallow_copy(values=self.format(), infer=True)
    # pragma: no cover
    raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype)
def _get_time_micros(self):
    """Microseconds-past-midnight for each stamp, in local wall time."""
    utc = _utc()
    if self.tz is None or self.tz is utc:
        values = self.asi8
    else:
        values = self._local_timestamps()
    return tslib.get_time_micros(values)
def to_series(self, keep_tz=False):
    """
    Create a Series with both index and values equal to the index keys
    useful with map for returning an indexer based on an index

    Parameters
    ----------
    keep_tz : optional, defaults False.
        return the data keeping the timezone.

        If keep_tz is True:
          If the timezone is not set, the resulting
          Series will have a datetime64[ns] dtype.
          Otherwise the Series will have an datetime64[ns, tz] dtype; the
          tz will be preserved.

        If keep_tz is False:
          Series will have a datetime64[ns] dtype. TZ aware
          objects will have the tz removed.

    Returns
    -------
    Series
    """
    from pandas import Series
    values = self._to_embed(keep_tz)
    return Series(values, index=self, name=self.name)
def _to_embed(self, keep_tz=False):
    """
    return an array repr of this object, potentially casting to object

    This is for internal compat
    """
    if not keep_tz or self.tz is None:
        return self.values.copy()
    # preserve the tz & copy
    return self.copy(deep=True)
def to_pydatetime(self):
    """
    Return DatetimeIndex as object ndarray of datetime.datetime objects

    Returns
    -------
    datetimes : ndarray
    """
    i8vals = self.asi8
    return tslib.ints_to_pydatetime(i8vals, tz=self.tz)
def to_period(self, freq=None):
    """
    Cast to PeriodIndex at a particular frequency
    """
    from pandas.tseries.period import PeriodIndex

    if freq is None:
        # fall back to the index's set frequency, then an inferred one
        freq = self.freqstr or self.inferred_freq
        if freq is None:
            raise ValueError("You must pass a freq argument as "
                             "current index has none.")
    freq = get_period_alias(freq)
    return PeriodIndex(self.values, name=self.name, freq=freq, tz=self.tz)
def snap(self, freq='S'):
    """
    Snap time stamps to nearest occurring frequency
    """
    # Superdumb, punting on any optimizing
    freq = to_offset(freq)

    snapped = np.empty(len(self), dtype=_NS_DTYPE)

    for idx, stamp in enumerate(self):
        snap_to = stamp
        if not freq.onOffset(snap_to):
            prev_on = freq.rollback(snap_to)
            next_on = freq.rollforward(snap_to)
            # pick whichever grid point is closer (ties go forward)
            if abs(snap_to - prev_on) < abs(next_on - snap_to):
                snap_to = prev_on
            else:
                snap_to = next_on
        snapped[idx] = snap_to

    # we know it conforms; skip check
    return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
def union(self, other):
    """
    Specialized union for DatetimeIndex objects. If combine
    overlapping ranges with the same DateOffset, will be much
    faster than Index.union

    Parameters
    ----------
    other : DatetimeIndex or array-like

    Returns
    -------
    y : Index or DatetimeIndex
    """
    self._assert_can_do_setop(other)
    if not isinstance(other, DatetimeIndex):
        # best-effort coercion; fall through if it fails
        try:
            other = DatetimeIndex(other)
        except TypeError:
            pass

    this, other = self._maybe_utc_convert(other)

    if this._can_fast_union(other):
        # shared freq + monotonic + adjoining: concatenate instead
        return this._fast_union(other)
    else:
        result = Index.union(this, other)
        if isinstance(result, DatetimeIndex):
            result.tz = this.tz
            if (result.freq is None and
                    (this.freq is not None or other.freq is not None)):
                result.offset = to_offset(result.inferred_freq)
        return result
def to_perioddelta(self, freq):
    """
    Calculates TimedeltaIndex of difference between index
    values and index converted to PeriodIndex at specified
    freq.  Used for vectorized offsets

    .. versionadded:: 0.17.0

    Parameters
    ----------
    freq : Period frequency

    Returns
    -------
    y : TimedeltaIndex
    """
    period_i8 = self.to_period(freq).to_timestamp().asi8
    return to_timedelta(self.asi8 - period_i8)
def union_many(self, others):
    """
    A bit of a hack to accelerate unioning a collection of indexes
    """
    this = self
    for other in others:
        if not isinstance(this, DatetimeIndex):
            # once we've fallen back to a generic Index, stay generic
            this = Index.union(this, other)
            continue
        if not isinstance(other, DatetimeIndex):
            try:
                other = DatetimeIndex(other)
            except TypeError:
                pass
        this, other = this._maybe_utc_convert(other)
        if this._can_fast_union(other):
            this = this._fast_union(other)
        else:
            # generic union loses the tz; restore it afterwards
            tz = this.tz
            this = Index.union(this, other)
            if isinstance(this, DatetimeIndex):
                this.tz = tz
    # re-infer the frequency of the combined result
    if this.freq is None:
        this.offset = to_offset(this.inferred_freq)
    return this
def append(self, other):
    """
    Append a collection of Index options together

    Parameters
    ----------
    other : Index or list/tuple of indices

    Returns
    -------
    appended : Index
    """
    if isinstance(other, (list, tuple)):
        to_concat = [self] + list(other)
    else:
        to_concat = [self, other]

    # keep the common name only; drop it on any mismatch
    name = self.name
    for obj in to_concat:
        if isinstance(obj, Index) and obj.name != name:
            name = None
            break

    to_concat = self._ensure_compat_concat(to_concat)
    to_concat, factory = _process_concat_data(to_concat, name)

    return factory(to_concat)
def join(self, other, how='left', level=None, return_indexers=False):
    """
    See Index.join
    """
    non_joinable = ('floating', 'mixed-integer', 'mixed-integer-float',
                    'mixed')
    if (not isinstance(other, DatetimeIndex) and len(other) > 0 and
            other.inferred_type not in non_joinable):
        try:
            other = DatetimeIndex(other)
        except (TypeError, ValueError):
            pass

    this, other = self._maybe_utc_convert(other)
    return Index.join(this, other, how=how, level=level,
                      return_indexers=return_indexers)
def _maybe_utc_convert(self, other):
    """Bring self/other onto a common (UTC) timezone for set ops/joins."""
    this = self
    if isinstance(other, DatetimeIndex):
        # mixing naive and aware indexes is not allowed
        if (self.tz is None) != (other.tz is None):
            raise TypeError('Cannot join tz-naive with tz-aware '
                            'DatetimeIndex')
        if self.tz != other.tz:
            this = self.tz_convert('UTC')
            other = other.tz_convert('UTC')
    return this, other
def _wrap_joined_index(self, joined, other):
    """Box joined i8 values back into an index with appropriate metadata."""
    name = self.name if self.name == other.name else None
    can_keep_freq = (isinstance(other, DatetimeIndex)
                     and self.offset == other.offset
                     and self._can_fast_union(other))
    if can_keep_freq:
        joined = self._shallow_copy(joined)
        joined.name = name
        return joined
    tz = getattr(other, 'tz', None)
    return self._simple_new(joined, name, tz=tz)
def _can_fast_union(self, other):
    """Can the union with *other* be done by simple concatenation?

    Requires matching freq, monotonic endpoints, and overlapping or
    exactly-adjoining ranges.
    """
    if not isinstance(other, DatetimeIndex):
        return False

    freq = self.offset
    if freq is None or freq != other.offset:
        return False

    if not (self.is_monotonic and other.is_monotonic):
        return False

    if len(self) == 0 or len(other) == 0:
        return True

    # order the two ranges by their first element
    earlier, later = (self, other) if self[0] <= other[0] else (other, self)

    # Only need to "adjoin", not overlap
    try:
        return later[0] == earlier[-1] + freq or later[0] in earlier
    except ValueError:
        # if we are comparing an offset that does not propagate timezones
        # this will raise
        return False
def _fast_union(self, other):
    # Union by concatenation; caller has verified _can_fast_union.
    if len(other) == 0:
        return self.view(type(self))
    if len(self) == 0:
        return other.view(type(self))

    # to make our life easier, "sort" the two ranges
    if self[0] <= other[0]:
        left, right = self, other
    else:
        left, right = other, self

    left_start, left_end = left[0], left[-1]
    right_end = right[-1]

    if not self.offset._should_cache():
        # concatenate dates: left, plus right's values past left's end
        if left_end < right_end:
            loc = right.searchsorted(left_end, side='right')
            right_chunk = right.values[loc:]
            dates = com._concat_compat((left.values, right_chunk))
            return self._shallow_copy(dates)
        else:
            # right is entirely contained in left
            return left
    else:
        # cacheable offset: cheapest to regenerate the covering range
        return type(self)(start=left_start,
                          end=max(left_end, right_end),
                          freq=left.offset)
def __iter__(self):
    """
    Return an iterator over the boxed values

    Returns
    -------
    Timestamps : ndarray
    """
    # box in chunks of 10k for efficiency
    data = self.asi8
    total = len(self)
    chunksize = 10000
    start = 0
    while start < total:
        stop = min(start + chunksize, total)
        boxed = tslib.ints_to_pydatetime(data[start:stop], tz=self.tz,
                                         offset=self.offset, box=True)
        for stamp in boxed:
            yield stamp
        start = stop
def _wrap_union_result(self, other, result):
    """Box union results, requiring both operands share a timezone."""
    if self.tz != other.tz:
        raise ValueError('Passed item and index have different timezone')
    name = self.name if self.name == other.name else None
    return self._simple_new(result, name=name, freq=None, tz=self.tz)
def intersection(self, other):
    """
    Specialized intersection for DatetimeIndex objects. May be much faster
    than Index.intersection

    Parameters
    ----------
    other : DatetimeIndex or array-like

    Returns
    -------
    y : Index or DatetimeIndex
    """
    self._assert_can_do_setop(other)
    if not isinstance(other, DatetimeIndex):
        # best-effort coercion, then generic intersection
        try:
            other = DatetimeIndex(other)
        except (TypeError, ValueError):
            pass
        result = Index.intersection(self, other)
        if isinstance(result, DatetimeIndex):
            if result.freq is None:
                result.offset = to_offset(result.inferred_freq)
        return result
    elif (other.offset is None or self.offset is None or
          other.offset != self.offset or
          not other.offset.isAnchored() or
          (not self.is_monotonic or not other.is_monotonic)):
        # cannot use the fast range-overlap path; use generic intersection
        result = Index.intersection(self, other)
        if isinstance(result, DatetimeIndex):
            if result.freq is None:
                result.offset = to_offset(result.inferred_freq)
        return result

    # fast path: same anchored freq, both monotonic -> slice the overlap
    if len(self) == 0:
        return self
    if len(other) == 0:
        return other
    # to make our life easier, "sort" the two ranges
    if self[0] <= other[0]:
        left, right = self, other
    else:
        left, right = other, self

    end = min(left[-1], right[-1])
    start = right[0]

    if end < start:
        # no overlap
        return type(self)(data=[])
    else:
        lslice = slice(*left.slice_locs(start, end))
        left_chunk = left.values[lslice]
        return self._shallow_copy(left_chunk)
def _parsed_string_to_bounds(self, reso, parsed):
    """
    Calculate datetime bounds for parsed time string and its resolution.

    Parameters
    ----------
    reso : Resolution
        Resolution provided by parsed string.
    parsed : datetime
        Datetime from parsed string.

    Returns
    -------
    lower, upper: pd.Timestamp

    Raises
    ------
    KeyError
        If `reso` is not one of the supported resolutions.
    """
    # NOTE: removed an unused local (`is_monotonic = self.is_monotonic`)
    # that was never referenced in this method.
    if reso == 'year':
        # whole calendar year
        return (Timestamp(datetime(parsed.year, 1, 1), tz=self.tz),
                Timestamp(datetime(parsed.year, 12, 31, 23, 59, 59, 999999), tz=self.tz))
    elif reso == 'month':
        d = tslib.monthrange(parsed.year, parsed.month)[1]
        return (Timestamp(datetime(parsed.year, parsed.month, 1), tz=self.tz),
                Timestamp(datetime(parsed.year, parsed.month, d, 23, 59, 59, 999999), tz=self.tz))
    elif reso == 'quarter':
        qe = (((parsed.month - 1) + 2) % 12) + 1  # two months ahead
        d = tslib.monthrange(parsed.year, qe)[1]  # at end of month
        return (Timestamp(datetime(parsed.year, parsed.month, 1), tz=self.tz),
                Timestamp(datetime(parsed.year, qe, d, 23, 59, 59, 999999), tz=self.tz))
    elif reso == 'day':
        # upper bound is one nanosecond before the next day
        st = datetime(parsed.year, parsed.month, parsed.day)
        return (Timestamp(st, tz=self.tz),
                Timestamp(Timestamp(st + offsets.Day(), tz=self.tz).value - 1))
    elif reso == 'hour':
        st = datetime(parsed.year, parsed.month, parsed.day,
                      hour=parsed.hour)
        return (Timestamp(st, tz=self.tz),
                Timestamp(Timestamp(st + offsets.Hour(),
                                    tz=self.tz).value - 1))
    elif reso == 'minute':
        st = datetime(parsed.year, parsed.month, parsed.day,
                      hour=parsed.hour, minute=parsed.minute)
        return (Timestamp(st, tz=self.tz),
                Timestamp(Timestamp(st + offsets.Minute(),
                                    tz=self.tz).value - 1))
    elif reso == 'second':
        st = datetime(parsed.year, parsed.month, parsed.day,
                      hour=parsed.hour, minute=parsed.minute, second=parsed.second)
        return (Timestamp(st, tz=self.tz),
                Timestamp(Timestamp(st + offsets.Second(),
                                    tz=self.tz).value - 1))
    elif reso == 'microsecond':
        # exact point in time: lower == upper
        st = datetime(parsed.year, parsed.month, parsed.day,
                      parsed.hour, parsed.minute, parsed.second,
                      parsed.microsecond)
        return (Timestamp(st, tz=self.tz), Timestamp(st, tz=self.tz))
    else:
        raise KeyError
def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
    # Slice/locate values matching a partially-specified date string.
    is_monotonic = self.is_monotonic
    # only allow partial slicing when the string is coarser than the
    # index resolution, or the index is unsorted
    if ((reso in ['day', 'hour', 'minute'] and
         not (self._resolution < Resolution.get_reso(reso) or
              not is_monotonic)) or
        (reso == 'second' and
         not (self._resolution <= Resolution.RESO_SEC or
              not is_monotonic))):
        # These resolution/monotonicity validations came from GH3931,
        # GH3452 and GH2369.
        raise KeyError
    if reso == 'microsecond':
        # _partial_date_slice doesn't allow microsecond resolution, but
        # _parsed_string_to_bounds allows it.
        raise KeyError

    t1, t2 = self._parsed_string_to_bounds(reso, parsed)
    stamps = self.asi8

    if is_monotonic:
        # we are out of range
        if len(stamps) and (
            (use_lhs and t1.value < stamps[0] and t2.value < stamps[0]) or (
            (use_rhs and t1.value > stamps[-1] and t2.value > stamps[-1]))):
            raise KeyError

        # a monotonic (sorted) series can be sliced
        left = stamps.searchsorted(t1.value, side='left') if use_lhs else None
        right = stamps.searchsorted(t2.value, side='right') if use_rhs else None
        return slice(left, right)

    # unsorted: fall back to a boolean mask -> positional indexer
    lhs_mask = (stamps >= t1.value) if use_lhs else True
    rhs_mask = (stamps <= t2.value) if use_rhs else True

    # try to find a the dates
    return (lhs_mask & rhs_mask).nonzero()[0]
def _possibly_promote(self, other):
    # lift an index of datetime.date objects to a DatetimeIndex so that
    # the ensuing operation has a common type
    if other.inferred_type == 'date':
        other = DatetimeIndex(other)
    return self, other
def get_value(self, series, key):
    """
    Fast lookup of value from 1-dimensional ndarray. Only use this if you
    know what you're doing
    """
    if isinstance(key, datetime):
        # needed to localize naive datetimes
        if self.tz is not None:
            key = Timestamp(key, tz=self.tz)
        return self.get_value_maybe_box(series, key)

    if isinstance(key, time):
        # time-of-day lookup selects all matching stamps
        locs = self.indexer_at_time(key)
        return series.take(locs)

    try:
        return _maybe_box(self, Index.get_value(self, series, key), series, key)
    except KeyError:
        # fall back to partial-string indexing, then scalar boxing
        try:
            loc = self._get_string_slice(key)
            return series[loc]
        except (TypeError, ValueError, KeyError):
            pass
        try:
            return self.get_value_maybe_box(series, key)
        except (TypeError, ValueError, KeyError):
            raise KeyError(key)
def get_value_maybe_box(self, series, key):
    """Engine lookup for *key*, boxing the scalar result as needed."""
    # needed to localize naive datetimes
    if self.tz is not None:
        key = Timestamp(key, tz=self.tz)
    elif not isinstance(key, Timestamp):
        key = Timestamp(key)
    raw = self._engine.get_value(_values_from_object(series), key)
    return _maybe_box(self, raw, series, key)
    def get_loc(self, key, method=None, tolerance=None):
        """
        Get integer location for requested label

        Returns
        -------
        loc : int
        """
        if tolerance is not None:
            # try converting tolerance now, so errors don't get swallowed by
            # the try/except clauses below
            tolerance = self._convert_tolerance(tolerance)
        if isinstance(key, datetime):
            # needed to localize naive datetimes
            key = Timestamp(key, tz=self.tz)
            return Index.get_loc(self, key, method, tolerance)
        if isinstance(key, time):
            if method is not None:
                raise NotImplementedError('cannot yet lookup inexact labels '
                                          'when key is a time object')
            return self.indexer_at_time(key)
        try:
            return Index.get_loc(self, key, method, tolerance)
        except (KeyError, ValueError, TypeError):
            # fallback 1: interpret the key as a partial date string
            try:
                return self._get_string_slice(key)
            except (TypeError, KeyError, ValueError):
                pass
            # fallback 2: anything Timestamp itself can parse
            try:
                stamp = Timestamp(key, tz=self.tz)
                return Index.get_loc(self, stamp, method, tolerance)
            except (KeyError, ValueError):
                raise KeyError(key)
    def _maybe_cast_slice_bound(self, label, side, kind):
        """
        If label is a string, cast it to datetime according to resolution.

        Parameters
        ----------
        label : object
        side : {'left', 'right'}
        kind : string / None

        Returns
        -------
        label : object

        Notes
        -----
        Value of `side` parameter should be validated in caller.
        """
        if is_float(label) or isinstance(label, time) or is_integer(label):
            # numeric and time-of-day labels are not valid slice bounds here
            self._invalid_indexer('slice',label)
        if isinstance(label, compat.string_types):
            # parse at the string's own resolution (year, month, day, ...)
            freq = getattr(self, 'freqstr',
                           getattr(self, 'inferred_freq', None))
            _, parsed, reso = parse_time_string(label, freq)
            bounds = self._parsed_string_to_bounds(reso, parsed)
            # lower bound for the left edge, upper bound for the right
            return bounds[0 if side == 'left' else 1]
        else:
            return label
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
_, parsed, reso = parse_time_string(key, freq)
loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs,
use_rhs=use_rhs)
return loc
    def slice_indexer(self, start=None, end=None, step=None, kind=None):
        """
        Return indexer for specified label slice.
        Index.slice_indexer, customized to handle time slicing.

        In addition to functionality provided by Index.slice_indexer, does the
        following:

        - if both `start` and `end` are instances of `datetime.time`, it
          invokes `indexer_between_time`
        - if `start` and `end` are both either string or None perform
          value-based selection in non-monotonic cases.
        """
        # For historical reasons DatetimeIndex supports slices between two
        # instances of datetime.time as if it were applying a slice mask to
        # an array of (self.hour, self.minute, self.seconds, self.microsecond).
        if isinstance(start, time) and isinstance(end, time):
            if step is not None and step != 1:
                raise ValueError('Must have step size of 1 with time slices')
            return self.indexer_between_time(start, end)
        if isinstance(start, time) or isinstance(end, time):
            raise KeyError('Cannot mix time and non-time slice keys')
        try:
            return Index.slice_indexer(self, start, end, step)
        except KeyError:
            # For historical reasons DatetimeIndex by default supports
            # value-based partial (aka string) slices on non-monotonic arrays,
            # let's try that.
            if ((start is None or isinstance(start, compat.string_types)) and
                (end is None or isinstance(end, compat.string_types))):
                mask = True
                if start is not None:
                    start_casted = self._maybe_cast_slice_bound(start, 'left', kind)
                    mask = start_casted <= self
                if end is not None:
                    end_casted = self._maybe_cast_slice_bound(end, 'right', kind)
                    mask = (self <= end_casted) & mask
                # apply the step after masking; a full-length indexer is
                # collapsed back to slice(None)
                indexer = mask.nonzero()[0][::step]
                if len(indexer) == len(self):
                    return slice(None)
                else:
                    return indexer
            else:
                raise
# alias to offset
def _get_freq(self):
return self.offset
def _set_freq(self, value):
self.offset = value
freq = property(fget=_get_freq, fset=_set_freq, doc="get/set the frequncy of the Index")
year = _field_accessor('year', 'Y', "The year of the datetime")
month = _field_accessor('month', 'M', "The month as January=1, December=12")
day = _field_accessor('day', 'D', "The days of the datetime")
hour = _field_accessor('hour', 'h', "The hours of the datetime")
minute = _field_accessor('minute', 'm', "The minutes of the datetime")
second = _field_accessor('second', 's', "The seconds of the datetime")
millisecond = _field_accessor('millisecond', 'ms', "The milliseconds of the datetime")
microsecond = _field_accessor('microsecond', 'us', "The microseconds of the datetime")
nanosecond = _field_accessor('nanosecond', 'ns', "The nanoseconds of the datetime")
weekofyear = _field_accessor('weekofyear', 'woy', "The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 'dow',
"The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
dayofyear = _field_accessor('dayofyear', 'doy', "The ordinal day of the year")
quarter = _field_accessor('quarter', 'q', "The quarter of the date")
days_in_month = _field_accessor('days_in_month', 'dim', "The number of days in the month\n\n.. versionadded:: 0.16.0")
daysinmonth = days_in_month
is_month_start = _field_accessor('is_month_start', 'is_month_start', "Logical indicating if first day of month (defined by frequency)")
is_month_end = _field_accessor('is_month_end', 'is_month_end', "Logical indicating if last day of month (defined by frequency)")
is_quarter_start = _field_accessor('is_quarter_start', 'is_quarter_start', "Logical indicating if first day of quarter (defined by frequency)")
is_quarter_end = _field_accessor('is_quarter_end', 'is_quarter_end', "Logical indicating if last day of quarter (defined by frequency)")
is_year_start = _field_accessor('is_year_start', 'is_year_start', "Logical indicating if first day of year (defined by frequency)")
is_year_end = _field_accessor('is_year_end', 'is_year_end', "Logical indicating if last day of year (defined by frequency)")
    @property
    def time(self):
        """
        Returns numpy array of datetime.time. The time part of the Timestamps.
        """
        # can't call self.map() which tries to treat func as ufunc
        # and causes recursion warnings on python 2.6
        # NaT entries map to np.nan rather than a time object
        return self._maybe_mask_results(_algos.arrmap_object(self.asobject.values,
                                                             lambda x: np.nan if x is tslib.NaT else x.time()))
    @property
    def date(self):
        """
        Returns numpy array of datetime.date. The date part of the Timestamps.
        """
        # NOTE(review): unlike ``time`` above, NaT is not special-cased here;
        # _maybe_mask_results is expected to handle missing entries — confirm
        return self._maybe_mask_results(_algos.arrmap_object(self.asobject.values, lambda x: x.date()))
    def normalize(self):
        """
        Return DatetimeIndex with times to midnight. Length is unaltered

        Returns
        -------
        normalized : DatetimeIndex
        """
        # drop the time-of-day component of every stamp (tz-aware safe)
        new_values = tslib.date_normalize(self.asi8, self.tz)
        return DatetimeIndex(new_values, freq='infer', name=self.name,
                             tz=self.tz)
    def searchsorted(self, key, side='left'):
        """Find insertion point(s) for *key*, coercing it to datetime64[ns].

        Array-like keys are cast wholesale; scalars (including strings) go
        through ``_to_m8``.
        """
        if isinstance(key, (np.ndarray, Index)):
            key = np.array(key, dtype=_NS_DTYPE, copy=False)
        else:
            key = _to_m8(key, tz=self.tz)
        return self.values.searchsorted(key, side=side)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'datetime'
    @property
    def inferred_type(self):
        """Always 'datetime64' for a DatetimeIndex."""
        # b/c datetime is represented as microseconds since the epoch, make
        # sure we can't have ambiguous indexing
        return 'datetime64'
    @cache_readonly
    def dtype(self):
        # tz-naive data is plain datetime64[ns]; tz-aware data carries the
        # timezone in a pandas extension dtype
        if self.tz is None:
            return _NS_DTYPE
        return com.DatetimeTZDtype('ns',self.tz)
    @property
    def is_all_dates(self):
        """Every entry of a DatetimeIndex is a date, by construction."""
        return True
    @cache_readonly
    def is_normalized(self):
        """
        Returns True if all of the dates are at midnight ("no time")
        """
        # cached: the underlying values are immutable
        return tslib.dates_normalized(self.asi8, self.tz)
    @cache_readonly
    def _resolution(self):
        # finest resolution implied by the stored values (delegated to the
        # period machinery)
        return period.resolution(self.asi8, self.tz)
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if (not hasattr(other, 'inferred_type') or
other.inferred_type != 'datetime64'):
if self.offset is not None:
return False
try:
other = DatetimeIndex(other)
except:
return False
if self._has_same_tz(other):
return np.array_equal(self.asi8, other.asi8)
return False
    def insert(self, loc, item):
        """
        Make new Index inserting new item at location

        Parameters
        ----------
        loc : int
        item : object
            if not either a Python datetime or a numpy integer-like, returned
            Index dtype will be object rather than datetime.

        Returns
        -------
        new_index : Index
        """
        freq = None
        if isinstance(item, (datetime, np.datetime64)):
            self._assert_can_do_op(item)
            if not self._has_same_tz(item):
                raise ValueError('Passed item and index have different timezone')
            # check freq can be preserved on edge cases
            if self.size and self.freq is not None:
                # the frequency survives only if the new item extends the
                # regular sequence by exactly one step at either end
                if (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
                    freq = self.freq
                elif (loc == len(self)) and item - self.freq == self[-1]:
                    freq = self.freq
            item = _to_m8(item, tz=self.tz)
        try:
            new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
                                        self[loc:].asi8))
            if self.tz is not None:
                # asi8 is stored in UTC; convert back to local wall times
                new_dates = tslib.tz_convert(new_dates, 'UTC', self.tz)
            return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz)
        except (AttributeError, TypeError):
            # fall back to object index
            if isinstance(item,compat.string_types):
                return self.asobject.insert(loc, item)
            raise TypeError("cannot insert DatetimeIndex with incompatible label")
    def delete(self, loc):
        """
        Make a new DatetimeIndex with passed location(s) deleted.

        Parameters
        ----------
        loc: int, slice or array of ints
            Indicate which sub-arrays to remove.

        Returns
        -------
        new_index : DatetimeIndex
        """
        new_dates = np.delete(self.asi8, loc)
        freq = None
        if is_integer(loc):
            # removing an endpoint of a regular index preserves its frequency
            if loc in (0, -len(self), -1, len(self) - 1):
                freq = self.freq
        else:
            if com.is_list_like(loc):
                loc = lib.maybe_indices_to_slice(com._ensure_int64(np.array(loc)), len(self))
            if isinstance(loc, slice) and loc.step in (1, None):
                # a contiguous slice touching either end also preserves freq
                if (loc.start in (0, None) or loc.stop in (len(self), None)):
                    freq = self.freq
        if self.tz is not None:
            # asi8 is stored in UTC; convert back to local wall times
            new_dates = tslib.tz_convert(new_dates, 'UTC', self.tz)
        return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz)
    def tz_convert(self, tz):
        """
        Convert tz-aware DatetimeIndex from one time zone to another (using pytz/dateutil)

        Parameters
        ----------
        tz : string, pytz.timezone, dateutil.tz.tzfile or None
            Time zone for time. Corresponding timestamps would be converted to
            time zone of the TimeSeries.
            None will remove timezone holding UTC time.

        Returns
        -------
        normalized : DatetimeIndex

        Raises
        ------
        TypeError
            If DatetimeIndex is tz-naive.
        """
        tz = tslib.maybe_get_tz(tz)
        if self.tz is None:
            # tz naive, use tz_localize
            raise TypeError('Cannot convert tz-naive timestamps, use '
                            'tz_localize to localize')
        # No conversion since timestamps are all UTC to begin with
        return self._shallow_copy(tz=tz)
    @deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
                     mapping={True: 'infer', False: 'raise'})
    def tz_localize(self, tz, ambiguous='raise'):
        """
        Localize tz-naive DatetimeIndex to given time zone (using pytz/dateutil),
        or remove timezone from tz-aware DatetimeIndex

        Parameters
        ----------
        tz : string, pytz.timezone, dateutil.tz.tzfile or None
            Time zone for time. Corresponding timestamps would be converted to
            time zone of the TimeSeries.
            None will remove timezone holding local time.
        ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
            - 'infer' will attempt to infer fall dst-transition hours based on order
            - bool-ndarray where True signifies a DST time, False signifies
              a non-DST time (note that this flag is only applicable for ambiguous times)
            - 'NaT' will return NaT where there are ambiguous times
            - 'raise' will raise an AmbiguousTimeError if there are ambiguous times
        infer_dst : boolean, default False (DEPRECATED)
            Attempt to infer fall dst-transition hours based on order

        Returns
        -------
        localized : DatetimeIndex

        Raises
        ------
        TypeError
            If the DatetimeIndex is tz-aware and tz is not None.
        """
        if self.tz is not None:
            if tz is None:
                # stripping the tz: recover the local wall times from UTC
                new_dates = tslib.tz_convert(self.asi8, 'UTC', self.tz)
            else:
                raise TypeError("Already tz-aware, use tz_convert to convert.")
        else:
            tz = tslib.maybe_get_tz(tz)
            # Convert to UTC
            new_dates = tslib.tz_localize_to_utc(self.asi8, tz,
                                                 ambiguous=ambiguous)
        new_dates = new_dates.view(_NS_DTYPE)
        return self._shallow_copy(new_dates, tz=tz)
    def indexer_at_time(self, time, asof=False):
        """
        Select values at particular time of day (e.g. 9:30AM)

        Parameters
        ----------
        time : datetime.time or string
        asof : bool, default False
            not currently supported

        Returns
        -------
        values_at_time : ndarray of integer positions
        """
        from dateutil.parser import parse
        if asof:
            raise NotImplementedError("'asof' argument is not supported")
        if isinstance(time, compat.string_types):
            time = parse(time).time()
        if time.tzinfo:
            # TODO
            raise NotImplementedError("argument 'time' with timezone info is "
                                      "not supported")
        # compare times of day as microseconds since midnight
        time_micros = self._get_time_micros()
        micros = _time_to_micros(time)
        return (micros == time_micros).nonzero()[0]
    def indexer_between_time(self, start_time, end_time, include_start=True,
                             include_end=True):
        """
        Select values between particular times of day (e.g., 9:00-9:30AM)

        Parameters
        ----------
        start_time : datetime.time or string
        end_time : datetime.time or string
        include_start : boolean, default True
        include_end : boolean, default True

        Returns
        -------
        values_between_time : ndarray of integer positions
        """
        from dateutil.parser import parse
        if isinstance(start_time, compat.string_types):
            start_time = parse(start_time).time()
        if isinstance(end_time, compat.string_types):
            end_time = parse(end_time).time()
        if start_time.tzinfo or end_time.tzinfo:
            raise NotImplementedError("argument 'time' with timezone info is "
                                      "not supported")
        # compare times of day as microseconds since midnight
        time_micros = self._get_time_micros()
        start_micros = _time_to_micros(start_time)
        end_micros = _time_to_micros(end_time)
        # pick <= / < per endpoint depending on inclusivity
        if include_start and include_end:
            lop = rop = operator.le
        elif include_start:
            lop = operator.le
            rop = operator.lt
        elif include_end:
            lop = operator.lt
            rop = operator.le
        else:
            lop = rop = operator.lt
        if start_time <= end_time:
            join_op = operator.and_
        else:
            # interval wraps past midnight (e.g. 23:00-01:00): union instead
            join_op = operator.or_
        mask = join_op(lop(start_micros, time_micros),
                       rop(time_micros, end_micros))
        return mask.nonzero()[0]
    def to_julian_date(self):
        """
        Convert DatetimeIndex to Float64Index of Julian Dates.
        0 Julian date is noon January 1, 4713 BC.
        http://en.wikipedia.org/wiki/Julian_day
        """
        # http://mysite.verizon.net/aesir_research/date/jdalg2.htm
        year = self.year
        month = self.month
        day = self.day
        # Jan/Feb are treated as months 13/14 of the previous year
        testarr = month < 3
        year[testarr] -= 1
        month[testarr] += 12
        # integer part from the date, fractional part from the time of day
        return Float64Index(day +
                            np.fix((153*month - 457)/5) +
                            365*year +
                            np.floor(year / 4) -
                            np.floor(year / 100) +
                            np.floor(year / 400) +
                            1721118.5 +
                            (self.hour +
                             self.minute/60.0 +
                             self.second/3600.0 +
                             self.microsecond/3600.0/1e+6 +
                             self.nanosecond/3600.0/1e+9
                             )/24.0)
# Wire the shared Index method machinery onto the class: numeric/logical ops
# are disabled, datetime-like arithmetic (shift, +/- offsets) is enabled.
DatetimeIndex._add_numeric_methods_disabled()
DatetimeIndex._add_logical_methods_disabled()
DatetimeIndex._add_datetimelike_methods()
def _generate_regular_range(start, end, periods, offset):
    """Generate the values for a regular (fixed-frequency) date range.

    Tick offsets (fixed nanosecond stride) use a fast int64 arange; other
    offsets fall back to generating timestamps one at a time.
    """
    if isinstance(offset, Tick):
        stride = offset.nanos
        if periods is None:
            b = Timestamp(start).value
            # cannot just use e = Timestamp(end) + 1 because arange breaks when
            # stride is too large, see GH10887
            e = b + (Timestamp(end).value - b)//stride * stride + stride//2
            # end.tz == start.tz by this point due to _generate implementation
            tz = start.tz
        elif start is not None:
            b = Timestamp(start).value
            e = b + np.int64(periods) * stride
            tz = start.tz
        elif end is not None:
            # no start given: count backwards from the end point
            e = Timestamp(end).value + stride
            b = e - np.int64(periods) * stride
            tz = end.tz
        else:
            raise ValueError("at least 'start' or 'end' should be specified "
                             "if a 'period' is given.")
        data = np.arange(b, e, stride, dtype=np.int64)
        data = DatetimeIndex._simple_new(data, None, tz=tz)
    else:
        # irregular offset: materialize each timestamp individually
        if isinstance(start, Timestamp):
            start = start.to_pydatetime()
        if isinstance(end, Timestamp):
            end = end.to_pydatetime()
        xdr = generate_range(start=start, end=end,
                             periods=periods, offset=offset)
        dates = list(xdr)
        # utc = len(dates) > 0 and dates[0].tzinfo is not None
        data = tools.to_datetime(dates)
    return data
def date_range(start=None, end=None, periods=None, freq='D', tz=None,
               normalize=False, name=None, closed=None, **kwargs):
    """
    Return a fixed frequency datetime index, with day (calendar) as the default
    frequency

    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer or None, default None
        If None, must specify start and end
    freq : string or DateOffset, default 'D' (calendar daily)
        Frequency strings can have multiples, e.g. '5H'
    tz : string or None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Hong_Kong
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range
    name : str, default None
        Name of the resulting index
    closed : string or None, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)

    Notes
    -----
    2 of start, end, or periods must be specified

    Returns
    -------
    rng : DatetimeIndex
    """
    # thin wrapper: all of the generation logic lives in the constructor
    return DatetimeIndex(start=start, end=end, periods=periods,
                         freq=freq, tz=tz, normalize=normalize, name=name,
                         closed=closed, **kwargs)
def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
                normalize=True, name=None, closed=None, **kwargs):
    """
    Return a fixed frequency datetime index, with business day as the default
    frequency

    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer or None, default None
        If None, must specify start and end
    freq : string or DateOffset, default 'B' (business daily)
        Frequency strings can have multiples, e.g. '5H'
    tz : string or None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Beijing
    normalize : bool, default True
        Normalize start/end dates to midnight before generating date range
    name : str, default None
        Name for the resulting index
    closed : string or None, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)

    Notes
    -----
    2 of start, end, or periods must be specified

    Returns
    -------
    rng : DatetimeIndex
    """
    # thin wrapper: all of the generation logic lives in the constructor
    return DatetimeIndex(start=start, end=end, periods=periods,
                         freq=freq, tz=tz, normalize=normalize, name=name,
                         closed=closed, **kwargs)
def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
                normalize=True, name=None, closed=None, **kwargs):
    """
    **EXPERIMENTAL** Return a fixed frequency datetime index, with
    CustomBusinessDay as the default frequency

    .. warning:: EXPERIMENTAL

        The CustomBusinessDay class is not officially supported and the API is
        likely to change in future versions. Use this at your own risk.

    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer or None, default None
        If None, must specify start and end
    freq : string or DateOffset, default 'C' (CustomBusinessDay)
        Frequency strings can have multiples, e.g. '5H'
    tz : string or None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Beijing
    normalize : bool, default True
        Normalize start/end dates to midnight before generating date range
    name : str, default None
        Name for the resulting index
    weekmask : str, Default 'Mon Tue Wed Thu Fri'
        weekmask of valid business days, passed to ``numpy.busdaycalendar``
    holidays : list
        list/array of dates to exclude from the set of valid business days,
        passed to ``numpy.busdaycalendar``
    closed : string or None, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)

    Notes
    -----
    2 of start, end, or periods must be specified

    Returns
    -------
    rng : DatetimeIndex
    """
    if freq=='C':
        # holidays/weekmask only apply to the default custom-business-day
        # frequency; fold them into a concrete CDay offset
        holidays = kwargs.pop('holidays', [])
        weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri')
        freq = CDay(holidays=holidays, weekmask=weekmask)
    return DatetimeIndex(start=start, end=end, periods=periods, freq=freq,
                         tz=tz, normalize=normalize, name=name,
                         closed=closed, **kwargs)
def _to_m8(key, tz=None):
    """
    Coerce a Timestamp-like value (including strings) to a datetime64 scalar.
    """
    # Timestamp handles strings, datetimes and np.datetime64 uniformly
    ts = key if isinstance(key, Timestamp) else Timestamp(key, tz=tz)
    return np.int64(tslib.pydt_to_i8(ts)).view(_NS_DTYPE)
# Only date ranges whose naive endpoints fall strictly inside this window
# are eligible for the module-level cache below.
_CACHE_START = Timestamp(datetime(1950, 1, 1))
_CACHE_END = Timestamp(datetime(2030, 1, 1))
# offset -> cached generated range spanning the window above
_daterange_cache = {}
def _naive_in_cache_range(start, end):
    """Return True when both endpoints exist, are tz-naive, and fall strictly
    inside the cacheable window."""
    if start is None or end is None:
        return False
    if start.tzinfo is not None or end.tzinfo is not None:
        # tz-aware ranges are never served from the cache
        return False
    return _in_range(start, end, _CACHE_START, _CACHE_END)
def _in_range(start, end, rng_start, rng_end):
return start > rng_start and end < rng_end
def _use_cached_range(offset, _normalized, start, end):
    # Use the module-level range cache only when the offset opts in, the
    # normalization setting matches the cached one, and both endpoints are
    # naive and inside the cacheable window.
    return (offset._should_cache() and
            not (offset._normalize_cache and not _normalized) and
            _naive_in_cache_range(start, end))
def _time_to_micros(time):
seconds = time.hour * 60 * 60 + 60 * time.minute + time.second
return 1000000 * seconds + time.microsecond
def _process_concat_data(to_concat, name):
    """Prepare a list of Index-like objects for concatenation.

    Returns the (possibly converted) arrays plus a factory callable that
    builds the final Index.  All-DatetimeIndex inputs stay datetime64
    (converted to UTC when their zones differ); mixing naive and tz-aware
    stamps, or mixing with non-datetime indexes, falls back to object dtype.
    """
    klass = Index
    kwargs = {}
    concat = np.concatenate
    all_dti = True
    need_utc_convert = False
    has_naive = False
    tz = None
    for x in to_concat:
        if not isinstance(x, DatetimeIndex):
            all_dti = False
        else:
            if tz is None:
                tz = x.tz
            if x.tz is None:
                has_naive = True
            if x.tz != tz:
                # mixed timezones: normalize everything to UTC
                need_utc_convert = True
                tz = 'UTC'
    if all_dti:
        need_obj_convert = False
        if has_naive and tz is not None:
            # naive + aware cannot stay datetime64; go through objects
            need_obj_convert = True
        if need_obj_convert:
            to_concat = [x.asobject.values for x in to_concat]
        else:
            if need_utc_convert:
                to_concat = [x.tz_convert('UTC').values for x in to_concat]
            else:
                to_concat = [x.values for x in to_concat]
            # well, technically not a "class" anymore...oh well
            klass = DatetimeIndex._simple_new
            kwargs = {'tz': tz}
            concat = com._concat_compat
    else:
        # heterogeneous input: coerce any DatetimeIndex to an object array
        for i, x in enumerate(to_concat):
            if isinstance(x, DatetimeIndex):
                to_concat[i] = x.asobject.values
            elif isinstance(x, Index):
                to_concat[i] = x.values
    factory_func = lambda x: klass(concat(x), name=name, **kwargs)
    return to_concat, factory_func
|
gpl-2.0
|
gfyoung/pandas
|
pandas/tests/series/methods/test_clip.py
|
2
|
3378
|
import numpy as np
import pytest
import pandas as pd
from pandas import Series, Timestamp, isna, notna
import pandas._testing as tm
class TestSeriesClip:
    """Tests for Series.clip (thresholding values at lower/upper bounds)."""

    def test_clip(self, datetime_series):
        # one-sided clips pin min/max exactly at the threshold
        val = datetime_series.median()
        assert datetime_series.clip(lower=val).min() == val
        assert datetime_series.clip(upper=val).max() == val
        # two-sided clip matches np.clip and keeps the Series type
        result = datetime_series.clip(-0.5, 0.5)
        expected = np.clip(datetime_series, -0.5, 0.5)
        tm.assert_series_equal(result, expected)
        assert isinstance(expected, Series)

    def test_clip_types_and_nulls(self):
        # clip works across dtypes and must leave NA positions untouched
        sers = [
            Series([np.nan, 1.0, 2.0, 3.0]),
            Series([None, "a", "b", "c"]),
            Series(pd.to_datetime([np.nan, 1, 2, 3], unit="D")),
        ]
        for s in sers:
            thresh = s[2]
            lower = s.clip(lower=thresh)
            upper = s.clip(upper=thresh)
            assert lower[notna(lower)].min() == thresh
            assert upper[notna(upper)].max() == thresh
            # NA positions are preserved, not clipped
            assert list(isna(s)) == list(isna(lower))
            assert list(isna(s)) == list(isna(upper))

    def test_clip_with_na_args(self):
        """Should process np.nan argument as None """
        # GH#17276: scalar nan thresholds are treated as "no bound"
        s = Series([1, 2, 3])
        tm.assert_series_equal(s.clip(np.nan), Series([1, 2, 3]))
        tm.assert_series_equal(s.clip(upper=np.nan, lower=np.nan), Series([1, 2, 3]))
        # GH#19992: array-like thresholds propagate nan element-wise
        tm.assert_series_equal(s.clip(lower=[0, 4, np.nan]), Series([1, 4, np.nan]))
        tm.assert_series_equal(s.clip(upper=[1, np.nan, 1]), Series([1, np.nan, 1]))

    def test_clip_against_series(self):
        # GH#6966: Series-valued bounds clip element-wise
        s = Series([1.0, 1.0, 4.0])
        lower = Series([1.0, 2.0, 3.0])
        upper = Series([1.5, 2.5, 3.5])
        tm.assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))
        tm.assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))

    @pytest.mark.parametrize("inplace", [True, False])
    @pytest.mark.parametrize("upper", [[1, 2, 3], np.asarray([1, 2, 3])])
    def test_clip_against_list_like(self, inplace, upper):
        # GH#15390: list/ndarray bounds, with and without inplace
        original = Series([5, 6, 7])
        result = original.clip(upper=upper, inplace=inplace)
        expected = Series([1, 2, 3])
        if inplace:
            # inplace clip returns None; the original object is mutated
            result = original
        tm.assert_series_equal(result, expected, check_exact=True)

    def test_clip_with_datetimes(self):
        # GH#11838
        # naive and tz-aware datetimes
        t = Timestamp("2015-12-01 09:30:30")
        s = Series([Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:31:00")])
        result = s.clip(upper=t)
        expected = Series(
            [Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:30:30")]
        )
        tm.assert_series_equal(result, expected)
        t = Timestamp("2015-12-01 09:30:30", tz="US/Eastern")
        s = Series(
            [
                Timestamp("2015-12-01 09:30:00", tz="US/Eastern"),
                Timestamp("2015-12-01 09:31:00", tz="US/Eastern"),
            ]
        )
        result = s.clip(upper=t)
        expected = Series(
            [
                Timestamp("2015-12-01 09:30:00", tz="US/Eastern"),
                Timestamp("2015-12-01 09:30:30", tz="US/Eastern"),
            ]
        )
        tm.assert_series_equal(result, expected)
|
bsd-3-clause
|
bcharlas/mytrunk
|
doc/sphinx/ipython_directive.py
|
8
|
18579
|
# -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. For example, the following code in your Sphinx
config file will configure this directive for the following input/output
prompts ``Yade [1]:`` and ``-> [1]:``::
import ipython_directive as id
id.rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
id.rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
id.fmtin ='Yade [%d]:'
id.fmtout=' -> [%d]:'
id.rc_override=dict(
prompt_in1="Yade [\#]:",
prompt_in2=" .\D..",
prompt_out=" -> [\#]:"
)
id.reconfig_shell()
import ipython_console_highlighting as ich
ich.IPythonConsoleLexer.input_prompt=
re.compile("(Yade \[[0-9]+\]: )|( \.\.\.+:)")
ich.IPythonConsoleLexer.output_prompt=
re.compile("(( -> )|(Out)\[[0-9]+\]: )|( \.\.\.+:)")
ich.IPythonConsoleLexer.continue_prompt=re.compile(" \.\.\.+:")
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
- Make sure %bookmarks used internally are removed on exit.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import cStringIO
import imp
import os
import re
import shutil
import sys
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import matplotlib
import sphinx
from docutils.parsers.rst import directives
matplotlib.use('Agg')
# Our own
import IPython
from IPython.Shell import MatplotlibShell
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[a-z]', x)[0])
                        for x in sphinx_version[:2]])
# token kinds emitted by block_parser
COMMENT, INPUT, OUTPUT = range(3)
# IPython rc settings overriding the embedded shell's defaults; reassign
# before reconfig_shell() to customize prompts (see module docstring)
rc_override = {}
# regexes matching input / continuation / output prompt lines
rgxin = re.compile('In \[(\d+)\]:\s?(.*)\s*')
rgxcont = re.compile(' \.+:\s?(.*)\s*')
rgxout = re.compile('Out\[(\d+)\]:\s?(.*)\s*')
# templates used when re-rendering the session; '\D' in fmtcont is replaced
# by dots matching the width of the line number
fmtin = 'In [%d]:'
fmtout = 'Out[%d]:'
fmtcont = ' .\D.:'
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part):
    """
    part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines.  The block parser
    parses the text into a list of::

      blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]

    where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
    data is, depending on the type of token::

      COMMENT : the comment string

      INPUT: the (DECORATOR, INPUT_LINE, REST) where
         DECORATOR: the input decorator (or None)
         INPUT_LINE: the input as string (possibly multi-line)
         REST : any stdout generated by the input line (not OUTPUT)

      OUTPUT: the output string, possibly multi-line
    """
    block = []
    lines = part.split('\n')
    N = len(lines)
    i = 0
    decorator = None
    while 1:
        if i==N:
            # nothing left to parse -- the last line
            break
        line = lines[i]
        i += 1
        line_stripped = line.strip()
        if line_stripped.startswith('#'):
            block.append((COMMENT, line))
            continue
        if line_stripped.startswith('@'):
            # we're assuming at most one decorator -- may need to
            # rethink
            decorator = line_stripped
            continue
        # does this look like an input line?
        matchin = rgxin.match(line)
        if matchin:
            lineno, inputline = int(matchin.group(1)), matchin.group(2)
            # the ....: continuation string
            #continuation = '   %s:'%''.join(['.']*(len(str(lineno))+2))
            #Nc = len(continuation)
            # input lines can continue on for more than one line, if
            # we have a '\' line continuation char or a function call
            # echo line 'print'.  The input line can only be
            # terminated by the end of the block or an output line, so
            # we parse out the rest of the input line if it is
            # multiline as well as any echo text
            rest = []
            while i<N:
                # look ahead; if the next line is blank, or a comment, or
                # an output line, we're done
                nextline = lines[i]
                matchout = rgxout.match(nextline)
                matchcont = rgxcont.match(nextline)
                #print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
                if matchout or nextline.startswith('#'):
                    break
                elif matchcont: #nextline.startswith(continuation):
                    # fold continuation lines into the input line
                    inputline += '\n' + matchcont.group(1) #nextline[Nc:]
                else:
                    # anything else is echoed stdout belonging to this input
                    rest.append(nextline)
                i+= 1
            block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
            continue
        # if it looks like an output line grab all the text to the end
        # of the block
        matchout = rgxout.match(line)
        if matchout:
            lineno, output = int(matchout.group(1)), matchout.group(2)
            if i<N-1:
                output = '\n'.join([output] + lines[i:])
            block.append((OUTPUT, output))
            break
    return block
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
    def __init__(self):
        # capture everything IPython writes to stdout/stderr in one buffer
        self.cout = cStringIO.StringIO()
        IPython.Shell.Term.cout = self.cout
        IPython.Shell.Term.cerr = self.cout
        argv = ['-autocall', '0']
        self.user_ns = {}
        self.user_glocal_ns = {}
        self.IP = IPython.ipmaker.make_IPython(
            argv, self.user_ns, self.user_glocal_ns, embedded=True,
            #shell_class=IPython.Shell.InteractiveShell,
            shell_class=MatplotlibShell,
            rc_override = dict(colors = 'NoColor', **rc_override))
        self.input = ''
        self.output = ''
        # per-directive decorator state (@verbatim / @doctest / @suppress)
        self.is_verbatim = False
        self.is_doctest = False
        self.is_suppress = False
        # on the first call to the savefig decorator, we'll import
        # pyplot as plt so we can make a call to the plt.gcf().savefig
        self._pyplot_imported = False
        # we need bookmark the current dir first so we can save
        # relative to it
        self.process_input_line('bookmark ipy_basedir')
        # discard any output the bookmark command produced
        self.cout.seek(0)
        self.cout.truncate(0)
def process_input_line(self, line):
"""process the input, capturing stdout"""
#print "input='%s'"%self.input
stdout = sys.stdout
sys.stdout = self.cout
#self.IP.resetbuffer()
self.IP.push(self.IP.prefilter(line, 0))
#self.IP.runlines(line)
sys.stdout = stdout
# Callbacks for each type of token
    def process_input(self, data, input_prompt, lineno):
        """Process data block for INPUT token.

        Executes the input (unless verbatim/suppressed), re-renders the
        prompt lines, and returns
        (rendered_lines, input_lines, captured_output, is_doctest, image_file).
        """
        decorator, input, rest = data
        image_file = None
        #print 'INPUT:', data
        is_verbatim = decorator=='@verbatim' or self.is_verbatim
        is_doctest = decorator=='@doctest' or self.is_doctest
        is_suppress = decorator=='@suppress' or self.is_suppress
        is_savefig = decorator is not None and \
                     decorator.startswith('@savefig')
        input_lines = input.split('\n')
        #continuation = '   %s:'%''.join(['.']*(len(str(lineno))+2))
        #Nc = len(continuation)
        if is_savefig:
            # turn "@savefig somefile.png key=val ..." into an image directive
            saveargs = decorator.split(' ')
            filename = saveargs[1]
            outfile = os.path.join('_static/%s'%filename)
            # build out an image directive like
            # .. image:: somefile.png
            #    :width 4in
            #
            # from an input like
            # savefig somefile.png width=4in
            imagerows = ['.. image:: %s'%outfile]
            for kwarg in saveargs[2:]:
                arg, val = kwarg.split('=')
                arg = arg.strip()
                val = val.strip()
                imagerows.append('   :%s: %s'%(arg, val))
            image_file = outfile
            image_directive = '\n'.join(imagerows)
        # TODO: can we get "rest" from ipython
        #self.process_input_line('\n'.join(input_lines))
        ret = []
        is_semicolon = False
        for i, line in enumerate(input_lines):
            if line.endswith(';'):
                # a trailing semicolon suppresses output, like in IPython
                is_semicolon = True
            if i==0:
                # process the first input line
                if is_verbatim:
                    self.process_input_line('')
                else:
                    # only submit the line in non-verbatim mode
                    self.process_input_line(line)
                formatted_line = '%s %s'%(input_prompt, line)
            else:
                # process a continuation line
                if not is_verbatim:
                    self.process_input_line(line)
                formatted_line = fmtcont.replace('\D','.'*len(str(lineno)))+line #'%s %s'%(continuation, line)
            if not is_suppress:
                ret.append(formatted_line)
        if not is_suppress:
            if len(rest.strip()):
                if is_verbatim:
                    # the "rest" is the standard output of the
                    # input, which needs to be added in
                    # verbatim mode
                    ret.append(rest)
        # harvest whatever the executed input printed
        self.cout.seek(0)
        output = self.cout.read()
        if not is_suppress and not is_semicolon:
            ret.append(output)
        self.cout.truncate(0)
        return ret, input_lines, output, is_doctest, image_file
#print 'OUTPUT', output # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
ind = found.find(output_prompt)
if ind<0:
raise RuntimeError('output prompt="%s" does not match out line=%s'%(output_prompt, found))
found = found[len(output_prompt):].strip()
if found!=submitted:
raise RuntimeError('doctest failure for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted))
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
def process_comment(self, data):
"""Process data block for COMMENT token."""
if not self.is_suppress:
return [data]
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
m = rgxin.match(str(self.IP.outputcache.prompt1).strip())
lineno = int(m.group(1))
input_prompt = fmtin%lineno
output_prompt = fmtout%lineno
image_file = None
image_directive = None
# XXX - This needs a second refactor. There's too much state being
# held globally, which makes for a very awkward interface and large,
# hard to test functions. I've already broken this up at least into
# three separate processors to isolate the logic better, but this only
# serves to highlight the coupling. Next we need to clean it up...
for token, data in block:
if token==COMMENT:
out_data = self.process_comment(data)
elif token==INPUT:
out_data, input_lines, output, is_doctest, image_file= \
self.process_input(data, input_prompt, lineno)
elif token==OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
if image_file is not None:
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir')
self.process_input_line('cd -b ipy_basedir')
self.process_input_line(command)
self.process_input_line('cd -b ipy_thisdir')
self.cout.seek(0)
self.cout.truncate(0)
return ret, image_directive
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt')
# A global instance used below. XXX: not sure why this can't be created inside
# ipython_directive itself.
shell = EmbeddedSphinxShell()
def reconfig_shell():
    """Called after setting module-level variables to re-instantiate
    with the set values (since shell is instantiated first at import-time
    when module variables have default values)"""
    # NOTE(review): rebuilding the shell discards any state accumulated in
    # the previous instance (user namespace, prompt counter).
    global shell
    shell = EmbeddedSphinxShell()
def ipython_directive(name, arguments, options, content, lineno,
                      content_offset, block_text, state, state_machine,
                      ):
    """Sphinx directive handler: run the directive *content* through the
    embedded IPython shell and insert the rendered ``sourcecode`` block
    back into the document via *state_machine*.

    Recognised flag options: suppress, doctest, verbatim.
    Returns an empty node list; output is injected as reST source lines.
    """
    debug = ipython_directive.DEBUG
    # 'key in options' instead of options.has_key(key): has_key() was
    # removed in Python 3 and 'in' is the idiomatic test in Python 2 too.
    shell.is_suppress = 'suppress' in options
    shell.is_doctest = 'doctest' in options
    shell.is_verbatim = 'verbatim' in options
    #print 'ipy', shell.is_suppress, options
    # Blank-line-separated parts of the content are independent blocks.
    parts = '\n'.join(content).split('\n\n')
    lines = ['.. sourcecode:: ipython', '']
    figures = []
    for part in parts:
        block = block_parser(part)
        if len(block):
            rows, figure = shell.process_block(block)
            for row in rows:
                lines.extend([' %s'%line for line in row.split('\n')])
            if figure is not None:
                figures.append(figure)
    # Image directives for any @savefig figures go after the code block.
    for figure in figures:
        lines.append('')
        lines.extend(figure.split('\n'))
        lines.append('')
    #print lines
    if len(lines)>2:
        if debug:
            # print() call form: valid on both Python 2 and Python 3
            # (the old "print '\n'.join(lines)" statement is a Python 3
            # syntax error).
            print('\n'.join(lines))
        else:
            #print 'INSERTING %d lines'%len(lines)
            state_machine.insert_input(
                lines, state_machine.input_lines.source(0))
    return []
ipython_directive.DEBUG = False
# Enable as a proper Sphinx directive
def setup(app):
    """Register the ``ipython`` directive (with its three flag options)
    on a Sphinx application."""
    setup.app = app
    flag_options = {}
    for opt_name in ('suppress', 'doctest', 'verbatim'):
        flag_options[opt_name] = directives.flag
    app.add_directive('ipython', ipython_directive, True, (0, 2, 0),
                      **flag_options)
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
    """Run the ipython directive over representative example documents
    (savefig, doctest, verbatim, suppress, multi-line input) with DEBUG
    on, so rendered output is printed instead of inserted into a doc."""
    examples = [
        r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
        r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
        r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
   .....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
        r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: np.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
       [ 0.47102322, 0.8715456 ],
       [ 0.29370834, 0.74776844],
       [ 0.99539577, 0.1313423 ],
       [ 0.16250302, 0.21103583],
       [ 0.81626524, 0.1312433 ],
       [ 0.67338089, 0.72302393],
       [ 0.7566368 , 0.07033696],
       [ 0.22591016, 0.77731835],
       [ 0.0072729 , 0.34273127]])
""",
        r"""
In [106]: print x
jdh
In [109]: for i in range(10):
   .....:     print i
   .....:
   .....:
0
1
2
3
4
5
6
7
8
9
""",
        r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
        r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
        r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
        ]
    # Print rendered output rather than inserting it into a document.
    ipython_directive.DEBUG = True
    #options = dict(suppress=True)
    options = dict()
    for example in examples:
        content = example.split('\n')
        ipython_directive('debug', arguments=None, options=options,
                          content=content, lineno=0,
                          content_offset=None, block_text=None,
                          state=None, state_machine=None,
                          )
# Run test suite as a script
if __name__=='__main__':
    # No argument parsing; just exercise the directive examples above.
    test()
|
gpl-2.0
|
krisaju95/NewsArticleClustering
|
module7_skMeansClustering.py
|
1
|
7438
|
import pickle
import numpy as np
import pandas as pd
import os
import math
# Root folder for all pipeline inputs/outputs.
# NOTE(review): hard-coded absolute Windows path -- not portable.
path = "C:/Users/hp/Desktop/FINAL YEAR PROJECT/S8/"
D = set()
A = []
words = set()
# Feature matrices produced by earlier pipeline modules; dataFrame4 holds
# the document-document cosine similarities.
dataFrame2 = pickle.load( open(os.path.join(path, 'Feature Set','dataFrame2.p'), "rb" ))
dataFrame3 = pickle.load( open(os.path.join(path, 'Feature Set','dataFrame3.p'), "rb" ))
cosineSimilarityMatrix = pickle.load( open(os.path.join(path, 'KMeansClustering','dataFrame4.p'), "rb" ))
wordSetSize = len(dataFrame3.columns)
numberOfDocuments = len(dataFrame3.index)
m = 1
centroids = pickle.load( open(os.path.join(path, 'KMeansClustering','initialCentroids.p'), "rb" ))
# Per-document assignment table: gains "ClusterID" and
# "maxSimilarityValue" columns as the algorithm runs.
dataFrame5 = pd.DataFrame(np.zeros(numberOfDocuments).reshape(numberOfDocuments,1))
clusters = []
previousClusters = []
# Number of clusters is fixed by the pre-computed initial centroids.
k = len(centroids.index)
centroidCosineSimilarity = pd.DataFrame(np.zeros(shape = (numberOfDocuments , k)).reshape(numberOfDocuments , k))
# Check if the newly found clusters are the same as the previously found clusters
def convergenceCase():
    """Return True when every current cluster equals the corresponding
    cluster from the previous iteration (False on the first pass)."""
    # No previous iteration recorded yet: cannot have converged.
    if previousClusters == []:
        return False
    for position, currentCluster in enumerate(clusters):
        if currentCluster != previousClusters[position]:
            return False
    return True
# Given two documents, calculate their cosine similarity
def cosineSimilarity(value1 , value2):
    """Return the cosine similarity of two document vectors.

    Parameters
    ----------
    value1, value2 : pandas.Series
        Term-weight vectors of the two documents (same length).

    Returns
    -------
    float
        Dot product divided by the product of the Euclidean norms, or 0
        when either vector has zero magnitude (avoids division by zero).
    """
    # .values instead of .as_matrix(): as_matrix() was deprecated and then
    # removed in pandas 1.0, while .values works on both old and new pandas.
    v1 = value1.values
    v2 = value2.values
    dotProduct = np.dot(v1 , v2)
    d1 = math.sqrt( np.square(v1).sum() )
    d2 = math.sqrt( np.square(v2).sum() )
    if d1 * d2 == 0:
        # At least one document has no terms: define similarity as 0.
        return 0
    cosineSimilarityValue = dotProduct/(d1*d2)
    return cosineSimilarityValue
# Find the most similar centroid for each document in the dataset
def findMostSimilarCentroids():
    """For every document, record the best-matching centroid (ClusterID)
    and its similarity (maxSimilarityValue) in the global dataFrame5.

    A document whose similarities are all zero falls through to
    centroid 0 with similarity 0.
    """
    mostSimilarValue = 0
    mostSimilarCentroid = 0
    for row in dataFrame5.index:
        mostSimilarValue = 0
        mostSimilarCentroid = 0
        for column in centroidCosineSimilarity.columns:
            # Strictly greater: the first centroid wins ties.
            if centroidCosineSimilarity.ix[row , column] > mostSimilarValue:
                mostSimilarValue = centroidCosineSimilarity.ix[row , column]
                mostSimilarCentroid = column
        # .ix is deprecated/removed in modern pandas -- TODO migrate to .loc
        dataFrame5.ix[row , "ClusterID"] = mostSimilarCentroid
        dataFrame5.ix[row , "maxSimilarityValue"] = mostSimilarValue
# Initialize the set D with all the documents from the dataset
def initializeSetD():
    """Populate the global set D with every document id (column label of
    the document-document cosine similarity matrix)."""
    for documentId in cosineSimilarityMatrix.columns:
        D.add(documentId)
# Create the initial set of clusters with k empty lists, each empty list being a cluster
def initializeClusters():
    """Reset the global cluster list to k empty clusters."""
    global clusters
    clusters = [[] for _ in range(k)]
# Initialize a dataframe for the centroid vectors with zero values
def initializeCentroids():
    """Zero every centroid component (one row per centroid, one column per
    vocabulary word) before cluster sums are re-accumulated."""
    for row in centroids.index:
        for word in dataFrame3.columns:
            # .ix is deprecated/removed in modern pandas -- TODO migrate to .loc
            centroids.ix[row , word] = 0
# Find the new centroids for each cluster once the data has been updated
def calculateNewCentroids():
    """Recompute each centroid as the mean of the document vectors
    currently assigned to its cluster (assignments read from dataFrame5)."""
    global centroids
    initializeCentroids()
    clusterID = 0
    # NOTE(review): hard-coded for k == 5 clusters; breaks for other k.
    clusterSizes = [0 , 0 , 0, 0, 0]
    # .as_matrix() was removed in pandas 1.0 -- TODO migrate to .values
    dataFrame3Matrix = dataFrame3.as_matrix()
    centroidsMatrix = centroids.as_matrix()
    centroidColumns = centroids.columns
    for row in dataFrame5.index:
        clusterID = dataFrame5.ix[row , "ClusterID"]
        clusterSizes[int(clusterID)] = clusterSizes[int(clusterID)] + 1
        # Accumulate the document vector into its cluster's running sum.
        centroidsMatrix[int(clusterID)] = np.add(centroidsMatrix[int(clusterID)] , dataFrame3Matrix[row])
    for row in centroids.index:
        # Mean = sum / size.  np.divide by 0.0 yields nan/inf (with a
        # warning) for an empty cluster rather than raising.
        centroidsMatrix[row] = np.divide(centroidsMatrix[row] , float(clusterSizes[row]))
    centroids = pd.DataFrame(centroidsMatrix)
    centroids.columns = centroidColumns
# Create a dataframe with cosine similarity values for all documents with each of the centroids
def calculateCosineSimilarity():
    """Fill centroidCosineSimilarity[row, column] with the similarity of
    document `row` (from dataFrame3) to centroid `column`."""
    for row in range(numberOfDocuments):
        document1 = dataFrame3.loc[row , :]
        for column in range(k):
            document2 = centroids.loc[column , :]
            centroidCosineSimilarity.ix[row , column] = cosineSimilarity(document1 , document2)
# Based on the data in df5, place each document in its respective cluster
def generateClusters():
    """Rebuild the global clusters lists from dataFrame5's ClusterID column."""
    clusterID = 0
    initializeClusters()
    for row in dataFrame5.index:
        clusterID = int(dataFrame5.ix[row , "ClusterID"])
        clusters[clusterID].append(row)
# Find the centroid with maximum similarity for a given document and return the clusterID along with the similarity value
def findClosestCluster(row):
    """Return (clusterID, similarity) of the best centroid for document
    `row`; falls back to (0, 0) when every similarity is zero."""
    maxSimilarityValue = 0
    clusterID = 0
    for centroid in centroidCosineSimilarity.columns:
        # Strictly greater: the first centroid wins ties.
        if centroidCosineSimilarity.ix[row , centroid] > maxSimilarityValue:
            maxSimilarityValue = centroidCosineSimilarity.ix[row , centroid]
            clusterID = centroid
    return clusterID , maxSimilarityValue
# Create a dataframe with the cluster ID and similarity value for each document
def updateCentroidData():
    """Refresh dataFrame5 after the centroids have moved: if a document's
    current cluster is no longer its best match, reassign it via
    findClosestCluster; otherwise just refresh the stored similarity."""
    clusterID = 0
    newSimilarityValue = 0
    for row in dataFrame5.index:
        clusterID = int(dataFrame5.ix[row , "ClusterID"])
        if centroidCosineSimilarity.ix[row , clusterID] < dataFrame5.ix[row , "maxSimilarityValue"]:
            # Similarity to the assigned centroid dropped: re-scan all
            # centroids for a better home.
            clusterID , newSimilarityValue = findClosestCluster(row)
            dataFrame5.ix[row , "maxSimilarityValue"] = newSimilarityValue
            dataFrame5.ix[row , "ClusterID"] = clusterID
        else:
            dataFrame5.ix[row , "maxSimilarityValue"] = centroidCosineSimilarity.ix[row , clusterID]
# Main function to perform clustering on the dataset
def skMeansClustering():
    """Run spherical k-means: assign documents to initial centroids, then
    iterate (recompute centroids, reassign, regroup) until membership
    stops changing or 50 iterations elapse; persist dataFrame5 to disk."""
    global previousClusters
    print "Performing Spherical K-Means Clustering"
    calculateCosineSimilarity()
    findMostSimilarCentroids()
    generateClusters()
    for i in range(50):
        calculateNewCentroids()
        calculateCosineSimilarity()
        updateCentroidData()
        generateClusters()
        #print dataFrame5
        if convergenceCase():
            break
        else:
            print "Clustering iteration " , i + 1
            #print centroidCosineSimilarity
            # Snapshot (shallow copy) of membership for the next
            # convergence test.
            previousClusters = list(clusters)
    print "Converged in ", i , " iteration(s)"
    print "Clusters have been generated"
    print "Saving data in DataFrame5 as a pickle package and as a CSV"
    dataFrame5.to_pickle(os.path.join(path, 'KMeansClustering','dataFrame5.p'))
    dataFrame5.to_csv(os.path.join(path, 'KMeansClustering','dataFrame5.csv'))
    print "DataFrame5 has been saved"
skMeansClustering()
|
gpl-3.0
|
nlholdem/icodoom
|
ICO1/deep_feedback_learning_old/plotOutputs.py
|
4
|
2070
|
# Plot network output, error and weight traces recorded by the
# deep_feedback_learning test run.
import numpy as np
from numpy import genfromtxt
import matplotlib.pyplot as plt
#data_or = genfromtxt("test_deep_fbl_cpp_feedback_learning_or.dat", delimiter=" ")
#data_xor = genfromtxt("test_deep_fbl_cpp_feedback_learning_xor.dat", delimiter=" ")
#data_and = genfromtxt("test_deep_fbl_cpp_feedback_learning_and.dat", delimiter=" ")
data = genfromtxt("test_deep_fbl_cpp_feedback_learning.dat", delimiter=" ")
err = genfromtxt("test_deep_fbl_cpp_feedback_learning.err", delimiter=" ")
wts = genfromtxt("test_deep_fbl_cpp_feedback_learning.wts", delimiter=" ")
# Restrict all traces to the first ~2000 samples (row 0 dropped).
data = data[1:2000,:]
err = err[1:2000,:]
wts = wts[1:2000,:]
# Network layout assumed by the column arithmetic below -- TODO confirm
# against the writer of the .dat/.err/.wts files.
numFilters = 3
numInputs = 2
numNeurons = 3
plt.figure(0)
plt.plot(data[:,2], 'k') # state
plt.plot(data[:,3], 'r') # reflex
plt.plot(data[:,9], 'b') # net output
plt.plot(data[:,1], 'y') # unfiltered inputs
plt.plot(data[:,0], 'y') # unfiltered inputs
# One figure per (neuron, input, filter) triple, starting at neuron 2.
indx=0
for i in range (2,numNeurons):
    for j in range(numInputs):
        for k in range(numFilters):
            plt.figure(indx+1)
            plt.plot(10.*wts[:,3+6*indx], 'b') # weight change
            plt.plot(wts[:,5+6*indx], 'y') # filtered inputs
            plt.plot(err[:,4+5*i], 'k') # neuron error
            indx +=1
"""
plt.figure(1)
plt.plot(data[:,2], 'k')
plt.plot(data[:,3], 'r')
plt.plot(data[:,9], 'b')
plt.plot(data[:,5], 'r')
plt.figure(2)
plt.plot(err[:,4], 'b')
plt.figure(3)
plt.plot(err[:,9], 'b')
plt.figure(4)
plt.plot(err[:,14], 'b')
plt.figure(5)
plt.plot(wts[:,1], 'b')
plt.plot(wts[:,5], 'b')
plt.plot(wts[:,9], 'b')
plt.plot(wts[:,13], 'b')
plt.plot(wts[:,17], 'b')
plt.plot(wts[:,21], 'b')
"""
#plt.plot(data[:,6], 'r')
#plt.plot(data[:,7], 'y')
#plt.figure(2)
#plt.plot(data[:,4], 'k')
#plt.plot(data[:,5], 'r')
"""
plt.figure(1)
plt.plot(data_or[:,4], 'k')
plt.plot(data_or[:,5], 'r')
plt.plot(data_or[:,12], 'b')
plt.figure(2)
plt.plot(data_and[:,4], 'k')
plt.plot(data_and[:,5], 'r')
plt.plot(data_and[:,12], 'b')
plt.figure(3)
plt.plot(data_xor[:,4], 'k')
plt.plot(data_xor[:,5], 'r')
plt.plot(data_xor[:,12], 'b')
"""
plt.show()
|
gpl-3.0
|
ericdill/PyXRF
|
pyxrf/model/fit_spectrum.py
|
1
|
24546
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
__author__ = 'Li Li'
import numpy as np
import time
import copy
import six
import os
from collections import OrderedDict
import multiprocessing
from atom.api import Atom, Str, observe, Typed, Int, List, Dict, Float
from skxray.fitting.xrf_model import (ModelSpectrum, update_parameter_dict,
sum_area, set_parameter_bound, ParamController,
nnls_fit, weighted_nnls_fit, trim,
construct_linear_model, linear_spectrum_fitting,
compute_escape_peak, register_strategy)
from skxray.fitting.background import snip_method
from pyxrf.model.guessparam import (calculate_profile, fit_strategy_list)
from lmfit import fit_report
import logging
logger = logging.getLogger(__name__)
class Fit1D(Atom):
    """Atom model driving spectrum fitting for the GUI: holds the data,
    the working parameter dict, five sequential fit-strategy slots, and
    the fit results (fit_x/fit_y/residual/per-component spectra)."""
    #file_path = Str()
    file_status = Str()
    param_dict = Dict()
    element_list = List()
    #parameters = Dict()
    data_all = Typed(np.ndarray)
    data = Typed(np.ndarray)
    fit_x = Typed(np.ndarray)
    fit_y = Typed(np.ndarray)
    residual = Typed(np.ndarray)
    comps = Dict()
    # 1-based index into fit_strategy_list per sequential step; 0 = off.
    fit_strategy1 = Int(0)
    fit_strategy2 = Int(0)
    fit_strategy3 = Int(0)
    fit_strategy4 = Int(0)
    fit_strategy5 = Int(0)
    fit_result = Typed(object)
    data_title = Str()
    result_folder = Str()
    all_strategy = Typed(object) #Typed(OrderedDict)
    x0 = Typed(np.ndarray)
    y0 = Typed(np.ndarray)
    bg = Typed(np.ndarray)
    es_peak = Typed(np.ndarray)
    cal_x = Typed(np.ndarray)
    cal_y = Typed(np.ndarray)
    cal_spectrum = Dict()
    # attributes used by the ElementEdit window
    selected_element = Str()
    selected_elements = List()
    def __init__(self, *args, **kwargs):
        # 'working_directory' is a required keyword argument.
        self.result_folder = kwargs['working_directory']
        self.all_strategy = OrderedDict()
    @observe('selected_element')
    def _selected_element_changed(self, changed):
        # Expose every parameter key belonging to the chosen element
        # (substring match on the element symbol).
        element = self.selected_element.split('_')[0]
        self.selected_elements = sorted([e for e in self.param_dict.keys()
                                         if element in e])
    def get_new_param(self, param):
        """Adopt a new parameter dict (deep-copied) and re-register every
        fit strategy so it covers all elements in the new element list."""
        self.param_dict = copy.deepcopy(param)
        element_list = self.param_dict['non_fitting_values']['element_list']
        self.element_list = [e.strip(' ') for e in element_list.split(',')]
        print('get new element list : {}'.format(self.element_list))
        # register the strategy and extend the parameter list
        # to cover all given elements
        for strat_name in fit_strategy_list:
            strategy = extract_strategy(self.param_dict, strat_name)
            # register the strategy and extend the parameter list
            # to cover all given elements
            register_strategy(strat_name, strategy)
            set_parameter_bound(self.param_dict, strat_name)
    @observe('data')
    def _update_data(self, change):
        self.data = np.asarray(self.data)
    @observe('fit_strategy1')
    def update_strategy1(self, change):
        print(change)
        self.all_strategy.update({'strategy1': change['value']})
        if change['value']:
            logger.info('Strategy at step 1 is: {}'.
                        format(fit_strategy_list[change['value']-1]))
    @observe('fit_strategy2')
    def update_strategy2(self, change):
        self.all_strategy.update({'strategy2': change['value']})
        if change['value']:
            logger.info('Strategy at step 2 is: {}'.
                        format(fit_strategy_list[change['value']-1]))
    @observe('fit_strategy3')
    def update_strategy3(self, change):
        self.all_strategy.update({'strategy3': change['value']})
        if change['value']:
            logger.info('Strategy at step 3 is: {}'.
                        format(fit_strategy_list[change['value']-1]))
    @observe('fit_strategy4')
    def update_strategy4(self, change):
        self.all_strategy.update({'strategy4': change['value']})
        if change['value']:
            logger.info('Strategy at step 4 is: {}'.
                        format(fit_strategy_list[change['value']-1]))
    @observe('fit_strategy5')
    def update_strategy5(self, change):
        self.all_strategy.update({'strategy5': change['value']})
        if change['value']:
            logger.info('Strategy at step 5 is: {}'.
                        format(fit_strategy_list[change['value']-1]))
    def update_param_with_result(self):
        # Push fitted values back into the working parameter dict so the
        # next strategy step starts from the previous step's result.
        update_parameter_dict(self.param_dict, self.fit_result)
    def define_range(self):
        """
        Cut x range according to values define in param_dict.
        """
        x = np.arange(self.data.size)
        # ratio to transfer energy value back to channel value
        approx_ratio = 100
        lowv = self.param_dict['non_fitting_values']['energy_bound_low']['value'] * approx_ratio
        highv = self.param_dict['non_fitting_values']['energy_bound_high']['value'] * approx_ratio
        self.x0, self.y0 = trim(x, self.data, lowv, highv)
    def get_background(self):
        # SNIP background estimate over the trimmed spectrum.
        self.bg = snip_method(self.y0,
                              self.param_dict['e_offset']['value'],
                              self.param_dict['e_linear']['value'],
                              self.param_dict['e_quadratic']['value'])
    def escape_peak(self):
        # Escape-peak contribution at a fixed intensity ratio.
        ratio = 0.005
        xe, ye = compute_escape_peak(self.data, ratio, self.param_dict)
        lowv = self.param_dict['non_fitting_values']['energy_bound_low']
        highv = self.param_dict['non_fitting_values']['energy_bound_high']
        xe, self.es_peak = trim(xe, ye, lowv, highv)
        logger.info('Escape peak is considered with ratio {}'.format(ratio))
        # align to the same length
        # NOTE(review): np.zeros(len(self.y0.size)) looks wrong --
        # y0.size is an int, so len() of it raises TypeError; probably
        # np.zeros(self.y0.size) was intended.
        if self.y0.size > self.es_peak.size:
            temp = self.es_peak
            self.es_peak = np.zeros(len(self.y0.size))
            self.es_peak[:temp.size] = temp
        else:
            self.es_peak = self.es_peak[:self.y0.size]
    def get_profile(self):
        """Compute the theoretical spectrum from the current parameters
        (no fitting) and the residual against the measured data."""
        self.define_range()
        self.cal_x, self.cal_spectrum = calculate_profile(self.data, self.param_dict,
                                                          self.element_list)
        self.cal_y = np.zeros(len(self.cal_x))
        for k, v in six.iteritems(self.cal_spectrum):
            #print('component: {}'.format(k))
            self.cal_y += v
        self.residual = self.cal_y - self.y0
    def fit_data(self, x0, y0,
                 c_val=1e-2, fit_num=100, c_weight=1e3):
        """Run one nonlinear fit of y0(x0) with the current parameters.

        Parameters
        ----------
        x0, y0 : array
            channel positions and (background-subtracted) counts
        c_val : float
            xtol/ftol/gtol convergence tolerance
        fit_num : int
            maximum number of function evaluations
        c_weight : float
            constant added to counts when forming statistical weights
        """
        MS = ModelSpectrum(self.param_dict, self.element_list)
        MS.assemble_models()
        result = MS.model_fit(x0, y0,
                              weights=1/np.sqrt(c_weight+y0), maxfev=fit_num,
                              xtol=c_val, ftol=c_val, gtol=c_val)
        self.comps.clear()
        comps = result.eval_components(x=x0)
        self.comps = combine_lines(comps, self.element_list, self.bg)
        # Convert the channel axis to a calibrated energy axis.
        xnew = (result.values['e_offset'] +
                result.values['e_linear'] * x0 +
                result.values['e_quadratic'] * x0**2)
        self.fit_x = xnew
        self.fit_y = result.best_fit
        self.fit_result = result
        self.residual = self.fit_y - y0
    def fit_multiple(self):
        """
        Fit data in sequence according to given strategies.
        The param_dict is extended to cover elemental parameters.
        """
        self.define_range()
        self.get_background()
        #self.escape_peak()
        #PC = ParamController(self.param_dict, self.element_list)
        #self.param_dict = PC.params
        #print('param keys {}'.format(self.param_dict.keys()))
        y0 = self.y0 - self.bg #- self.es_peak
        t0 = time.time()
        logger.info('Start fitting!')
        for k, v in six.iteritems(self.all_strategy):
            if v:
                strat_name = fit_strategy_list[v-1]
                logger.info('Fit with {}: {}'.format(k, strat_name))
                strategy = extract_strategy(self.param_dict, strat_name)
                # register the strategy and extend the parameter list
                # to cover all given elements
                register_strategy(strat_name, strategy)
                set_parameter_bound(self.param_dict, strat_name)
                self.fit_data(self.x0, y0)
                self.update_param_with_result()
        # Re-add the background so fit_y is comparable to the raw data.
        self.fit_y += self.bg #+ self.es_peak
        t1 = time.time()
        logger.warning('Time used for fitting is : {}'.format(t1-t0))
        self.save_result()
    def fit_single_pixel(self):
        """
        This function performs single pixel fitting. Multiprocess is considered.
        """
        strategy_pixel = 'linear'
        set_parameter_bound(self.param_dict, strategy_pixel)
        logger.info('Starting single pixel fitting')
        t0 = time.time()
        result_map = fit_pixel_fast_multi(self.data_all, self.param_dict)
        t1 = time.time()
        logger.warning('Time used for pixel fitting is : {}'.format(t1-t0))
        # save data
        fpath = os.path.join(self.result_folder, 'Root.h5')
        write_to_hdf(fpath, result_map)
        #import matplotlib.pyplot as plt
        #plt.imshow(result_map['Fe_K'])
        #plt.show()
        # currently save data using pickle, need to be updated
        import pickle
        fpath = os.path.join(self.result_folder, 'root_data')
        pickle.dump(result_map, open(fpath, 'wb'))
    def save_result(self, fname=None):
        """
        Save the lmfit fit report as plain text in the result folder.

        Parameters
        ----------
        fname : str, optional
            name of output file; defaults to '<data_title>_out.txt'
        """
        if not fname:
            fname = self.data_title+'_out.txt'
        filepath = os.path.join(self.result_folder, fname)
        with open(filepath, 'w') as myfile:
            myfile.write(fit_report(self.fit_result, sort_pars=True))
        logger.warning('Results are saved to {}'.format(filepath))
def combine_lines(components, element_list, background):
    """
    Combine results for different lines of the same element.
    And also add background, compton and elastic.

    Parameters
    ----------
    components : dict
        output results from lmfit, one entry per fitted emission line
    element_list : list
        list of elemental lines, e.g. ['Fe_K', 'Gd_L']
    background : array
        background calculated in given range

    Returns
    -------
    dict :
        combined results for elements and other related peaks.
    """
    combined = {}
    for element in element_list:
        symbol = element.split('_')[0]
        total = 0
        # Every component whose name contains the element symbol belongs
        # to this element (e.g. 'Fe' matches 'Fe_ka1', 'Fe_kb1', ...).
        for line_name, line_value in components.items():
            if symbol in line_name:
                total += line_value
        combined[element] = total
    # add background and elastic
    combined['background'] = background
    combined['compton'] = components['compton']
    combined['elastic'] = components['elastic_']
    return combined
def extract_strategy(param, name):
    """
    Extract given strategy from param dict.

    Parameters
    ----------
    param : dict
        saving all parameters
    name : str
        strategy name

    Returns
    -------
    dict :
        with given strategy as value
    """
    strategy = {}
    # 'non_fitting_values' is bookkeeping, not a fittable parameter.
    for key, value in param.items():
        if key != 'non_fitting_values':
            strategy[key] = value[name]
    return strategy
def fit_pixel_fast(data, param):
    """
    Single pixel fit of experiment data. No multiprocess is applied.

    .. warning :: This function is not optimized as it calls
        linear_spectrum_fitting, where lots of repeated calculation
        are processed.

    Parameters
    ----------
    data : array
        3D data of experiment spectrum
    param : dict
        fitting parameters

    Returns
    -------
    dict :
        fitting values for all the elements
    """
    datas = data.shape
    elist = param['non_fitting_values']['element_list'].split(', ')
    elist = [e.strip(' ') for e in elist]
    # Bare element symbols default to their K line.
    elist = [e+'_K' for e in elist if ('_' not in e)]
    non_element = ['compton', 'elastic', 'background']
    total_list = elist + non_element
    result_map = dict()
    for v in total_list:
        result_map.update({v: np.zeros([datas[0], datas[1]])})
    # range() instead of the Python-2-only xrange(): this module already
    # uses print() calls, so under Python 3 xrange is a NameError.
    # (The unused 'x0 = np.arange(datas[2])' local was dropped.)
    for i in range(datas[0]):
        logger.info('Row number at {} out of total {}'.format(i, datas[0]))
        for j in range(datas[1]):
            #logger.info('Column number at {} out of total {}'.format(j, datas[1]))
            x, result = linear_spectrum_fitting(data[i, j, :], param,
                                                elemental_lines=elist, constant_weight=5)
            # Collapse each returned component spectrum to a per-pixel sum.
            for v in total_list:
                if v in result:
                    result_map[v][i, j] = np.sum(result[v])
    return result_map
def fit_per_line(row_num, data, matv, param):
    """
    Fit experiment data for a given row.

    Parameters
    ----------
    row_num : int
        which row to fit
    data : array
        3D data of experiment spectrum
    matv : array
        2D matrix of reference spectra for the activated emission lines
    param : dict
        fitting parameters

    Returns
    -------
    array :
        fitting values for all the elements at a given row.
    """
    datas = data.shape
    logger.info('Row number is {}'.format(row_num))
    out = []
    for i in range(datas[1]):
        # Subtract the SNIP-estimated background before the linear fit.
        bg = snip_method(data[row_num, i, :],
                         param['e_offset']['value'],
                         param['e_linear']['value'],
                         param['e_quadratic']['value'])
        y = data[row_num, i, :] - bg
        # Bug fix: fit_pixel() has no 'weight' keyword (its parameter is
        # 'constant_weight'), so the old call fit_pixel(y, matv, weight=True)
        # raised TypeError.  Rely on the default constant_weight=10 to keep
        # the weighted NNLS path.
        result, res = fit_pixel(y, matv)
        result = list(result)# + [np.sum(bg)]
        out.append(result)
    return np.array(out)
def fit_pixel_fast_multi(data, param):
    """
    Multiprocess fit of experiment data.

    Parameters
    ----------
    data : array
        3D data of experiment spectrum
    param : dict
        fitting parameters

    Returns
    -------
    dict :
        fitting values for all the elements
    """
    #logger.info('Row number at {} out of total {}'.format(i, datas[0]))
    #logger.info('no_processors_to_use = {}'.format(no_processors_to_use))
    no_processors_to_use = multiprocessing.cpu_count()
    logger.info('cpu count: {}'.format(no_processors_to_use))
    #print 'Creating pool with %d processes\n' % no_processors_to_use
    pool = multiprocessing.Pool(no_processors_to_use)
    datas = data.shape
    # The first pixel's spectrum is used only to derive the channel range.
    y0 = data[0, 0, :]
    x0 = np.arange(len(y0))
    # ratio to transfer energy value back to channel value
    approx_ratio = 100
    lowv = param['non_fitting_values']['energy_bound_low'] * approx_ratio
    highv = param['non_fitting_values']['energy_bound_high'] * approx_ratio
    x, y = trim(x0, y0, lowv, highv)
    start_i = x0[x0 == x[0]][0]
    end_i = x0[x0 == x[-1]][0]
    e_select, matv = construct_linear_model(x, param)
    mat_sum = np.sum(matv, axis=0)
    elist = param['non_fitting_values']['element_list'].split(', ')
    elist = [e.strip(' ') for e in elist]
    elist = [e+'_K' for e in elist if ('_' not in e)]
    # One async task per detector row; each worker fits a whole row.
    result_pool = [pool.apply_async(fit_per_line,
                                    (i, data[:, :, start_i:end_i+1], matv, param)) for i in range(datas[0])]
    results = []
    for r in result_pool:
        results.append(r.get())
    pool.terminate()
    pool.join()
    # results = []
    # for i in range(datas[0]):
    #     outv = fit_per_line(i, data[:, :, start_i:end_i+1], matv, param)
    #     results.append(outv)
    results = np.array(results)
    non_element = ['compton', 'elastic', 'background']
    total_list = elist + non_element
    result_map = dict()
    # Scale each component's fitted weight by its reference-spectrum sum;
    # the last entry (background) is copied through unscaled.
    for i in range(len(total_list)-1):
        result_map.update({total_list[i]: results[:, :, i]*mat_sum[i]})
    # add background
    result_map.update({total_list[-1]: results[:, :, -1]})
    # for v in total_list:
    #     for i in xrange(datas[0]):
    #         for j in xrange(datas[1]):
    #             result_map[v][i, j] = results[i, j].get(v, 0)
    # Reconstructed per-pixel model spectra (debug output only).
    sum_total = np.zeros([results.shape[0], results.shape[1], matv.shape[0]])
    for m in range(sum_total.shape[0]):
        for n in range(sum_total.shape[1]):
            for i in range(len(total_list)):
                sum_total[m, n, :] += results[m, n, i] * matv[:, i]
    print('label range: {}, {}'.format(start_i, end_i))
    #import pickle
    # NOTE(review): hard-coded user-specific debug path; should come from
    # the result folder / configuration instead.
    fit_path = '/Users/Li/Downloads/xrf_data/'
    fpath = os.path.join(fit_path, 'fit_data')
    #pickle.dump(result_map, open(fpath, 'wb'))
    np.save(fpath, sum_total)
    return result_map
# def fit_pixel_fast_multi(data, param):
# """
# Multiprocess fit of experiment data.
#
# Parameters
# ----------
# data : array
# 3D data of experiment spectrum
# param : dict
# fitting parameters
#
# Returns
# -------
# dict :
# fitting values for all the elements
# """
#
# #logger.info('Row number at {} out of total {}'.format(i, datas[0]))
# #logger.info('no_processors_to_use = {}'.format(no_processors_to_use))
# no_processors_to_use = multiprocessing.cpu_count()
# logger.info('cpu count: {}'.format(no_processors_to_use))
# #print 'Creating pool with %d processes\n' % no_processors_to_use
# pool = multiprocessing.Pool(no_processors_to_use)
#
# datas = data.shape
#
# x0 = np.arange(datas[2])
#
# elist = param['non_fitting_values']['element_list'].split(', ')
# elist = [e.strip(' ') for e in elist]
# elist = [e+'_K' for e in elist if ('_' not in e)]
#
# non_element = ['compton', 'elastic', 'background']
# total_list = elist + non_element
#
# result_map = dict()
# for v in total_list:
# result_map.update({v: np.zeros([datas[0], datas[1]])})
#
# result_pool = [pool.apply_async(fit_per_line,
# (i, data, param)) for i in range(datas[0])]
#
# results = []
# for r in result_pool:
# results.append(r.get())
#
# pool.terminate()
# pool.join()
#
# results = np.array(results)
#
# for v in total_list:
# for i in xrange(datas[0]):
# for j in xrange(datas[1]):
# result_map[v][i, j] = results[i, j].get(v, 0)
#
# return result_map
def fit_pixel(y, expected_matrix, constant_weight=10):
    """
    Run a non-negative linear fit for one pixel's spectrum.

    Parameters
    ----------
    y : array
        spectrum of experiment data
    expected_matrix : array
        2D matrix of activated element spectrum
    constant_weight : float
        value used to calculate weight like so:
        weights = constant_weight / (constant_weight + spectrum)

    Returns
    -------
    results : array
        weights of different element
    residue : array
        error
    """
    # A falsy constant_weight (0/None) selects the unweighted fit.
    if not constant_weight:
        return nnls_fit(y, expected_matrix)
    return weighted_nnls_fit(y, expected_matrix,
                             constant_weight=constant_weight)
def fit_pixel_slow_version(data, param, c_val=1e-2, fit_num=10, c_weight=1):
    """
    Fit the experiment spectrum pixel by pixel with the full nonlinear model.

    This is the slow reference path: one full ``ModelSpectrum`` fit per pixel.

    Parameters
    ----------
    data : array
        3D array of experiment spectra, shape (rows, cols, energy)
    param : dict
        fitting parameters; ``param['non_fitting_values']['element_list']``
        holds a comma-separated element list
    c_val : float
        tolerance intended for the optimizer (currently unused, see the
        commented-out ``model_fit`` kwargs below)
    fit_num : int
        max function evaluations intended for the optimizer (currently unused)
    c_weight : float
        constant added under the square root when building fit weights

    Returns
    -------
    dict
        per-element 2D maps of fitted peak areas
    """
    datas = data.shape
    x0 = np.arange(datas[2])
    elist = param['non_fitting_values']['element_list'].split(', ')
    elist = [e.strip(' ') for e in elist]
    result_map = dict()
    for v in elist:
        result_map.update({v: np.zeros([datas[0], datas[1]])})
    MS = ModelSpectrum(param)
    MS.model_spectrum()
    # `range` instead of Python-2-only `xrange`: this module already targets
    # both interpreters via `six`.
    for i in range(datas[0]):
        logger.info('Row number at {} out of total {}'.format(i, datas[0]))
        for j in range(datas[1]):
            logger.info('Column number at {} out of total {}'.format(j, datas[1]))
            y0 = data[i, j, :]
            result = MS.model_fit(x0, y0,
                                  w=1/np.sqrt(c_weight+y0))
                                  #maxfev=fit_num, xtol=c_val, ftol=c_val, gtol=c_val)
            # save result: map each element symbol to its fitted line area
            for v in elist:
                if '_L' in v:
                    line_name = v.split('_')[0]+'_la1_area'
                elif '_M' in v:
                    line_name = v.split('_')[0]+'_ma1_area'
                else:
                    line_name = v+'_ka1_area'
                result_map[v][i, j] = result.values[line_name]
    return result_map
def write_to_hdf(fpath, data_dict):
    """
    Add fitting results to existing h5 file. This is to be moved to filestore.

    Parameters
    ----------
    fpath : str
        path of the hdf5 file
    data_dict : dict
        dict of array
    """
    import h5py
    det = 'det1'
    # Collect names and arrays in matching order before writing.
    namelist = []
    data = []
    for key, arr in six.iteritems(data_dict):
        namelist.append(str(key))
        data.append(arr)
    f = h5py.File(fpath, 'r+')
    dataGrp = f['xrfmap/' + det]
    # Replace any previous fit results in place.
    if 'xrf_fit' in dataGrp:
        del dataGrp['xrf_fit']
    ds_data = dataGrp.create_dataset('xrf_fit', data=np.array(data))
    ds_data.attrs['comments'] = 'All fitting values are saved.'
    if 'xrf_fit_name' in dataGrp:
        del dataGrp['xrf_fit_name']
    name_data = dataGrp.create_dataset('xrf_fit_name', data=namelist)
    name_data.attrs['comments'] = 'All elements for fitting are saved.'
    f.close()
def compare_result(m, n, start_i=151, end_i=1350, all=True, linear=True):
    """
    Debug helper: overlay the experimental spectrum with the fitted one.

    Parameters
    ----------
    m, n : int
        pixel coordinates, used only when ``all`` is False
    start_i, end_i : int
        energy-channel range taken from the experiment data
    all : bool
        when True, compare spectra summed over all pixels
    linear : bool
        linear y-axis when True, log scale otherwise
    """
    import h5py
    import matplotlib.pyplot as plt
    x = np.arange(end_i-start_i)
    # NOTE: hard-coded local paths -- this is throwaway debugging code.
    fpath = '/Users/Li/Downloads/xrf_data/Root.h5'
    myfile = h5py.File(fpath, 'r')
    data_exp = myfile['xrfmap/det1/counts']
    fpath_fit = '/Users/Li/Downloads/xrf_data/fit_data.npy'
    d_fit = np.load(fpath_fit)
    # Pick axis scaling once, then plot either one pixel or the pixel sum.
    plot_fn = plt.plot if linear else plt.semilogy
    if all:
        plot_fn(x, np.sum(data_exp[:, :, start_i:end_i], axis=(0, 1)),
                x, np.sum(d_fit, axis=(0, 1)))
    else:
        plot_fn(x, data_exp[m, n, start_i:end_i], x, d_fit[m, n, :])
    plt.show()
|
bsd-3-clause
|
sumspr/scikit-learn
|
sklearn/tests/test_cross_validation.py
|
27
|
41664
|
"""Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
    """Dummy classifier to test the cross-validation"""
    def __init__(self, a=0, allow_nd=False):
        # a: arbitrary parameter echoed back through score();
        # allow_nd: whether fit/predict accept >2-dimensional inputs
        # (flattened to 2D when allowed).
        self.a = a
        self.allow_nd = allow_nd
    def fit(self, X, Y=None, sample_weight=None, class_prior=None,
            sparse_sample_weight=None, sparse_param=None, dummy_int=None,
            dummy_str=None, dummy_obj=None, callback=None):
        """The dummy arguments are to test that this fit function can
        accept non-array arguments through cross-validation, such as:
            - int
            - str (this is actually array-like)
            - object
            - function
        """
        self.dummy_int = dummy_int
        self.dummy_str = dummy_str
        self.dummy_obj = dummy_obj
        if callback is not None:
            # Lets tests inspect the estimator from inside cross_val_score
            # (see assert_fit_params in test_cross_val_score_fit_params).
            callback(self)
        if self.allow_nd:
            X = X.reshape(len(X), -1)
        if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be d')
        if sample_weight is not None:
            assert_true(sample_weight.shape[0] == X.shape[0],
                        'MockClassifier extra fit_param sample_weight.shape[0]'
                        ' is {0}, should be {1}'.format(sample_weight.shape[0],
                                                        X.shape[0]))
        if class_prior is not None:
            # NOTE(review): this reads the module-level `y` fixture, not the
            # `Y` argument -- fine for the fixtures in this file, but verify
            # before reusing the mock with other targets.
            assert_true(class_prior.shape[0] == len(np.unique(y)),
                        'MockClassifier extra fit_param class_prior.shape[0]'
                        ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                        len(np.unique(y))))
        if sparse_sample_weight is not None:
            fmt = ('MockClassifier extra fit_param sparse_sample_weight'
                   '.shape[0] is {0}, should be {1}')
            assert_true(sparse_sample_weight.shape[0] == X.shape[0],
                        fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
        if sparse_param is not None:
            fmt = ('MockClassifier extra fit_param sparse_param.shape '
                   'is ({0}, {1}), should be ({2}, {3})')
            # Compared against the module-level P_sparse fixture.
            assert_true(sparse_param.shape == P_sparse.shape,
                        fmt.format(sparse_param.shape[0],
                                   sparse_param.shape[1],
                                   P_sparse.shape[0], P_sparse.shape[1]))
        return self
    def predict(self, T):
        # Deterministic output: the first feature column.
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]
    def score(self, X=None, Y=None):
        # Constant score 1 / (1 + |a|), independent of the data.
        return 1. / (1 + np.abs(self.a))
    def get_params(self, deep=False):
        return {'a': self.a, 'allow_nd': self.allow_nd}
# Shared fixtures for the tests below.
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
# Sparse sample weights / params passed via fit_params in
# test_cross_val_score_fit_params; validated inside MockClassifier.fit.
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
                      shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# Two-class target: [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
    """Assert a (train, test) split is disjoint and, when n_samples is
    given, that it covers all indices exactly."""
    # Python sets give more informative assertion failure messages.
    train_set = set(train)
    test_set = set(test)
    # Train and test split should not overlap
    assert_equal(train_set.intersection(test_set), set())
    if n_samples is not None:
        # The union of train and test must cover all indices.
        assert_equal(train_set.union(test_set), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    """Check every sample appears at least once in a test fold and each
    split of ``cv`` is valid."""
    if expected_n_iter is not None:
        assert_equal(len(cv), expected_n_iter)
    else:
        expected_n_iter = len(cv)
    seen_test = set()
    n_iterations = 0
    for train, test in cv:
        check_valid_split(train, test, n_samples=n_samples)
        n_iterations += 1
        seen_test.update(test)
    # The accumulated test samples must cover the whole dataset.
    assert_equal(n_iterations, expected_n_iter)
    if n_samples is not None:
        assert_equal(seen_test, set(range(n_samples)))
def test_kfold_valueerrors():
    # Invalid constructor arguments for KFold/StratifiedKFold must raise.
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.KFold, 3, 4)
    # Check that a warning is raised if the least populated class has too few
    # members.
    y = [3, 3, -1, -1, 2]
    cv = assert_warns_message(Warning, "The least populated class",
                              cval.StratifiedKFold, y, 3)
    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented at on each
    # side of the split at each split
    check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
    # Error when number of folds is <= 1
    assert_raises(ValueError, cval.KFold, 2, 0)
    assert_raises(ValueError, cval.KFold, 2, 1)
    assert_raises(ValueError, cval.StratifiedKFold, y, 0)
    assert_raises(ValueError, cval.StratifiedKFold, y, 1)
    # When n is not integer:
    assert_raises(ValueError, cval.KFold, 2.5, 2)
    # When n_folds is not integer:
    assert_raises(ValueError, cval.KFold, 5, 1.5)
    assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
    # All indices must be returned in the test folds, including when the
    # number of samples is not a multiple of the number of folds
    # (equal-sized folds are then impossible).
    for n_samples in (300, 17):
        kf = cval.KFold(n_samples, 3)
        check_cv_coverage(kf, expected_n_iter=3, n_samples=n_samples)
def test_kfold_no_shuffle():
    # Manually check that KFold preserves the data ordering on toy datasets.
    # Each entry: (n_samples, [(expected_train, expected_test), ...]).
    expected = [
        (4, [([2, 3], [0, 1]),
             ([0, 1], [2, 3])]),
        (5, [([3, 4], [0, 1, 2]),
             ([0, 1, 2], [3, 4])]),
    ]
    for n, folds in expected:
        for (train, test), (exp_train, exp_test) in zip(cval.KFold(n, 2),
                                                        folds):
            assert_array_equal(train, exp_train)
            assert_array_equal(test, exp_test)
def test_stratified_kfold_no_shuffle():
    # Manually check that StratifiedKFold preserves the data ordering as much
    # as possible on toy datasets in order to avoid hiding sample dependencies
    # when possible
    # Balanced two-class toy data: each fold must keep class proportions.
    splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 2])
    assert_array_equal(train, [1, 3])
    train, test = next(splits)
    assert_array_equal(test, [1, 3])
    assert_array_equal(train, [0, 2])
    # Imbalanced classes (3 vs 4 samples).
    splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 3, 4])
    assert_array_equal(train, [2, 5, 6])
    train, test = next(splits)
    assert_array_equal(test, [2, 5, 6])
    assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
    # Check that stratified kfold preserves label ratios in individual splits
    # Repeat with shuffling turned off and on
    n_samples = 1000
    # Three classes with 10% / 89% / 1% prevalence.
    labels = np.array([4] * int(0.10 * n_samples) +
                      [0] * int(0.89 * n_samples) +
                      [1] * int(0.01 * n_samples))
    for shuffle in [False, True]:
        for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
            # `/` is true division here (`from __future__ import division`).
            assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
                                2)
            assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
                                2)
            assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
                                2)
            assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
            assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
            assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
    # Check that KFold returns folds with balanced sizes: fold sizes may
    # differ by at most one sample and must sum to n.
    for n_samples in range(11, 17):
        kf = cval.KFold(n_samples, 5)
        sizes = [len(test) for _, test in kf]
        assert_true((np.max(sizes) - np.min(sizes)) <= 1)
        assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only
    # when stratification is possible); repeat with shuffling off and on.
    labels = [0] * 3 + [1] * 14
    for shuffle in [False, True]:
        for i in range(11, 17):
            skf = cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
            sizes = [len(test) for _, test in skf]
            # Fold sizes may differ by at most one and must sum to n.
            assert_true((np.max(sizes) - np.min(sizes)) <= 1)
            assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
    # Check the indices are shuffled properly, and that all indices are
    # returned in the different test folds
    kf = cval.KFold(300, 3, shuffle=True, random_state=0)
    ind = np.arange(300)
    all_folds = None
    for train, test in kf:
        # NOTE(review): each train split has 200 indices while these
        # sorted_array references have 100/99 elements, so the element-wise
        # comparison has mismatched shapes and the assertions are likely
        # vacuous; the ranges (101, 200) and (201, 300) also skip indices
        # 100 and 200 -- verify intent before relying on these checks.
        sorted_array = np.arange(100)
        assert_true(np.any(sorted_array != ind[train]))
        sorted_array = np.arange(101, 200)
        assert_true(np.any(sorted_array != ind[train]))
        sorted_array = np.arange(201, 300)
        assert_true(np.any(sorted_array != ind[train]))
        if all_folds is None:
            all_folds = ind[test].copy()
        else:
            all_folds = np.concatenate((all_folds, ind[test]))
    # The concatenated test folds must cover every index exactly once.
    all_folds.sort()
    assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
    # Shuffling must actually happen when requested (different seeds give
    # different test folds) while keeping full sample coverage.
    labels = [0] * 20 + [1] * 20
    seeded = [list(cval.StratifiedKFold(labels, 5, shuffle=True,
                                        random_state=seed))
              for seed in (0, 1)]
    for (_, test0), (_, test1) in zip(*seeded):
        assert_true(set(test0) != set(test1))
    check_cv_coverage(seeded[0], expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits():  # see #2372
    # The digits samples are dependent: they are apparently grouped by authors
    # although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact be computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than than the non
    # shuffling variant (around 0.86).
    digits = load_digits()
    X, y = digits.data[:800], digits.target[:800]
    model = SVC(C=10, gamma=0.005)
    n = len(y)
    # Unshuffled KFold keeps author-grouped blocks together -> honest score.
    cv = cval.KFold(n, 5, shuffle=False)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)
    # Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regards to the writing style of the authors
    # by yielding a seriously overestimated score:
    cv = cval.KFold(n, 5, shuffle=True, random_state=0)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)
    cv = cval.KFold(n, 5, shuffle=True, random_state=1)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)
    # Similarly, StratifiedKFold should try to shuffle the data as little
    # as possible (while respecting the balanced class constraints)
    # and thus be able to detect the dependency by not overestimating
    # the CV score either. As the digits dataset is approximately balanced
    # the estimated mean score is close to the score measured with
    # non-shuffled KFold
    cv = cval.StratifiedKFold(y, 5)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)
def test_shuffle_split():
    # The same test_size expressed as a float fraction, a Python int, a
    # numpy int, or any of six.integer_types must produce identical splits
    # for a fixed random_state.
    ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
    ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
    ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
    for typ in six.integer_types:
        ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
    for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
        assert_array_equal(t1[0], t2[0])
        assert_array_equal(t2[0], t3[0])
        assert_array_equal(t3[0], t4[0])
        assert_array_equal(t1[1], t2[1])
        assert_array_equal(t2[1], t3[1])
        assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
    # Invalid class/size combinations must raise at construction time.
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
    # Check that error is raised if the test set size is smaller than n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
    # Train size or test size too small
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
    # For a variety of label layouts, each split must be disjoint, cover the
    # dataset and preserve the class proportions in train and test.
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          np.array([-1] * 800 + [1] * 50)
          ]
    for y in ys:
        sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
                                          random_state=0)
        for train, test in sss:
            assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Checks if folds keep classes proportions
            p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
                       / float(len(y[train])))
            p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
                      / float(len(y[test])))
            assert_array_almost_equal(p_train, p_test, 1)
            assert_equal(y[train].size + y[test].size, y.size)
            assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with a
    # equal chance
    n_folds = 5
    n_iter = 1000
    def assert_counts_are_ok(idx_counts, p):
        # Here we test that the distribution of the counts
        # per index is close enough to a binomial
        # NOTE: n_splits is read from the enclosing scope; it is assigned in
        # the loop below before this helper is first called.
        threshold = 0.05 / n_splits
        bf = stats.binom(n_splits, p)
        for count in idx_counts:
            p = bf.pmf(count)
            assert_true(p > threshold,
                        "An index is not drawn with chance corresponding "
                        "to even draws")
    for n_samples in (6, 22):
        labels = np.array((n_samples // 2) * [0, 1])
        splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
                                             test_size=1. / n_folds,
                                             random_state=0)
        # Count how many times each sample index lands in train/test.
        train_counts = [0] * n_samples
        test_counts = [0] * n_samples
        n_splits = 0
        for train, test in splits:
            n_splits += 1
            for counter, ids in [(train_counts, train), (test_counts, test)]:
                for id in ids:
                    counter[id] += 1
        assert_equal(n_splits, n_iter)
        assert_equal(len(train), splits.n_train)
        assert_equal(len(test), splits.n_test)
        assert_equal(len(set(train).intersection(test)), 0)
        label_counts = np.unique(labels)
        assert_equal(splits.test_size, 1.0 / n_folds)
        assert_equal(splits.n_train + splits.n_test, len(labels))
        assert_equal(len(label_counts), 2)
        ex_test_p = float(splits.n_test) / n_samples
        ex_train_p = float(splits.n_train) / n_samples
        assert_counts_are_ok(train_counts, ex_train_p)
        assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
    # Check that PredefinedSplit can reproduce a split generated by Kfold.
    folds = -1 * np.ones(10)
    kf_train, kf_test = [], []
    # Record the KFold splits while labelling each sample with its fold id.
    for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
        kf_train.append(train_ind)
        kf_test.append(test_ind)
        folds[test_ind] = i
    # Replaying the fold labels through PredefinedSplit must give back the
    # exact same splits.
    ps_train, ps_test = [], []
    for train_ind, test_ind in cval.PredefinedSplit(folds):
        ps_train.append(train_ind)
        ps_test.append(test_ind)
    assert_array_equal(ps_train, kf_train)
    assert_array_equal(ps_test, kf_test)
def test_leave_label_out_changing_labels():
    # Check that LeaveOneLabelOut and LeavePLabelOut work normally if
    # the labels variable is changed before calling __iter__
    labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
    labels_changing = np.array(labels, copy=True)
    lolo = cval.LeaveOneLabelOut(labels)
    lolo_changing = cval.LeaveOneLabelOut(labels_changing)
    lplo = cval.LeavePLabelOut(labels, p=2)
    lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
    # Mutating the label array after construction must not affect the
    # splits: the CV objects are expected to have copied the labels.
    labels_changing[:] = 0
    for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
        for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
            assert_array_equal(train, train_chan)
            assert_array_equal(test, test_chan)
def test_cross_val_score():
    # MockClassifier scores 1 / (1 + |a|) regardless of the data, so every
    # fold must report exactly that value.
    clf = MockClassifier()
    for a in range(-10, 10):
        clf.a = a
        # Smoke test
        scores = cval.cross_val_score(clf, X, y)
        assert_array_equal(scores, clf.score(X, y))
        # test with multioutput y
        # (a duplicated copy of these two lines was removed: the block was
        # repeated verbatim and added no coverage)
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))
        scores = cval.cross_val_score(clf, X_sparse, y)
        assert_array_equal(scores, clf.score(X_sparse, y))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    scores = cval.cross_val_score(clf, X, y.tolist())
    # An unknown scoring string must raise.
    assert_raises(ValueError, cval.cross_val_score, clf, X, y,
                  scoring="sklearn")
    # test with 3d X: allowed only when the estimator opts in via allow_nd
    X_3d = X[:, :, np.newaxis]
    clf = MockClassifier(allow_nd=True)
    scores = cval.cross_val_score(clf, X_3d, y)
    clf = MockClassifier(allow_nd=False)
    assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        # Real pandas types are tested only when pandas is installed.
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        # CheckingClassifier asserts the container types survive the split.
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
    # test that cross_val_score works with boolean masks
    svm = SVC(kernel="linear")
    iris = load_iris()
    X, y = iris.data, iris.target
    cv_indices = cval.KFold(len(y), 5)
    scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
    cv_indices = cval.KFold(len(y), 5)
    cv_masks = []
    for train, test in cv_indices:
        # Builtin `bool` instead of the removed numpy alias `np.bool`.
        mask_train = np.zeros(len(y), dtype=bool)
        mask_test = np.zeros(len(y), dtype=bool)
        mask_train[train] = 1
        mask_test[test] = 1
        # Bug fix: previously the integer index arrays (train, test) were
        # appended, so the boolean masks were never actually exercised.
        cv_masks.append((mask_train, mask_test))
    scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
    assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
    # test for svm with precomputed kernel
    svm = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    # A precomputed linear kernel must reproduce the linear-kernel scores.
    linear_kernel = np.dot(X, X.T)
    score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
    svm = SVC(kernel="linear")
    score_linear = cval.cross_val_score(svm, X, y)
    assert_array_equal(score_precomputed, score_linear)
    # Error raised for non-square X
    svm = SVC(kernel="precomputed")
    assert_raises(ValueError, cval.cross_val_score, svm, X, y)
    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cval.cross_val_score, svm,
                  linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
    # fit_params of every supported kind (arrays, sparse matrices, plain
    # Python objects, a callback) must reach the estimator's fit() intact;
    # MockClassifier.fit performs the shape checks itself.
    clf = MockClassifier()
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    DUMMY_INT = 42
    DUMMY_STR = '42'
    DUMMY_OBJ = object()
    def assert_fit_params(clf):
        # Function to test that the values are passed correctly to the
        # classifier arguments for non-array type
        assert_equal(clf.dummy_int, DUMMY_INT)
        assert_equal(clf.dummy_str, DUMMY_STR)
        assert_equal(clf.dummy_obj, DUMMY_OBJ)
    fit_params = {'sample_weight': np.ones(n_samples),
                  'class_prior': np.ones(n_classes) / n_classes,
                  'sparse_sample_weight': W_sparse,
                  'sparse_param': P_sparse,
                  'dummy_int': DUMMY_INT,
                  'dummy_str': DUMMY_STR,
                  'dummy_obj': DUMMY_OBJ,
                  'callback': assert_fit_params}
    cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
    # A custom scorer built with make_scorer must be invoked once per fold
    # (default CV is 3-fold) and its return value reported unchanged.
    clf = MockClassifier()
    _score_func_args = []
    def score_func(y_test, y_predict):
        _score_func_args.append((y_test, y_predict))
        return 1.0
    with warnings.catch_warnings(record=True):
        scoring = make_scorer(score_func)
        score = cval.cross_val_score(clf, X, y, scoring=scoring)
    assert_array_equal(score, [1.0, 1.0, 1.0])
    assert len(_score_func_args) == 3
def test_cross_val_score_errors():
    # An object without a fit method cannot be cross-validated.
    class BrokenEstimator:
        pass

    assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
    # Invalid or inconsistent size arguments must raise.
    assert_raises(ValueError, cval.train_test_split)
    assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
    # train_size + test_size > 1.0
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
                  train_size=0.6)
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size=np.float32(0.6), train_size=np.float32(0.6))
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size="wrong_type")
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
                  train_size=4)
    # Unknown keyword argument
    assert_raises(TypeError, cval.train_test_split, range(3),
                  some_argument=1.1)
    # Inputs of different lengths
    assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
    # End-to-end checks of train_test_split: default split, list handling,
    # nd-array support and the stratify option.
    X = np.arange(100).reshape((10, 10))
    X_s = coo_matrix(X)
    y = np.arange(10)
    # simple test
    split = cval.train_test_split(X, y, test_size=None, train_size=.5)
    X_train, X_test, y_train, y_test = split
    assert_equal(len(y_test), len(y_train))
    # test correspondence of X and y
    assert_array_equal(X_train[:, 0], y_train * 10)
    assert_array_equal(X_test[:, 0], y_test * 10)
    # conversion of lists to arrays (deprecated?)
    with warnings.catch_warnings(record=True):
        split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_array_equal(X_train, X_s_train.toarray())
    assert_array_equal(X_test, X_s_test.toarray())
    # don't convert lists to anything else by default
    split = cval.train_test_split(X, X_s, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_true(isinstance(y_train, list))
    assert_true(isinstance(y_test, list))
    # allow nd-arrays
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    split = cval.train_test_split(X_4d, y_3d)
    assert_equal(split[0].shape, (7, 5, 3, 2))
    assert_equal(split[1].shape, (3, 5, 3, 2))
    assert_equal(split[2].shape, (7, 7, 11))
    assert_equal(split[3].shape, (3, 7, 11))
    # test stratification option
    y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
    for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
                                        [2, 4, 2, 4, 6]):
        train, test = cval.train_test_split(y,
                                            test_size=test_size,
                                            stratify=y,
                                            random_state=0)
        assert_equal(len(test), exp_test_size)
        assert_equal(len(test) + len(train), len(y))
        # check the 1:1 ratio of ones and twos in the data is preserved
        assert_equal(np.sum(train == 1), np.sum(train == 2))
# NOTE(review): missing the `test_` prefix, so this is never collected by
# the test runner -- verify whether that is intentional.
def train_test_split_pandas():
    # check cross_val_score doesn't destroy pandas dataframe
    types = [MockDataFrame]
    try:
        from pandas import DataFrame
        types.append(DataFrame)
    except ImportError:
        pass
    for InputFeatureType in types:
        # X dataframe
        X_df = InputFeatureType(X)
        X_train, X_test = cval.train_test_split(X_df)
        assert_true(isinstance(X_train, InputFeatureType))
        assert_true(isinstance(X_test, InputFeatureType))
# NOTE(review): missing the `test_` prefix, so this is never collected by
# the test runner -- verify whether that is intentional.
def train_test_split_mock_pandas():
    # X mock dataframe
    X_df = MockDataFrame(X)
    X_train, X_test = cval.train_test_split(X_df)
    assert_true(isinstance(X_train, MockDataFrame))
    assert_true(isinstance(X_test, MockDataFrame))
    # allow_lists=False forces conversion to plain numpy arrays.
    X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
    assert_true(isinstance(X_train_arr, np.ndarray))
    assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
    iris = load_iris()
    clf = SVC(kernel='linear')
    # Default score (should be the accuracy score)
    scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
    assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # Correct classification score (aka. zero / one score) - should be the
    # same as the default estimator score
    zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="accuracy", cv=5)
    assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (class are balanced so f1_score should be equal to zero/one
    # score
    f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="f1_weighted", cv=5)
    assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
    X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
                           random_state=0)
    reg = Ridge()
    # Default score of the Ridge regression estimator
    scores = cval.cross_val_score(reg, X, y, cv=5)
    assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. determination coefficient) - should be the
    # same as the default estimator score
    r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
    assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # Mean squared error; this is a loss function, so "scores" are negative
    mse_scores = cval.cross_val_score(reg, X, y, cv=5,
                                      scoring="mean_squared_error")
    expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
    assert_array_almost_equal(mse_scores, expected_mse, 2)
    # Explained variance
    # (passed as a custom scorer object rather than a string)
    scoring = make_scorer(explained_variance_score)
    ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
    assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
    # permutation_test_score: a real signal must yield a high score with a
    # small p-value; a random target must not.
    iris = load_iris()
    X = iris.data
    X_sparse = coo_matrix(X)
    y = iris.target
    svm = SVC(kernel='linear')
    cv = cval.StratifiedKFold(y, 2)
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_greater(score, 0.9)
    assert_almost_equal(pvalue, 0.0, 1)
    # Grouping all samples into a single label must not change the result.
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
        labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)
    # check that we obtain the same results with a sparse representation
    svm_sparse = SVC(kernel='linear')
    cv_sparse = cval.StratifiedKFold(y, 2)
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
        scoring="accuracy", labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)
    # test with custom scoring object
    def custom_score(y_true, y_pred):
        return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
                / y_true.shape[0])
    scorer = make_scorer(custom_score)
    score, _, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
    assert_almost_equal(score, .93, 2)
    assert_almost_equal(pvalue, 0.01, 3)
    # set random y
    y = np.mod(np.arange(len(y)), 3)
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_less(score, 0.5)
    assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
    # Every CV generator must yield integer index arrays (not boolean
    # masks) usable for fancy indexing into X and y.
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    # explicitly passing indices value is deprecated
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    ss = cval.ShuffleSplit(2)
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            # Bug fix: the second assertion previously re-checked `train`,
            # leaving `test` unvalidated.
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            X[train], X[test]
            y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
    # Same as test_cross_val_generator_with_indices, but relying on the
    # default (index-based) behavior of the generators.
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ss = cval.ShuffleSplit(2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            # Bug fix: the second assertion previously re-checked `train`,
            # leaving `test` unvalidated.
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            X[train], X[test]
            y[train], y[test]
def test_shufflesplit_errors():
    """Invalid test_size/train_size combinations must raise ValueError."""
    bad_kwargs = [
        dict(test_size=2.0),                    # float > 1
        dict(test_size=1.0),                    # whole dataset as test
        dict(test_size=0.1, train_size=0.95),   # fractions sum > 1
        dict(test_size=11),                     # more than n samples
        dict(test_size=10),                     # exactly n samples
        dict(test_size=8, train_size=3),        # counts sum > n
        dict(train_size=1j),                    # non-real size
        dict(test_size=None, train_size=None),  # both unspecified
    ]
    for kwargs in bad_kwargs:
        assert_raises(ValueError, cval.ShuffleSplit, 10, **kwargs)
def test_shufflesplit_reproducible():
    """With a fixed random_state, two passes over the same ShuffleSplit
    must produce identical train index sequences."""
    ss = cval.ShuffleSplit(10, random_state=21)
    first_pass = [train for train, _ in ss]
    second_pass = [train for train, _ in ss]
    assert_array_equal(first_pass, second_pass)
def test_safe_split_with_precomputed_kernel():
    """_safe_split must slice a precomputed Gram matrix on both axes.

    The train/train and test/train kernel submatrices have to match the
    kernel recomputed from the corresponding raw-data splits.
    """
    clf = SVC()
    clfp = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    K = np.dot(X, X.T)
    cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
    train_idx, test_idx = next(iter(cv))
    X_train, y_train = cval._safe_split(clf, X, y, train_idx)
    K_train, y_train2 = cval._safe_split(clfp, K, y, train_idx)
    assert_array_almost_equal(K_train, np.dot(X_train, X_train.T))
    X_test, y_test = cval._safe_split(clf, X, y, test_idx, train_idx)
    K_test, y_test2 = cval._safe_split(clfp, K, y, test_idx, train_idx)
    assert_array_almost_equal(K_test, np.dot(X_test, X_train.T))
def test_cross_val_score_allow_nans():
    """cross_val_score must accept X containing NaNs when the pipeline
    imputes them before classification (smoke test)."""
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan  # one fully-missing row
    y = np.repeat([0, 1], X.shape[0] / 2)
    pipeline = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.cross_val_score(pipeline, X, y, cv=5)
def test_train_test_split_allow_nans():
    """train_test_split only indexes rows, so NaNs in X must be
    tolerated (smoke test)."""
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan  # one fully-missing row
    y = np.repeat([0, 1], X.shape[0] / 2)
    cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
    """permutation_test_score must accept X containing NaNs when the
    pipeline imputes them before classification (smoke test)."""
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan  # one fully-missing row
    y = np.repeat([0, 1], X.shape[0] / 2)
    pipeline = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.permutation_test_score(pipeline, X, y, cv=5)
def test_check_cv_return_types():
    """check_cv must return StratifiedKFold for classifiers with
    single-output classification targets and plain KFold otherwise."""
    X = np.ones((9, 2))
    # No classifier: plain KFold regardless of target.
    assert_true(isinstance(cval.check_cv(3, X, classifier=False),
                           cval.KFold))

    y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
    assert_true(isinstance(cval.check_cv(3, X, y_binary, classifier=True),
                           cval.StratifiedKFold))

    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    assert_true(isinstance(cval.check_cv(3, X, y_multiclass, classifier=True),
                           cval.StratifiedKFold))

    X = np.ones((5, 2))
    # Multilabel and multioutput targets cannot be stratified: KFold.
    y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
    assert_true(isinstance(cval.check_cv(3, X, y_multilabel, classifier=True),
                           cval.KFold))

    y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
    assert_true(isinstance(cval.check_cv(3, X, y_multioutput, classifier=True),
                           cval.KFold))
def test_cross_val_score_multilabel():
    """Per-fold precision of 1-NN on a two-label problem under the
    micro / macro / samples averaging strategies."""
    X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
                  [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
    y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
                  [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
    clf = KNeighborsClassifier(n_neighbors=1)
    expected = {
        'micro': [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3],
        'macro': [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4],
        'samples': [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4],
    }
    for average, target in expected.items():
        scorer = make_scorer(precision_score, average=average)
        scores = cval.cross_val_score(clf, X, y, scoring=scorer, cv=5)
        assert_almost_equal(scores, target)
def test_cross_val_predict():
    """Check cross_val_predict against a manual per-fold prediction loop
    and smoke-test it on sparse input, an unsupervised estimator, and an
    invalid CV whose test folds are not a partition of the samples."""
    boston = load_boston()
    X, y = boston.data, boston.target
    cv = cval.KFold(len(boston.target))

    est = Ridge()

    # Naive loop (should be same as cross_val_predict):
    preds2 = np.zeros_like(y)
    for train, test in cv:
        est.fit(X[train], y[train])
        preds2[test] = est.predict(X[test])

    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_array_almost_equal(preds, preds2)

    preds = cval.cross_val_predict(est, X, y)
    assert_equal(len(preds), len(y))

    cv = cval.LeaveOneOut(len(y))
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_equal(len(preds), len(y))

    # Sparse input: zero out sub-median entries and convert to COO.
    Xsp = X.copy()
    Xsp *= (Xsp > np.median(Xsp))
    Xsp = coo_matrix(Xsp)
    preds = cval.cross_val_predict(est, Xsp, y)
    # FIX: lengths are scalars -- use assert_equal, consistent with the
    # identical checks above (was assert_array_almost_equal).
    assert_equal(len(preds), len(y))

    # Unsupervised estimator: predict a label for every sample.
    preds = cval.cross_val_predict(KMeans(), X)
    assert_equal(len(preds), len(y))

    # Test folds that are not a partition of the samples must be rejected.
    def bad_cv():
        for i in range(4):
            yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
    assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
    """Smoke-test cross_val_predict on a variety of input containers:
    dense/sparse X, multioutput y, plain lists, and a 3d X array."""
    clf = Ridge()
    # Smoke test
    predictions = cval.cross_val_predict(clf, X, y)
    assert_equal(predictions.shape, (10,))

    # test with multioutput y
    # FIX: this call/assert pair was duplicated verbatim further down;
    # the redundant copy has been removed.
    predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_array_equal(predictions.shape, (10, 2))

    # test with sparse X, single-output y
    predictions = cval.cross_val_predict(clf, X_sparse, y)
    assert_array_equal(predictions.shape, (10,))

    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())

    clf = CheckingClassifier(check_y=list_check)
    predictions = cval.cross_val_predict(clf, X, y.tolist())

    # test with 3d X
    X_3d = X[:, :, np.newaxis]
    check_3d = lambda x: x.ndim == 3
    clf = CheckingClassifier(check_X=check_3d)
    predictions = cval.cross_val_predict(clf, X_3d, y)
    assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    """cross_val_predict must pass pandas containers through untouched.

    Runs against the MockDataFrame stand-in and, when pandas is
    installed, against real Series/DataFrame objects.
    """
    input_types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
    except ImportError:
        pass
    else:
        input_types.append((Series, DataFrame))
    for TargetType, InputFeatureType in input_types:
        # X dataframe, y series
        X_df = InputFeatureType(X)
        y_ser = TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
    """fit_params holding a sparse matrix must be sliced per fold
    without error, and scoring must still succeed on every fold."""
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
    scores = cval.cross_val_score(clf, X, y, fit_params=fit_params)
    assert_array_equal(scores, np.ones(3))
def test_check_is_partition():
    """_check_is_partition is True only for a permutation of range(n)."""
    indices = np.arange(100)
    assert_true(cval._check_is_partition(indices, 100))
    # Removing an element breaks the partition property...
    assert_false(cval._check_is_partition(np.delete(indices, 23), 100))
    # ...and so does duplicating one.
    indices[0] = 23
    assert_false(cval._check_is_partition(indices, 100))
|
bsd-3-clause
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.